| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 value) | license (15 values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.scenarios.storage.storagecapacity.StorageCapacity
import mock
import unittest
import os
import json
from yardstick.benchmark.scenarios.storage import storagecapacity
DISK_SIZE_SAMPLE_OUTPUT = '{"Numberf of devides": "2", "Total disk size in bytes": "1024000000"}'
BLOCK_SIZE_SAMPLE_OUTPUT = '{"/dev/sda": 1024, "/dev/sdb": 4096}'
DISK_UTIL_RAW_OUTPUT = "vda 10.00\nvda 0.00"
DISK_UTIL_SAMPLE_OUTPUT = '{"vda": {"avg_util": 5.0, "max_util": 10.0, "min_util": 0.0}}'
@mock.patch('yardstick.benchmark.scenarios.storage.storagecapacity.ssh')
class StorageCapacityTestCase(unittest.TestCase):
def setUp(self):
self.scn = {
"options": {
'test_type': 'disk_size'
}
}
self.ctx = {
"host": {
'ip': '172.16.0.137',
'user': 'cirros',
'password': "root"
}
}
self.result = {}
def test_capacity_successful_setup(self, mock_ssh):
c = storagecapacity.StorageCapacity(self.scn, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
self.assertIsNotNone(c.client)
self.assertTrue(c.setup_done)
def test_capacity_disk_size_successful(self, mock_ssh):
c = storagecapacity.StorageCapacity(self.scn, self.ctx)
mock_ssh.SSH().execute.return_value = (0, DISK_SIZE_SAMPLE_OUTPUT, '')
c.run(self.result)
expected_result = json.loads(DISK_SIZE_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_block_size_successful(self, mock_ssh):
args = {
"options": {
'test_type': 'block_size'
}
}
c = storagecapacity.StorageCapacity(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, BLOCK_SIZE_SAMPLE_OUTPUT, '')
c.run(self.result)
expected_result = json.loads(BLOCK_SIZE_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_disk_utilization_successful(self, mock_ssh):
args = {
"options": {
'test_type': 'disk_utilization',
'interval': 1,
'count': 2
}
}
c = storagecapacity.StorageCapacity(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, DISK_UTIL_RAW_OUTPUT, '')
c.run(self.result)
expected_result = json.loads(DISK_UTIL_SAMPLE_OUTPUT)
self.assertEqual(self.result, expected_result)
def test_capacity_unsuccessful_script_error(self, mock_ssh):
c = storagecapacity.StorageCapacity(self.scn, self.ctx)
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, self.result)
def main():
unittest.main()
if __name__ == '__main__':
main()
| dtudares/hello-world | yardstick/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py | Python | apache-2.0 | 3,405 |
#coding=utf-8
from functools import wraps
from flask import abort
from flask_login import current_user
from app.models import Permission
def permission_required(permission):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not current_user.can(permission):
abort(403)
return f(*args, **kwargs)
return decorated_function
return decorator
def admin_required(f):
return permission_required(Permission.ADMINISTER)(f)
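# Usage sketch (illustrative, not part of the original module): assuming a Flask
# application instance `app` with views defined elsewhere, and a hypothetical
# Permission.MODERATE_COMMENTS flag on the Permission model, the decorators
# above would be applied like this:
#
#     from app.decorators import admin_required, permission_required
#     from app.models import Permission
#
#     @app.route('/admin')
#     @admin_required
#     def admin_dashboard():
#         return 'admins only'
#
#     @app.route('/moderate')
#     @permission_required(Permission.MODERATE_COMMENTS)
#     def moderate_comments():
#         return 'moderators only'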
| ricardonhuang/blog | app/decorators.py | Python | gpl-3.0 | 516 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 03 11:56:58 2014
@author: paulinkenbrandt
"""
import arcpy
import numpy as np
import pandas as pd
infile = arcpy.GetParameterAsText(0)
#input = "C:\\Temp\\test.gdb\\WQP_Results"
#
#tview = arcpy.MakeTableView_management(input)
arr = arcpy.da.TableToNumPyArray(infile, ('Param', 'ResultValue','SampleId'))
param = arr['Param']
result = arr['ResultValue']
smid = arr['SampleId']
shortparam = []
shortresult = []
shortname = []
shortsmid = []
chemlist = ['Sulfate', 'Nitrate', 'Nitrite', 'Calcium', 'Potassium', 'Magnesium','Sodium', 'Sodium plus potassium', 'Bicarbonate', 'Carbonate', 'Chloride']
chemdict = {'Ammonia-nitrogen as N':'N','Inorganic nitrogen (nitrate and nitrite) as N':'N','Inorganic nitrogen (nitrate and nitrite)':'N','Kjeldahl nitrogen':'N','Total dissolved solids':'TDS','Sulfate as SO4':'SO4','pH, lab':'pH','Temperature, water':'Temp_C','Arsenic':'As','Bromide':'Br','Carbon dioxide':'CO2', 'Specific Conductance':'Cond','Conductivity':'Cond', 'Sulfate':'SO4', 'Nitrate':'NO3', 'Nitrite':'NO2','Magnesium':'Mg', 'Calcium':'Ca', 'Potassium':'K', 'Sodium':'Na', 'Sodium plus potassium':'NaK', 'Bicarbonate':'HCO3', 'Carbonate':'CO3', 'Chloride':'Cl'}
for i in range(len(param)):
if param[i] in chemlist:
shortparam.append(param[i])
shortresult.append(float(result[i]))
shortname.append(chemdict[param[i]])
shortsmid.append(smid[i])
dat = zip(shortparam,shortresult,shortname,shortsmid)
df = pd.DataFrame(dat,columns=['shortparam','shortresult','shortname','shortsmid'])
dpiv = df.pivot(index='shortsmid', columns='shortparam', values='shortresult')
dgrp = dpiv.groupby(level='shortsmid').agg(np.mean)
arcpy.AddMessage(dgrp)
| inkenbrandt/EPAEN | Transposer/Transposer.py | Python | gpl-2.0 | 1,726 |
import inspect
import os
from twisted.internet.defer import fail, succeed
from twisted.web.error import Error
from juju.lib.testing import TestCase
from juju.providers import ec2
from juju.providers.ec2.utils import get_current_ami, get_image_id
IMAGE_URI_TEMPLATE = "\
http://uec-images.ubuntu.com/query/%s/server/released.current.txt"
IMAGE_DATA_DIR = os.path.join(
os.path.dirname(inspect.getabsfile(ec2)), "tests", "data")
class GetCurrentAmiTest(TestCase):
def test_bad_url(self):
"""
If the requested page doesn't exist at all, a LookupError is raised
"""
page = self.mocker.replace("twisted.web.client.getPage")
page(IMAGE_URI_TEMPLATE % "nutty")
self.mocker.result(fail(Error("404")))
self.mocker.replay()
d = get_current_ami(ubuntu_release="nutty")
self.failUnlessFailure(d, LookupError)
return d
def test_umatched_ami(self):
"""
If an ami is not found that matches the specifications, then
a LookupError is raised.
"""
page = self.mocker.replace("twisted.web.client.getPage")
page(IMAGE_URI_TEMPLATE % "lucid")
self.mocker.result(succeed(""))
self.mocker.replay()
d = get_current_ami(ubuntu_release="lucid")
self.failUnlessFailure(d, LookupError)
return d
def test_current_ami(self):
"""The current server machine image can be retrieved."""
page = self.mocker.replace("twisted.web.client.getPage")
page(IMAGE_URI_TEMPLATE % "lucid")
self.mocker.result(succeed(
open(os.path.join(IMAGE_DATA_DIR, "lucid.txt")).read()))
self.mocker.replay()
d = get_current_ami(ubuntu_release="lucid")
def verify_result(result):
self.assertEqual(result, "ami-714ba518")
d.addCallback(verify_result)
return d
def test_current_ami_by_region(self):
"""The current server machine image can be retrieved by region."""
page = self.mocker.replace("twisted.web.client.getPage")
page(IMAGE_URI_TEMPLATE % "lucid")
self.mocker.result(
succeed(open(
os.path.join(IMAGE_DATA_DIR, "lucid.txt")).read()))
self.mocker.replay()
d = get_current_ami(ubuntu_release="lucid", region="us-west-1")
def verify_result(result):
self.assertEqual(result, "ami-cb97c68e")
d.addCallback(verify_result)
return d
def test_current_ami_non_ebs(self):
"""
The get_current_ami function accepts several filtering parameters
to guide image selection.
"""
page = self.mocker.replace("twisted.web.client.getPage")
page(IMAGE_URI_TEMPLATE % "lucid")
self.mocker.result(succeed(
open(os.path.join(IMAGE_DATA_DIR, "lucid.txt")).read()))
self.mocker.replay()
d = get_current_ami(ubuntu_release="lucid", persistent_storage=False)
def verify_result(result):
self.assertEqual(result, "ami-2d4aa444")
d.addCallback(verify_result)
return d
class GetImageIdTest(TestCase):
def test_default_image_id(self):
d = get_image_id({"default-image-id": "ami-burble"}, {})
d.addCallback(self.assertEquals, "ami-burble")
return d
def test_no_constraints(self):
get_current_ami_m = self.mocker.replace(get_current_ami)
get_current_ami_m(region="us-east-1")
self.mocker.result(succeed("ami-giggle"))
self.mocker.replay()
d = get_image_id({}, {})
d.addCallback(self.assertEquals, "ami-giggle")
return d
def test_default_series(self):
get_current_ami_m = self.mocker.replace(get_current_ami)
get_current_ami_m(region="us-east-1", ubuntu_release="puissant")
self.mocker.result(succeed("ami-pickle"))
self.mocker.replay()
d = get_image_id({"default-series": "puissant"}, {})
d.addCallback(self.assertEquals, "ami-pickle")
return d
def test_uses_constraints(self):
get_current_ami_m = self.mocker.replace(get_current_ami)
get_current_ami_m(ubuntu_release="serendipitous", architecture="x512",
daily=False, persistent_storage=True,
region="blah-north-6")
self.mocker.result(succeed("ami-tinkle"))
self.mocker.replay()
constraints = {
"architecture": "x512",
"ubuntu_release": "serendipitous",
"persistent_storage": True,
"daily": False}
d = get_image_id(
{"region": "blah-north-6", "default-series": "overridden"},
constraints)
d.addCallback(self.assertEquals, "ami-tinkle")
return d
| mcclurmc/juju | juju/providers/ec2/tests/test_utils.py | Python | agpl-3.0 | 4,795 |
"""The Smart Meter Texas integration."""
import asyncio
import logging
from smart_meter_texas import Account, Client
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
DataUpdateCoordinator,
Debouncer,
UpdateFailed,
)
from .const import (
DATA_COORDINATOR,
DATA_SMART_METER,
DEBOUNCE_COOLDOWN,
DOMAIN,
SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Smart Meter Texas from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
account = Account(username, password)
smart_meter_texas_data = SmartMeterTexasData(hass, entry, account)
try:
await smart_meter_texas_data.client.authenticate()
except SmartMeterTexasAuthError:
_LOGGER.error("Username or password was not accepted")
return False
except asyncio.TimeoutError as error:
raise ConfigEntryNotReady from error
await smart_meter_texas_data.setup()
async def async_update_data():
_LOGGER.debug("Fetching latest data")
await smart_meter_texas_data.read_meters()
return smart_meter_texas_data
# Use a DataUpdateCoordinator to manage the updates. This is due to the
# Smart Meter Texas API which takes around 30 seconds to read a meter.
# This keeps Home Assistant from complaining about the component taking
# too long to update.
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="Smart Meter Texas",
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
hass, _LOGGER, cooldown=DEBOUNCE_COOLDOWN, immediate=True
),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_COORDINATOR: coordinator,
DATA_SMART_METER: smart_meter_texas_data,
}
asyncio.create_task(coordinator.async_refresh())
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
class SmartMeterTexasData:
"""Manages coordinatation of API data updates."""
def __init__(
self, hass: HomeAssistant, entry: ConfigEntry, account: Account
) -> None:
"""Initialize the data coordintator."""
self._entry = entry
self.account = account
websession = aiohttp_client.async_get_clientsession(hass)
self.client = Client(websession, account)
self.meters = []
async def setup(self):
"""Fetch all of the user's meters."""
self.meters = await self.account.fetch_meters(self.client)
_LOGGER.debug("Discovered %s meter(s)", len(self.meters))
async def read_meters(self):
"""Read each meter."""
for meter in self.meters:
try:
await meter.read_meter(self.client)
except (SmartMeterTexasAPIError, SmartMeterTexasAuthError) as error:
raise UpdateFailed(error) from error
return self.meters
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| kennedyshead/home-assistant | homeassistant/components/smart_meter_texas/__init__.py | Python | apache-2.0 | 3,744 |
# -*- coding: utf-8 -*-
from rest_framework.test import APITestCase
from django.core.urlresolvers import reverse
import json
from helpers import *
class ImageUploadTest(APITestCase):
def test_list_uploaded_images(self):
"""
"""
url = reverse('uploadedimage-list')
#+ with self.assertNumQueries(1):
#+ response = self.client.get(url, format='json')
#+ response_data = json.loads(response.content.decode('utf8'))
#+ self.assertEqual(response.status_code, 200)
# /v1/uploads/images/ atados_core.views.UploadedImageViewSet uploadedimage-list
def test_get_uploaded_image(self):
"""
"""
url = reverse('uploadedimage-detail', args=[1])
#+ with self.assertNumQueries(1):
#+ response = self.client.get(url, format='json')
#+ response_data = json.loads(response.content.decode('utf8'))
#+ self.assertEqual(response.status_code, 200)
# /v1/uploads/images/<pk>/ atados_core.views.UploadedImageViewSet uploadedimage-detail
def test_upload_nonprofit_cover_image(self):
"""
"""
url = reverse('v1_upload_nonprofit_cover_image')
#+ with self.assertNumQueries(1):
#+ response = self.client.get(url, format='json')
#+ response_data = json.loads(response.content.decode('utf8'))
#+ self.assertEqual(response.status_code, 200)
# /v1/upload_nonprofit_cover_image/ rest_framework.decorators.upload_nonprofit_cover_image
def test_upload_nonprofit_profile_image(self):
"""
"""
url = reverse('v1_upload_nonprofit_profile_image')
#+ with self.assertNumQueries(1):
#+ response = self.client.get(url, format='json')
#+ response_data = json.loads(response.content.decode('utf8'))
#+ self.assertEqual(response.status_code, 200)
# /v1/upload_nonprofit_profile_image/ rest_framework.decorators.upload_nonprofit_profile_image
| atados/api | atados_core/tests/test_routes/news/test_image_upload.py | Python | mit | 1,810 |
__author__ = 'mpetyx'
from tastypie.authorization import DjangoAuthorization
from .models import OpeniOrder
from OPENiapp.APIS.OpeniGenericResource import GenericResource
from OPENiapp.APIS.OPENiAuthorization import Authorization
from OPENiapp.APIS.OPENiAuthentication import Authentication
class OrderResource(GenericResource):
class Meta:
queryset = OpeniOrder.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
resource_name = 'order'
authentication = Authentication()
authorization = Authorization()
# filtering = {
# 'slug': ALL,
# 'user': ALL_WITH_RELATIONS,
# 'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
# }
extra_actions = [
{
"name": "comments",
"http_method": "GET",
"resource_type": "list",
"description": "comments from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "likes",
"http_method": "GET",
"resource_type": "list",
"description": "likes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
},
{
"name": "dislikes",
"http_method": "GET",
"resource_type": "list",
"description": "dislikes from CBS",
"fields": {
"cbs": {
"type": "string",
"required": True,
"description": "list of selected CBS"
}
}
}
]
| OPENi-ict/ntua_demo | openiPrototype/openiPrototype/APIS/Products_and_Services/Order/Resources.py | Python | apache-2.0 | 2,146 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Pre-build plugin which adds labels to the dockerfile. Labels have to be specified either
as a dict:
{
"name": "add_labels_in_dockerfile",
"args": {
"labels": {
"label1": "value1",
"label 2": "some value"
}
}
}
Or as a string, which must be a dict serialised as JSON.
This will turn this dockerfile:
```dockerfile
FROM fedora
CMD date
```
into this:
```dockerfile
FROM fedora
LABEL "label1"="value1" "label 2"="some value"
CMD date
```
By default there is a parameter:
dont_overwrite=("Architecture", "architecture")
which prevents overwriting the listed labels if they are present in the parent image.
There is also another check via the parameter:
dont_overwrite_if_in_dockerfile=("distribution-scope",)
which prevents overwriting the listed labels if they are present in the dockerfile.
Keys and values are quoted as necessary.
Equal labels are, more precisely, labels of equal preference, as they are expected
to have the same value. If several equal labels are specified in the dockerfile
with different values, the value of the first one in the list is used
to set the value for the missing ones.
"""
from __future__ import unicode_literals
from atomic_reactor import start_time as atomic_reactor_start_time
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.constants import INSPECT_CONFIG
from atomic_reactor.util import get_docker_architecture, df_parser, LabelFormatter
from osbs.utils import Labels
import json
import datetime
import re
class AddLabelsPlugin(PreBuildPlugin):
key = "add_labels_in_dockerfile"
is_allowed_to_fail = False
def __init__(self, tasker, workflow, labels, dont_overwrite=None,
auto_labels=("build-date",
"architecture",
"vcs-type",
"vcs-ref",
"com.redhat.build-host"),
aliases=None,
dont_overwrite_if_in_dockerfile=("distribution-scope",),
info_url_format=None,
equal_labels=None):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param labels: dict, key value pairs to set as labels; or str, JSON-encoded dict
:param dont_overwrite: iterable, list of label keys which should not be overwritten
if they are present in parent image
:param auto_labels: iterable, list of labels to be determined automatically, if supported
it should contain only new label names and not old label names,
as they will be managed automatically
:param aliases: dict, maps old label names to new label names - for each old name found in
base image, dockerfile, or labels argument, a label with the new name is
added (with the same value)
:param dont_overwrite_if_in_dockerfile: iterable, list of label keys which should not be
overwritten if they are present in dockerfile
:param info_url_format: string, format string for the 'url' label added to the dockerfile
:param equal_labels: list of equal-label groups, each group given as a list
"""
# call parent constructor
super(AddLabelsPlugin, self).__init__(tasker, workflow)
if isinstance(labels, str):
labels = json.loads(labels)
if not isinstance(labels, dict):
raise RuntimeError("labels have to be dict")
self.labels = labels
self.dont_overwrite = dont_overwrite or ()
self.dont_overwrite_if_in_dockerfile = dont_overwrite_if_in_dockerfile
self.aliases = aliases or Labels.get_new_names_by_old()
self.auto_labels = auto_labels or ()
self.info_url_format = info_url_format
self.equal_labels = equal_labels or []
if not isinstance(self.equal_labels, list):
raise RuntimeError("equal_labels have to be list")
def generate_auto_labels(self, base_labels, df_labels, plugin_labels):
generated = {}
all_labels = base_labels.copy()
all_labels.update(df_labels)
all_labels.update(plugin_labels)
# build date
dt = datetime.datetime.fromtimestamp(atomic_reactor_start_time)
generated['build-date'] = dt.isoformat()
# architecture - assuming host and image architecture is the same
generated['architecture'], _ = get_docker_architecture(self.tasker)
# build host
docker_info = self.tasker.get_info()
generated['com.redhat.build-host'] = docker_info['Name']
# VCS info
vcs = self.workflow.source.get_vcs_info()
if vcs:
generated['vcs-type'] = vcs.vcs_type
generated['vcs-url'] = vcs.vcs_url
generated['vcs-ref'] = vcs.vcs_ref
for lbl in self.auto_labels:
if lbl not in generated:
self.log.warning("requested automatic label %r is not available", lbl)
elif lbl in plugin_labels:
self.log.info("label %r is set explicitly, not using generated value", lbl)
else:
self.labels[lbl] = generated[lbl]
self.log.info("automatic label %r is generated to %r", lbl, generated[lbl])
def add_aliases(self, base_labels, df_labels, plugin_labels):
all_labels = base_labels.copy()
all_labels.update(df_labels)
all_labels.update(plugin_labels)
new_labels = df_labels.copy()
new_labels.update(plugin_labels)
applied_alias = False
not_applied = []
def add_as_an_alias(set_to, set_from):
self.log.warning("adding label %r as an alias for label %r", set_to, set_from)
self.labels[set_to] = all_labels[set_from]
self.log.info(self.labels)
return True
for old, new in self.aliases.items():
if old not in all_labels:
not_applied.append(old)
continue
# new label doesn't exist but old label does
# add new label with value from old label
if new not in all_labels:
applied_alias = add_as_an_alias(new, old)
continue
# new and old label exists, and have same value
if all_labels[old] == all_labels[new]:
self.log.debug("alias label %r for %r already exists, skipping", new, old)
continue
# new overwrites old, if new is explicitly specified,
# or if old and new are in baseimage
if new in new_labels or (new not in new_labels and old not in new_labels):
applied_alias = add_as_an_alias(old, new)
continue
# old is explicitly specified so overwriting new (from baseimage)
applied_alias = add_as_an_alias(new, old)
# this will ensure that once we've added one new label based on an
# old label, if there are multiple old names, just the first will be used
all_labels[new] = all_labels[old]
# warn if we applied only some aliases
if applied_alias and not_applied:
self.log.debug("applied only some aliases, following old labels were not found: %s",
", ".join(not_applied))
def set_missing_labels(labels_found, all_labels, value_from, not_in=(), not_value=None):
labels_to_set = all_labels.difference(set(labels_found))
for set_label in labels_to_set:
if set_label in not_in and value_from[labels_found[0]] == not_value[set_label]:
self.log.debug("skipping label %r because it is set correctly in base image",
set_label)
else:
self.labels[set_label] = value_from[labels_found[0]]
self.log.warning("adding equal label %r with value %r",
set_label, value_from[labels_found[0]])
for equal_list in self.equal_labels:
all_equal = set(equal_list)
found_labels_base = []
found_labels_new = []
for equal_label in equal_list:
if equal_label in new_labels:
found_labels_new.append(equal_label)
elif equal_label in base_labels:
found_labels_base.append(equal_label)
if found_labels_new:
set_missing_labels(found_labels_new, all_equal, new_labels,
found_labels_base, base_labels)
elif found_labels_base:
set_missing_labels(found_labels_base, all_equal, base_labels)
def add_info_url(self, base_labels, df_labels, plugin_labels):
all_labels = base_labels.copy()
all_labels.update(df_labels)
all_labels.update(plugin_labels)
info_url = LabelFormatter().vformat(self.info_url_format, [], all_labels)
self.labels['url'] = info_url
def run(self):
"""
run the plugin
"""
dockerfile = df_parser(self.workflow.builder.df_path, workflow=self.workflow)
lines = dockerfile.lines
if re.match('^koji/image-build(:.*)?$', dockerfile.baseimage):
base_image_labels = {}
else:
try:
config = self.workflow.base_image_inspect[INSPECT_CONFIG]
except KeyError:
message = "base image was not inspected"
self.log.error(message)
raise RuntimeError(message)
else:
base_image_labels = config["Labels"] or {}
self.generate_auto_labels(base_image_labels.copy(), dockerfile.labels.copy(),
self.labels.copy())
# changing dockerfile.labels writes out modified Dockerfile - err on
# the safe side and make a copy
self.add_aliases(base_image_labels.copy(), dockerfile.labels.copy(), self.labels.copy())
if self.info_url_format:
self.add_info_url(base_image_labels.copy(), dockerfile.labels.copy(),
self.labels.copy())
# correct syntax is:
# LABEL "key"="value" "key2"="value2"
# Make sure to escape '\' and '"' characters.
try:
# py3
env_trans = str.maketrans({'\\': '\\\\',
'"': '\\"'})
except AttributeError:
# py2
env_trans = None
def escape(s):
if env_trans:
return s.translate(env_trans)
return s.replace('\\', '\\\\').replace('"', '\\"')
labels = []
for key, value in self.labels.items():
if key not in dockerfile.labels or dockerfile.labels[key] != value:
if key in self.dont_overwrite_if_in_dockerfile and key in dockerfile.labels:
self.log.info("denying overwrite of label %r, using from Dockerfile", key)
elif (key in base_image_labels and
key in self.dont_overwrite and
key not in dockerfile.labels):
self.log.info("denying overwrite of label %r, using from baseimage", key)
else:
label = '"%s"="%s"' % (escape(key), escape(value))
self.log.info("setting label %r", label)
labels.append(label)
content = ""
if labels:
content = 'LABEL ' + " ".join(labels)
# put labels at the end of dockerfile (since they change metadata and do not interact
# with FS, this should cause no harm)
lines.append('\n' + content + '\n')
dockerfile.lines = lines
return content
| jarodwilson/atomic-reactor | atomic_reactor/plugins/pre_add_labels_in_df.py | Python | bsd-3-clause | 12,163 |
from django.core.management.base import BaseCommand, CommandError
from judge.templatetags.markdown.camo import client as camo_client
class Command(BaseCommand):
help = 'obtains the camo url for the specified url'
def add_arguments(self, parser):
parser.add_argument('url', help='url to use camo on')
def handle(self, *args, **options):
if camo_client is None:
raise CommandError('Camo not available')
print camo_client.image_url(options['url'])
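# Example invocation (illustrative, assuming a standard Django project layout and
# that Camo is configured so camo_client is not None):
#
#     python manage.py camo https://example.com/image.png
#
# which prints the Camo-proxied URL for the given image.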
| Minkov/site | judge/management/commands/camo.py | Python | agpl-3.0 | 499 |
'''
Test IPsec
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.ipsec_operations as ipsec_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict1 = test_state.TestStateDict()
test_obj_dict2 = test_state.TestStateDict()
test_obj_dict3 = test_state.TestStateDict()
ipsec11 = None
ipsec12 = None
ipsec2 = None
ipsec3 = None
mevoco1_ip = None
mevoco2_ip = None
mevoco3_ip = None
def test():
global mevoco1_ip
global mevoco2_ip
global mevoco3_ip
global ipsec11
global ipsec12
global ipsec2
global ipsec3
mevoco1_ip = os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']
mevoco2_ip = os.environ['secondZStackMnIp']
mevoco3_ip = os.environ['thirdZStackMnIp']
test_util.test_dsc('Create test vm in mevoco1')
vm1 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
test_obj_dict1.add_vm(vm1)
vm1.check()
pri_l3_uuid1 = vm1.vm.vmNics[0].l3NetworkUuid
vr1 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid1)[0]
l3_uuid1 = test_lib.lib_find_vr_pub_nic(vr1).l3NetworkUuid
vip11 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
#vip12 = test_stub.create_vip('ipsec1_vip', l3_uuid1)
cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid1)
first_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Create test vm in mevoco2')
vm2 = test_stub.create_vlan_vm(os.environ.get('l3VlanDNATNetworkName'))
test_obj_dict2.add_vm(vm2)
vm2.check()
pri_l3_uuid2 = vm2.vm.vmNics[0].l3NetworkUuid
vr2 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid2)[0]
l3_uuid2 = test_lib.lib_find_vr_pub_nic(vr2).l3NetworkUuid
vip2 = test_stub.create_vip('ipsec2_vip', l3_uuid2)
cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid2)
second_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
test_util.test_dsc('Create test vm in mevoco3')
vm3 = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName3'))
test_obj_dict2.add_vm(vm3)
vm3.check()
pri_l3_uuid3 = vm3.vm.vmNics[0].l3NetworkUuid
vr3 = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid3)[0]
l3_uuid3 = test_lib.lib_find_vr_pub_nic(vr3).l3NetworkUuid
vip3 = test_stub.create_vip('ipsec3_vip', l3_uuid3)
cond = res_ops.gen_query_conditions('uuid', '=', pri_l3_uuid3)
third_zstack_cidrs = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].ipRanges[0].networkCidr
cond = res_ops.gen_query_conditions('l3Network.uuid', '=', pri_l3_uuid3)
cond = res_ops.gen_query_conditions('vmInstanceUuid', '=', vr3.uuid, cond)
vr3_pri_ip = res_ops.query_resource(res_ops.VM_NIC, cond)[0].ip
cmd = 'route del default; route add default gw %s' %vr3_pri_ip
os.system("sshpass -p 'password' ssh root@%s '%s'" %(vm3.vm.vmNics[0].ip, cmd))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_dsc('Create ipsec in mevoco1')
ipsec11 = ipsec_ops.create_ipsec_connection('ipsec11', pri_l3_uuid1, vip2.get_vip().ip, '123456', vip11.get_vip().uuid, [second_zstack_cidrs])
#ipsec12 = ipsec_ops.create_ipsec_connection('ipsec12', pri_l3_uuid1, vip3.get_vip().ip, '123456', vip12.get_vip().uuid, [third_zstack_cidrs])
ipsec12 = ipsec_ops.create_ipsec_connection('ipsec12', pri_l3_uuid1, vip3.get_vip().ip, '123456', vip11.get_vip().uuid, [third_zstack_cidrs])
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
test_util.test_dsc('Create ipsec in mevoco2')
ipsec2 = ipsec_ops.create_ipsec_connection('ipsec2', pri_l3_uuid2, vip11.get_vip().ip, '123456', vip2.get_vip().uuid, [first_zstack_cidrs])
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
test_util.test_dsc('Create ipsec in mevoco3')
#ipsec3 = ipsec_ops.create_ipsec_connection('ipsec3', pri_l3_uuid3, vip12.get_vip().ip, '123456', vip3.get_vip().uuid, [first_zstack_cidrs])
ipsec3 = ipsec_ops.create_ipsec_connection('ipsec3', pri_l3_uuid3, vip11.get_vip().ip, '123456', vip3.get_vip().uuid, [first_zstack_cidrs])
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
if not test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip):
test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco2[MN:%s]' % (mevoco1_ip, mevoco2_ip))
if not test_lib.lib_check_ping(vm1.vm, vm3.vm.vmNics[0].ip):
test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco3[MN:%s]' % (mevoco1_ip, mevoco3_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
if not test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip):
test_util.test_fail('vm in mevoco2[MN:%s] could not connect to vm in mevoco1[MN:%s]' % (mevoco2_ip, mevoco1_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
if not test_lib.lib_check_ping(vm3.vm, vm1.vm.vmNics[0].ip):
test_util.test_fail('vm in mevoco3[MN:%s] could not connect to vm in mevoco1[MN:%s]' % (mevoco3_ip, mevoco1_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
ipsec_ops.delete_ipsec_connection(ipsec11.uuid)
if test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip, no_exception=True):
test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco2[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco2_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
if test_lib.lib_check_ping(vm2.vm, vm1.vm.vmNics[0].ip, no_exception=True):
test_util.test_fail('vm in mevoco2[MN:%s] could still connect to vm in mevoco1[MN:%s] after Ipsec is deleted' % (mevoco2_ip, mevoco1_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
if not test_lib.lib_check_ping(vm1.vm, vm3.vm.vmNics[0].ip):
test_util.test_fail('vm in mevoco1[MN:%s] could not connect to vm in mevoco3[MN:%s]' % (mevoco1_ip, mevoco3_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
ipsec_ops.delete_ipsec_connection(ipsec3.uuid)
if test_lib.lib_check_ping(vm3.vm, vm1.vm.vmNics[0].ip, no_exception=True):
test_util.test_fail('vm in mevoco1[MN:%s] could still connect to vm in mevoco3[MN:%s] after Ipsec is deleted' % (mevoco1_ip, mevoco3_ip))
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
ipsec_ops.delete_ipsec_connection(ipsec12.uuid)
test_lib.lib_error_cleanup(test_obj_dict1)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
test_lib.lib_error_cleanup(test_obj_dict2)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
test_util.test_pass('Create Ipsec Success')
# Will be called only if an exception happens in test().
def error_cleanup():
global mevoco1_ip
global mevoco2_ip
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco1_ip
global test_obj_dict1
test_lib.lib_error_cleanup(test_obj_dict1)
global ipsec11
if ipsec11 != None:
ipsec_ops.delete_ipsec_connection(ipsec11.uuid)
global ipsec12
if ipsec12 != None:
ipsec_ops.delete_ipsec_connection(ipsec12.uuid)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco2_ip
global test_obj_dict2
test_lib.lib_error_cleanup(test_obj_dict2)
global ipsec2
if ipsec2 != None:
ipsec_ops.delete_ipsec_connection(ipsec2.uuid)
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = mevoco3_ip
global test_obj_dict3
test_lib.lib_error_cleanup(test_obj_dict3)
global ipsec3
if ipsec3 != None:
ipsec_ops.delete_ipsec_connection(ipsec3.uuid)
| zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/ipsec/test_create_2_ipsec.py | Python | apache-2.0 | 8,069 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gc
import logging
from pandas import Series, concat, DataFrame
import numpy as np
from numpy import where
from openfisca_france_data.temporary import TemporaryStore
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.input_data_builders.build_openfisca_survey_data.base import create_replace
from openfisca_france_data.input_data_builders.build_openfisca_survey_data.utils import print_id, control
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
def create_totals(year = None):
assert year is not None
temporary_store = TemporaryStore.create(file_name = "erfs")
replace = create_replace(year)
# Start from the ERFS 'individu' table
# and rename the variables
log.info(u"Creating Totals")
log.info(u"Etape 1 : Chargement des données")
erfs_survey_collection = SurveyCollection.load(collection = 'erfs', config_files_directory = config_files_directory)
data = erfs_survey_collection.get_survey('erfs_{}'.format(year))
indivim = temporary_store['indivim_{}'.format(year)]
assert not indivim.duplicated(['noindiv']).any(), "Présence de doublons"
# Deal with individuals with imputed income: some individuals are in the ERF 'individu' table but
# not in the 'foyer' table. We need to create a foyer for them.
selection = Series()
for var in ["zsali", "zchoi", "zrsti", "zalri", "zrtoi", "zragi", "zrici", "zrnci"]:
varo = var[:-1] + "o"
test = indivim[var] != indivim[varo]
if len(selection) == 0:
selection = test
else:
selection = (test) | (selection)
indivi_i = indivim[selection].copy()
indivi_i.rename(
columns = {
"ident": "idmen",
"persfip": "quifoy",
"zsali": "sali2", # Inclu les salaires non imposables des agents d'assurance
"zchoi": "choi2",
"zrsti": "rsti2",
"zalri": "alr2"
},
inplace = True,
)
indivi_i.quifoy = where(indivi_i.quifoy.isnull(), "vous", indivi_i.quifoy)
indivi_i.quelfic = "FIP_IMP"
# We merge them with the other individuals
indivim.rename(
columns = dict(
ident = "idmen",
persfip = "quifoy",
zsali = "sali2", # Inclu les salaires non imposables des agents d'assurance
zchoi = "choi2",
zrsti = "rsti2",
zalri = "alr2",
),
inplace = True,
)
if not (set(list(indivim.noindiv)) > set(list(indivi_i.noindiv))):
raise Exception("Individual ")
indivim.set_index("noindiv", inplace = True)
indivi_i.set_index("noindiv", inplace = True)
indivi = indivim
del indivim
indivi.update(indivi_i)
indivi.reset_index(inplace = True)
log.info("Etape 2 : isolation des FIP")
fip_imp = indivi.quelfic == "FIP_IMP"
indivi["idfoy"] = (
indivi.idmen.astype("int64") * 100 +
(indivi.declar1.str[0:2]).convert_objects(convert_numeric=True)
)
indivi.loc[fip_imp, "idfoy"] = np.nan
# Some FIP (or at least some with imputed income) have a tax return number (why?)
fip_has_declar = (fip_imp) & (indivi.declar1.notnull())
indivi["idfoy"] = where(
fip_has_declar,
indivi.idmen * 100 + indivi.declar1.str[0:2].convert_objects(convert_numeric = True),
indivi.idfoy)
del fip_has_declar
fip_no_declar = (fip_imp) & (indivi.declar1.isnull())
del fip_imp
indivi["idfoy"] = where(fip_no_declar, indivi["idmen"] * 100 + 50, indivi["idfoy"])
indivi_fnd = indivi[["idfoy", "noindiv"]][fip_no_declar].copy()
while any(indivi_fnd.duplicated(cols=["idfoy"])):
indivi_fnd["idfoy"] = where(
indivi_fnd.duplicated(cols=["idfoy"]),
indivi_fnd["idfoy"] + 1,
indivi_fnd["idfoy"]
)
# assert indivi_fnd["idfoy"].duplicated().value_counts()[False] == len(indivi_fnd["idfoy"].values), "Duplicates remaining"
assert len(indivi[indivi.duplicated(['noindiv'])]) == 0, "Doublons"
indivi.idfoy[fip_no_declar] = indivi_fnd.idfoy.copy()
del indivi_fnd, fip_no_declar
log.info(u"Etape 3 : Récupération des EE_NRT")
nrt = indivi.quelfic == "EE_NRT"
indivi.idfoy = where(nrt, indivi.idmen * 100 + indivi.noi, indivi.idfoy)
indivi.quifoy[nrt] = "vous"
del nrt
pref_or_cref = indivi.lpr.isin([1, 2])
adults = (indivi.quelfic.isin(["EE", "EE_CAF"])) & (pref_or_cref)
indivi.idfoy = where(adults, indivi.idmen * 100 + indivi.noi, indivi.idfoy)
indivi.loc[adults, "quifoy"] = "vous"
del adults
# TODO: hack to avoid assert error
log.info("{}".format(indivi.loc[indivi['lpr'].isin([1, 2]), "idfoy"].notnull().value_counts()))
assert indivi.idfoy[indivi.lpr.dropna().isin([1, 2])].all()
log.info(u"Etape 4 : Rattachement des enfants aux déclarations")
assert not(indivi.noindiv.duplicated().any()), "Some noindiv appear twice"
lpr3_or_lpr4 = indivi['lpr'].isin([3, 4])
enf_ee = (lpr3_or_lpr4) & (indivi.quelfic.isin(["EE", "EE_CAF"]))
assert indivi.noindiv[enf_ee].notnull().all(), " Some noindiv are not set, which will ruin next stage"
assert not(indivi.noindiv[enf_ee].duplicated().any()), "Some noindiv appear twice"
pere = DataFrame({
"noindiv_enf": indivi.noindiv.loc[enf_ee],
"noindiv": 100 * indivi.idmen.loc[enf_ee] + indivi.noiper.loc[enf_ee]
})
mere = DataFrame({
"noindiv_enf": indivi.noindiv.loc[enf_ee],
"noindiv": 100 * indivi.idmen.loc[enf_ee] + indivi.noimer.loc[enf_ee]
})
foyer = data.get_values(variables = ["noindiv", "zimpof"], table = replace["foyer"])
pere = pere.merge(foyer, how = "inner", on = "noindiv")
mere = mere.merge(foyer, how = "inner", on = "noindiv")
df = pere.merge(mere, how = "outer", on = "noindiv_enf", suffixes=('_p', '_m'))
log.info(u" 4.1 : gestion des personnes dans 2 foyers")
for col in ["noindiv_p", "noindiv_m", "noindiv_enf"]:
df[col] = df[col].fillna(0) # because groupby drops groups with NA in the index
df = df.groupby(by = ["noindiv_p", "noindiv_m", "noindiv_enf"]).sum()
df.reset_index(inplace = True)
df["which"] = ""
df.which = where((df.zimpof_m.notnull()) & (df.zimpof_p.isnull()), "mere", "")
df.which = where((df.zimpof_p.notnull()) & (df.zimpof_m.isnull()), "pere", "")
both = (df.zimpof_p.notnull()) & (df.zimpof_m.notnull())
df.which = where(both & (df.zimpof_p > df.zimpof_m), "pere", "mere")
df.which = where(both & (df.zimpof_m >= df.zimpof_p), "mere", "pere")
assert df.which.notnull().all(), "Some enf_ee individuals are not matched with any pere or mere"
del lpr3_or_lpr4, pere, mere
df.rename(columns = {"noindiv_enf": "noindiv"}, inplace = True)
df['idfoy'] = where(df.which == "pere", df.noindiv_p, df.noindiv_m)
df['idfoy'] = where(df.which == "mere", df.noindiv_m, df.noindiv_p)
assert df["idfoy"].notnull().all()
dropped = [col for col in df.columns if col not in ["idfoy", "noindiv"]]
df.drop(dropped, axis = 1, inplace = True)
assert not(df.duplicated().any())
df.set_index("noindiv", inplace = True, verify_integrity = True)
indivi.set_index("noindiv", inplace = True, verify_integrity = True)
ind_notnull = indivi["idfoy"].notnull().sum()
ind_isnull = indivi["idfoy"].isnull().sum()
indivi = indivi.combine_first(df)
assert ind_notnull + ind_isnull == (
indivi["idfoy"].notnull().sum() +
indivi["idfoy"].isnull().sum()
)
indivi.reset_index(inplace = True)
assert not(indivi.duplicated().any())
# MBJ: issue dealt with when moving from R code to python
# TODO: the enfants_fip must be added and a household created for the adults
# We follow the ERF 2003 methodological guide, page 135
# We drop the FIP spouses and the FIP aged 25 and over;
# We keep the FIP children aged 19 to 24;
# We drop the FIP aged 18 and under, except the FIP born in 2002 in a
# household at its 6th interview, since these are children born after the EEC
# survey date who will not be found in the following EEC surveys.
#
log.info(u" 4.2 : On enlève les individus pour lesquels il manque le déclarant")
fip = temporary_store['fipDat_{}'.format(year)]
fip["declar"] = np.nan
fip["agepf"] = np.nan
fip.drop(["actrec", "year", "noidec"], axis = 1, inplace = True)
fip.naia = fip.naia.astype("int32")
fip.rename(
columns = dict(
ident = "idmen",
persfip = "quifoy",
zsali = "sali2", # Inclu les salaires non imposables des agents d'assurance
zchoi = "choi2",
zrsti = "rsti2",
zalri = "alr2"),
inplace = True)
is_fip_19_25 = ((year - fip.naia - 1) >= 19) & ((year - fip.naia - 1) < 25)
# TODO: BUT for the time being we keep them in their 'vous' menage, so the following lines are commented
# The idmen are of the form 60XXXX; we use idmen 61XXXX, 62XXXX for the idmen of the kids over 18 and less than 25
# fip[is_fip_19_25 ,"idmen"] <- (99-fip[is_fip_19_25,"noi"]+1)*100000 + fip[is_fip_19_25,"idmen"]
# fip[is_fip_19_25 ,"lpr"] <- 1
#
# indivi <- rbind.fill(indivi,fip[is_fip_19_25,])
indivi = concat([indivi, fip.loc[is_fip_19_25]])
del is_fip_19_25
indivi['age'] = year - indivi.naia - 1
indivi['age_en_mois'] = 12 * indivi.age + 12 - indivi.naim
indivi["quimen"] = 0
indivi.quimen[indivi.lpr == 1] = 0
indivi.quimen[indivi.lpr == 2] = 1
indivi.quimen[indivi.lpr == 3] = 2
indivi.quimen[indivi.lpr == 4] = 3
indivi['not_pr_cpr'] = None # Create a new column
indivi.not_pr_cpr[indivi.lpr <= 2] = False
indivi.not_pr_cpr[indivi.lpr > 2] = True
assert indivi.not_pr_cpr.isin([True, False]).all()
log.info(u" 4.3 : Creating non pr=0 and cpr=1 idmen's")
indivi.reset_index(inplace = True)
test1 = indivi[['quimen', 'idmen']][indivi.not_pr_cpr].copy()
test1['quimen'] = 2
j = 2
while any(test1.duplicated(['quimen', 'idmen'])):
test1.loc[test1.duplicated(['quimen', 'idmen']), 'quimen'] = j + 1
j += 1
print_id(indivi)
indivi.update(test1)
print_id(indivi)
# indivi.set_index(['quimen']) #TODO: check relevance
# TODO: problem with some idfoy that have no 'vous'
log.info(u"Etape 5 : Gestion des idfoy qui n'ont pas de vous")
all_ind = indivi.drop_duplicates('idfoy')
with_ = indivi.loc[indivi.quifoy == 'vous', 'idfoy']
without = all_ind[~(all_ind.idfoy.isin(with_.values))]
log.info(u"On cherche si le déclarant donné par la deuxième déclaration est bien un vous")
# TODO: the following should be dealt with at the import of the tables
indivi.replace(
to_replace = {
'declar2': {'NA': np.nan, '': np.nan}
},
inplace = True
)
has_declar2 = (indivi.idfoy.isin(without.idfoy.values)) & (indivi.declar2.notnull())
decl2_idfoy = (
indivi.loc[has_declar2, "idmen"].astype('int') * 100 +
indivi.loc[has_declar2, "declar2"].str[0:2].astype('int') )
indivi.loc[has_declar2, 'idfoy'] = where(decl2_idfoy.isin(with_.values), decl2_idfoy, None)
del all_ind, with_, without, has_declar2
log.info(u" 5.1 : Elimination idfoy restant")
idfoyList = indivi.loc[indivi.quifoy == "vous", 'idfoy'].drop_duplicates()
indivi = indivi[indivi.idfoy.isin(idfoyList.values)]
del idfoyList
print_id(indivi)
# Select the variables to keep for the following steps
myvars = [
"actrec",
"age",
"age_en_mois",
"chpub",
"encadr",
"idfoy",
"idmen",
"nbsala",
"noi",
"noindiv",
"prosa",
"quelfic",
"quifoy",
"quimen",
"statut",
"titc",
"txtppb",
"wprm",
"rc1rev",
"maahe",
]
assert len(set(myvars).difference(set(indivi.columns))) == 0, \
"Manquent les colonnes suivantes : {}".format(set(myvars).difference(set(indivi.columns)))
indivi = indivi[myvars].copy()
# TODO: the actrec of the fip are not coded (this will be done at the end once the
# information from the tax returns has been gathered)
log.info(u"Etape 6 : Création des variables descriptives")
log.info(u" 6.1 : variable activité")
indivi['activite'] = None
indivi['activite'][indivi.actrec <= 3] = 0
indivi['activite'][indivi.actrec == 4] = 1
indivi['activite'][indivi.actrec == 5] = 2
indivi['activite'][indivi.actrec == 7] = 3
indivi['activite'][indivi.actrec == 8] = 4
indivi['activite'][indivi.age <= 13] = 2 # these are in fact the actrec=9
log.info("{}".format(indivi['activite'].value_counts(dropna = False)))
# TODO: MBJ problem avec les actrec
# TODO: FIX AND REMOVE
indivi.activite[indivi.actrec.isnull()] = 5
indivi.titc[indivi.titc.isnull()] = 0
assert indivi.titc.notnull().all(), u"Problème avec les titc" # There are 420 NaN for the variables statut, titc, etc.
log.info(u" 6.2 : variable statut")
indivi.statut[indivi.statut.isnull()] = 0
indivi.statut = indivi.statut.astype('int')
indivi.statut[indivi.statut == 11] = 1
indivi.statut[indivi.statut == 12] = 2
indivi.statut[indivi.statut == 13] = 3
indivi.statut[indivi.statut == 21] = 4
indivi.statut[indivi.statut == 22] = 5
indivi.statut[indivi.statut == 33] = 6
indivi.statut[indivi.statut == 34] = 7
indivi.statut[indivi.statut == 35] = 8
indivi.statut[indivi.statut == 43] = 9
indivi.statut[indivi.statut == 44] = 10
indivi.statut[indivi.statut == 45] = 11
assert indivi.statut.isin(range(12)).all(), u"statut value over range"
log.info(u" 6.3 : variable txtppb")
indivi.txtppb.fillna(0, inplace = True)
assert indivi.txtppb.notnull().all()
indivi.nbsala.fillna(0, inplace = True)
indivi['nbsala'] = indivi.nbsala.astype('int')
indivi.nbsala[indivi.nbsala == 99] = 10
assert indivi.nbsala.isin(range(11)).all()
log.info(u" 6.4 : variable chpub et CSP")
indivi.chpub.fillna(0, inplace = True)
indivi.chpub = indivi.chpub.astype('int')
indivi.chpub[indivi.chpub.isnull()] = 0
assert indivi.chpub.isin(range(11)).all()
indivi['cadre'] = 0
indivi.prosa.fillna(0, inplace = True)
assert indivi['prosa'].notnull().all()
log.info("{}".format(indivi['encadr'].value_counts(dropna = False)))
# encadr: 1=yes, 2=no
indivi.encadr.fillna(2, inplace = True)
indivi.encadr[indivi.encadr == 0] = 2
assert indivi.encadr.notnull().all()
assert indivi.encadr.isin([1, 2]).all()
indivi['cadre'][indivi.prosa.isin([7, 8])] = 1
indivi['cadre'][(indivi.prosa == 9) & (indivi.encadr == 1)] = 1
assert indivi['cadre'].isin(range(2)).all()
log.info(
u"Etape 7: on vérifie qu'il ne manque pas d'info sur les liens avec la personne de référence")
log.info(
u"nb de doublons idfam/quifam {}".format(len(indivi[indivi.duplicated(cols=['idfoy', 'quifoy'])])))
log.info(u"On crée les n° de personnes à charge")
assert indivi['idfoy'].notnull().all()
print_id(indivi)
indivi['quifoy2'] = 2
indivi.quifoy2[indivi.quifoy == 'vous'] = 0
indivi.quifoy2[indivi.quifoy == 'conj'] = 1
indivi.quifoy2[indivi.quifoy == 'pac'] = 2
del indivi['quifoy']
indivi['quifoy'] = indivi.quifoy2
del indivi['quifoy2']
print_id(indivi)
test2 = indivi[['quifoy', 'idfoy', 'noindiv']][indivi['quifoy'] == 2].copy()
print_id(test2)
j = 2
while test2.duplicated(['quifoy', 'idfoy']).any():
test2.loc[test2.duplicated(['quifoy', 'idfoy']), 'quifoy'] = j
j += 1
print_id(test2)
indivi = indivi.merge(test2, on = ['noindiv', 'idfoy'], how = "left")
indivi['quifoy'] = indivi['quifoy_x']
indivi['quifoy'] = where(indivi['quifoy_x'] == 2, indivi['quifoy_y'], indivi['quifoy_x'])
del indivi['quifoy_x'], indivi['quifoy_y']
print_id(indivi)
del test2, fip
log.info(
u"nb de doublons idfam/quifam' {}".format(
len(indivi[indivi.duplicated(subset = ['idfoy', 'quifoy'])])
)
)
print_id(indivi)
log.info(u"Etape 8 : création des fichiers totaux")
famille = temporary_store['famc_{}'.format(year)]
log.info(u" 8.1 : création de tot2 & tot3")
tot2 = indivi.merge(famille, on = 'noindiv', how = 'inner')
# del famille # TODO: MBJ increase in number of menage/foyer when merging with family ...
del famille
control(tot2, debug = True, verbose = True)
assert tot2.quifam.notnull().all()
temporary_store['tot2_{}'.format(year)] = tot2
del indivi
log.info(u" tot2 saved")
tot2.merge(foyer, how = 'left')
tot2 = tot2[tot2.idmen.notnull()].copy()
print_id(tot2)
tot3 = tot2
# TODO: check where they come from
tot3 = tot3.drop_duplicates(subset = 'noindiv')
log.info("{}".format(len(tot3)))
# Block to remove any unwanted duplicated pair
control(tot3, debug = True, verbose = True)
tot3 = tot3.drop_duplicates(subset = ['idfoy', 'quifoy'])
tot3 = tot3.drop_duplicates(subset = ['idfam', 'quifam'])
tot3 = tot3.drop_duplicates(subset = ['idmen', 'quimen'])
tot3 = tot3.drop_duplicates(subset = ['noindiv'])
control(tot3)
log.info(u" 8.2 : On ajoute les variables individualisables")
allvars = temporary_store['ind_vars_to_remove_{}'.format(year)]
vars2 = set(tot3.columns).difference(set(allvars))
tot3 = tot3[list(vars2)]
log.info("{}".format(len(tot3)))
assert not(tot3.duplicated(subset = ['noindiv']).any()), "doublon dans tot3['noindiv']"
lg_dup = len(tot3[tot3.duplicated(['idfoy', 'quifoy'])])
assert lg_dup == 0, "{} pairs of idfoy/quifoy in tot3 are duplicated".format(lg_dup)
temporary_store['tot3_{}'.format(year)] = tot3
control(tot3)
del tot2, allvars, tot3, vars2
log.info(u"tot3 sauvegardé")
gc.collect()
def create_final(year):
temporary_store = TemporaryStore.create(file_name = "erfs")
log.info(u"création de final")
foy_ind = temporary_store['foy_ind_{}'.format(year)]
tot3 = temporary_store['tot3_{}'.format(year)]
foy_ind.set_index(['idfoy', 'quifoy'], inplace = True)
tot3.set_index(['idfoy', 'quifoy'], inplace = True)
final = concat([tot3, foy_ind], join_axes=[tot3.index], axis=1)
final.reset_index(inplace = True)
foy_ind.reset_index(inplace = True)
tot3.reset_index(inplace = True)
# tot3 = tot3.drop_duplicates(cols=['idfam', 'quifam'])
final = final[final.idmen.notnull()]
control(final, verbose=True)
del tot3, foy_ind
gc.collect()
# final <- merge(final, sif, by = c('noindiv'), all.x = TRUE)
log.info(" loading fip")
sif = temporary_store['sif_{}'.format(year)]
log.info("{}".format(sif.columns))
log.info(" update final using fip")
final = final.merge(sif, on=["noindiv"], how="left")
# TODO: A METHOD IS NEEDED TO HANDLE DOUBLE TAX RETURNS
control(final, debug=True)
final['caseP'] = final.caseP.fillna(False)
final['caseF'] = final.caseF.fillna(False)
print_id(final)
temporary_store['final_{}'.format(year)] = final
log.info(u"final sauvegardé")
del sif, final
if __name__ == '__main__':
year = 2009
# create_totals(year = year)
create_final(year = year)
log.info(u"étape 06 remise en forme des données terminée")
| LouisePaulDelvaux/openfisca-france-data | openfisca_france_data/input_data_builders/build_openfisca_survey_data/step_06_rebuild.py | Python | agpl-3.0 | 20,725 |
# A regex-based Lexer/tokenizer.
# See the if __main__ section in the bottom for an example.
#
#-----------------------------------------------
# Eli Bendersky (eliben@gmail.com)
# License: this code is in the public domain
# Last modified: March 2009
#-----------------------------------------------
#
import re
import sys
class Token(object):
""" A simple Token structure.
Contains the token type, value and position.
"""
def __init__(self, type, val, pos):
self.type = type
self.val = val
self.pos = pos
def __str__(self):
return '%s(%s) at %s' % (self.type, self.val, self.pos)
class LexerError(Exception):
""" Lexer error exception.
pos:
Position in the input line where the error occurred.
"""
def __init__(self, pos):
self.pos = pos
class Lexer(object):
""" A simple regex-based lexer/tokenizer.
See below for an example of usage.
"""
def __init__(self, rules, skip_whitespace=True):
""" Create a lexer.
rules:
A list of rules. Each rule is a `regex, type`
pair, where `regex` is the regular expression used
to recognize the token and `type` is the type
of the token to return when it's recognized.
skip_whitespace:
If True, whitespace (\s+) will be skipped and not
reported by the lexer. Otherwise, you have to
specify your rules for whitespace, or it will be
flagged as an error.
"""
# All the regexes are concatenated into a single one
# with named groups. Since the group names must be valid
# Python identifiers, but the token types used by the
# user are arbitrary strings, we auto-generate the group
# names and map them to token types.
#
idx = 1
regex_parts = []
self.group_type = {}
for regex, type in rules:
groupname = 'GROUP%s' % idx
regex_parts.append('(?P<%s>%s)' % (groupname, regex))
self.group_type[groupname] = type
idx += 1
self.regex = re.compile('|'.join(regex_parts))
self.skip_whitespace = skip_whitespace
self.re_ws_skip = re.compile('\S')
def input(self, buf):
""" Initialize the lexer with a buffer as input.
"""
self.buf = buf
self.pos = 0
def token(self):
""" Return the next token (a Token object) found in the
input buffer. None is returned if the end of the
buffer was reached.
In case of a lexing error (the current chunk of the
buffer matches no rule), a LexerError is raised with
the position of the error.
"""
if self.pos >= len(self.buf):
return None
else:
if self.skip_whitespace:
m = self.re_ws_skip.search(self.buf[self.pos:])
if m:
self.pos += m.start()
else:
return None
m = self.regex.match(self.buf[self.pos:])
if m:
groupname = m.lastgroup
tok_type = self.group_type[groupname]
tok = Token(tok_type, m.group(groupname), self.pos)
self.pos += m.end()
return tok
# if we're here, no rule matched
raise LexerError(self.pos)
def tokens(self):
""" Returns an iterator to the tokens found in the buffer.
"""
while 1:
tok = self.token()
if tok is None: break
yield tok
if __name__ == '__main__':
rules = [
('\d+', 'NUMBER'),
('[a-zA-Z_]\w+', 'IDENTIFIER'),
('\+', 'PLUS'),
('\-', 'MINUS'),
('\*', 'MULTIPLY'),
('\/', 'DIVIDE'),
('\(', 'LP'),
('\)', 'RP'),
('=', 'EQUALS'),
]
lx = Lexer(rules, skip_whitespace=True)
lx.input('erw = _abc + 12*(R4-623902) ')
try:
for tok in lx.tokens():
print tok
except LexerError, err:
print 'LexerError at position', err.pos
| evandrix/Splat | doc/parser/lexer.py | Python | mit | 4,457 |
#!/usr/bin/python
#
# tests rhn.rpclib.Server(), connection through proxy
#
# $Id$
#
# USAGE: $0 SERVER PROXY [SYSTEMID]
import sys
sys.path.append('..')
from rhn import rpclib
SERVER = "xmlrpc.rhn.redhat.com"
HANDLER = "/XMLRPC"
PROXY = "proxy.example.com:8080"
system_id_file = '/etc/sysconfig/rhn/systemid'
if len(sys.argv) < 3:
print "Non efficient cmd-line arguments! Provide at least server & proxy!"
sys.exit(1);
try:
SERVER = sys.argv[1]
PROXY = sys.argv[2]
system_id_file = sys.argv[3]
except:
pass
SERVER_URL = "https://" + SERVER + HANDLER
systemid = open(system_id_file).read()
s = rpclib.Server(SERVER_URL, proxy = PROXY)
dict = s.up2date.login(systemid);
print "Test PASSES"
| colloquium/spacewalk | client/rhel/rhnlib/test/21-proxy-user-agent.py | Python | gpl-2.0 | 723 |
"""
* Copyright (C) The Project "Nugget" Team - All Rights Reserved
* Written by Jordan Maxwell <jordanmax@nxt-studios.com>, May 1st, 2017
* Licensing information can found in 'LICENSE', which is part of this source code package.
"""
from panda3d.core import Filename, ExecutionEnvironment, WindowProperties, CullBinEnums, CullBinManager, BamCache, loadPrcFileData
from direct.showbase.ShowBase import ShowBase
from direct.directnotify import DirectNotifyGlobal
from direct.gui.DirectGui import *
from direct.task import Task
from nugget.nuggetbase.GameStateFSM import GameStateFSM
from nugget.nuggetbase.GameSettings import GameSettings
from nugget.audio.AudioManagerBase import AudioManagerBase
from nugget.audio.MusicManager import MusicManager
import __builtin__
import time
import os
import sys  # used below for sys.platform when picking the window icon
class NuggetBase(ShowBase):
notify = DirectNotifyGlobal.directNotify.newCategory('NuggetBase')
def __init__(self):
ShowBase.__init__(self)
__builtin__.__dev__ = self.config.GetBool('want-dev', False)
self.__fpsEnabled = False
self.__isMainWindowOpen = False
if __dev__:
self.notify.info('Starting in Development mode.')
self.accept('f1', self.toggleFPS)
if config.GetBool('want-screenshots', True):
self.accept('f12', self.takeScreenshot)
self.bamCache = BamCache.getGlobalPtr()
if __dev__:
flavor = self.config.GetString('dev-branch-flavor', '')
if flavor:
                cachePath = '/cache/cache_%s' % flavor
else:
cachePath = '/cache/cache'
self.bamCache.setRoot(Filename(cachePath))
else:
self.bamCache.setRoot(Filename('./cache'))
self.bamCache.setActive(False)
self.bamCache.setActive(self.config.GetBool('want-bam-cache', False))
self.bamCache.setCacheModels(False)
self.bamCache.setCacheTextures(True)
self.bamCache.setCacheCompressedTextures(True)
cullPtr = CullBinManager.getGlobalPtr()
cullPtr.addBin('background', CullBinEnums.BTFixed, 14)
cullPtr.addBin('foreground', CullBinEnums.BTFixed, 15)
cullPtr.addBin('objects', CullBinEnums.BTFixed, 16)
cullPtr.addBin('gui-fixed', CullBinEnums.BTFixed, 55)
cullPtr.addBin('gui-popup', CullBinEnums.BTUnsorted, 60)
self.currentSave = None
self.settings = GameSettings()
self.settings.load(GameSettings.DEFAULT_FILE_PATH)
self.settings.setRuntimeOptions()
loadPrcFileData('game_options', self.settings.settingsToPrcData())
self.accept('PandaPaused', self.disableAllAudio)
self.accept('PandaRestarted', self.enableAllAudio)
self.audioMgr = AudioManagerBase()
self.musicMgr = MusicManager()
self.gameStateFSM = GameStateFSM()
def isClientBuilt(self):
try:
import buildData
return True
except:
return False
def toggleFPS(self):
self.__fpsEnabled = not self.__fpsEnabled
base.setFrameRateMeter(self.__fpsEnabled)
def openMainWindow(self, *args, **kw):
success = ShowBase.openMainWindow(self, *args, **kw)
if not success:
self.notify.error('Failed to open game window!')
return
if self.win:
self.win.setSort(500)
self.win.setChildSort(10)
self.__postOpenWindow()
self.__isMainWindowOpen = success
self.__setCursorAndIcon()
def __postOpenWindow(self):
pass
def __setCursorAndIcon(self):
wp = WindowProperties()
if self.isClientBuilt():
if sys.platform == 'darwin':
wp.setIconFilename(Filename.fromOsSpecific(os.path.join(os.getcwd(), 'icon500.ico')))
else:
wp.setIconFilename(Filename.fromOsSpecific(os.path.join(os.getcwd(), 'icon256.ico')))
else:
wp.setIconFilename(Filename.fromOsSpecific(os.path.join(os.getcwd(), 'resources/etc/icon.ico')))
if self.config.GetBool('want-custom-cursor', False):
wp.setCursorFilename(Filename.fromOsSpecific(os.path.join(tempdir, 'pointer.cur')))
self.win.requestProperties(wp)
def takeScreenshot(self):
if not config.GetBool('want-screenshots', True):
return
self.notify.info('Taking Screenshot.')
if not os.path.exists('screenshots'):
os.mkdir('screenshots')
dt = time.localtime()
date_time = '%04d-%02d-%02d_%02d-%02d-%02d' % (dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
uFilename = 'screenshots/screenshot_' + date_time + '.' + base.screenshotExtension
pandafile = Filename(str(ExecutionEnvironment.getCwd()) + '/' + str(uFilename))
pandafile.makeDir()
fn = base.screenshot(namePrefix = uFilename, defaultFilename = 0)
winfile = pandafile.toOsSpecific()
self.notify.info('Screenshot captured: ' + winfile)
screenShotNotice = DirectLabel(text = 'Screenshot Captured:\n' + winfile, scale = 0.050000, pos = (0.0, 0.0, 0.299), text_bg = (1, 1, 1, 0), text_fg = (1, 1, 1, 1), frameColor = (1, 1, 1, 0))
screenShotNotice.reparentTo(base.a2dBottomCenter)
screenShotNotice.setBin('gui-popup', 0)
def clearScreenshotMsg(event):
screenShotNotice.destroy()
taskMgr.doMethodLater(3.0, clearScreenshotMsg, 'clearScreenshot')
|
ProjectNugget/Project-Nugget
|
nugget/nuggetbase/NuggetBase.py
|
Python
|
bsd-3-clause
| 5,469
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['SVR'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_DayOfWeek_SVR.py
|
Python
|
bsd-3-clause
| 170
|
# Copyright (C) 2011, 2012, 2015 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Contains the abstract base class PlotListener for listeners that
create plots of vectors at each iteration.
"""
import PISM.logging
import PISM.vec
def pauseListener(*args):
"""Listener that temporarily halts operation at each iteration waiting for a key press."""
PISM.logging.pause()
class PlotListener(object):
"""Base class for listeners that create plots of vectors at each iteration.
Provides objects for converting :cpp:class:`IceModelVec`'s to ``numpy`` vectors
on processor zero, as well as basic ``matplotlib`` figure management."""
def __init__(self, grid):
self.grid = grid
self.tz_scalar = PISM.vec.ToProcZero(grid, dof=1)
self.tz_vector = PISM.vec.ToProcZero(grid, dof=2)
self.figs = {}
def toproczero(self, *args):
"""Returns a ``numpy`` vector on processor zero corresponding to an :cpp:class:`IceModelVec`.
Takes as input either a single :cpp:class:`IceModelVec` or dictionary of such
vectors and the name of an entry. Returns ``None`` on other processors."""
if len(args) == 2:
data = args[0]
name = args[1]
v = data[name]
else:
v = args[0]
if v is None:
return None
if v.get_ndof() == 1:
return self.tz_scalar.communicate(v)
return self.tz_vector.communicate(v)
def figure(self, name='default'):
"""Returns a ``matplotlib`` figure based on a string name. If the instance has not yet
created a figure with the given name, a new figure is created and associated with the given name."""
fig = self.figs.get(name)
if fig is None:
import matplotlib.pyplot as pp
fig = pp.figure()
self.figs[name] = fig
return fig.number
def __call__(self, solver, itr, data):
raise NotImplementedError()
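# --- Illustrative sketch, not part of the original module -------------------
# A minimal example of how a concrete listener might subclass PlotListener.
# It assumes that 'data' is a dictionary of IceModelVec objects containing an
# entry named 'residual'; that entry name and the plotting details are
# assumptions made purely for illustration.
class ResidualPlotListener(PlotListener):
    """Sketch of a listener that plots a scalar residual field each iteration."""
    def __call__(self, solver, itr, data):
        r = self.toproczero(data, 'residual')
        if r is None:
            return  # not on processor zero
        import matplotlib.pyplot as pp
        pp.figure(self.figure('residual'))  # activate (or create) the named figure
        pp.clf()
        pp.imshow(r)
        pp.title('residual at iteration %d' % itr)
        pp.draw()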
|
talbrecht/pism_pik07
|
site-packages/PISM/invert/listener.py
|
Python
|
gpl-3.0
| 2,656
|
#!/usr/bin/env python
"""phpfpmstats - Munin Plugin for monitoring PHP FPM (Fast Process Manager).
Requirements
- The PHP FPM status page must be configured and it must have access
permissions from localhost.
Wild Card Plugin - No
Multigraph Plugin - Graph Structure
- php_fpm_connections
- php_fpm_processes
Environment Variables
host: Web Server Host. (Default: 127.0.0.1)
port: Web Server Port. (Default: 80, SSL: 443)
user: User in case authentication is required for access to
FPM Status page.
password: Password in case authentication is required for access to
FPM Status page.
monpath: FPM status page path relative to Document Root.
(Default: fpm_status.php)
ssl: Use SSL if yes. (Default: no)
include_graphs: Comma separated list of enabled graphs.
(All graphs enabled by default.)
exclude_graphs: Comma separated list of disabled graphs.
Environment Variables for Multiple Instances of Plugin (Omitted by default.)
instance_name: Name of instance.
instance_label: Graph title label for instance.
(Default is the same as instance name.)
instance_label_format: One of the following values:
- suffix (Default)
- prefix
- none
Example:
[phpfpmstats]
env.exclude_graphs php_fpm_processes
"""
# Munin - Magic Markers
#%# family=auto
#%# capabilities=autoconf nosuggest
import sys
from pymunin import MuninGraph, MuninPlugin, muninMain
from pysysinfo.phpfpm import PHPfpmInfo
__author__ = "Ali Onur Uyar"
__copyright__ = "Copyright 2011, Ali Onur Uyar"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.20"
__maintainer__ = "Ali Onur Uyar"
__email__ = "aouyar at gmail.com"
__status__ = "Development"
class MuninPHPfpmPlugin(MuninPlugin):
"""Multigraph Munin Plugin for monitoring PHP Fast Process Manager (FPM).
"""
plugin_name = 'phpfpmstats'
isMultigraph = True
isMultiInstance = True
def __init__(self, argv=(), env=None, debug=False):
"""Populate Munin Plugin with MuninGraph instances.
@param argv: List of command line arguments.
@param env: Dictionary of environment variables.
@param debug: Print debugging messages if True. (Default: False)
"""
MuninPlugin.__init__(self, argv, env, debug)
self._host = self.envGet('host')
self._port = self.envGet('port', None, int)
self._user = self.envGet('user')
self._monpath = self.envGet('monpath')
self._password = self.envGet('password')
self._ssl = self.envCheckFlag('ssl', False)
self._category = 'PHP'
if self.graphEnabled('php_fpm_connections'):
graph = MuninGraph('PHP FPM - Connections per second', self._category,
info='PHP Fast Process Manager (FPM) - Connections per second.',
args='--base 1000 --lower-limit 0')
graph.addField('conn', 'conn', draw='LINE2', type='DERIVE', min=0)
self.appendGraph('php_fpm_connections', graph)
if self.graphEnabled('php_fpm_processes'):
graph = MuninGraph('PHP FPM - Processes', self._category,
info='PHP Fast Process Manager (FPM) - Active / Idle Processes.',
args='--base 1000 --lower-limit 0')
graph.addField('active', 'active', draw='AREASTACK', type='GAUGE')
graph.addField('idle', 'idle', draw='AREASTACK', type='GAUGE')
graph.addField('total', 'total', draw='LINE2', type='GAUGE',
colour='000000')
self.appendGraph('php_fpm_processes', graph)
def retrieveVals(self):
"""Retrieve values for graphs."""
fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password,
self._monpath, self._ssl)
stats = fpminfo.getStats()
if self.hasGraph('php_fpm_connections') and stats:
self.setGraphVal('php_fpm_connections', 'conn',
stats['accepted conn'])
if self.hasGraph('php_fpm_processes') and stats:
self.setGraphVal('php_fpm_processes', 'active',
stats['active processes'])
self.setGraphVal('php_fpm_processes', 'idle',
stats['idle processes'])
self.setGraphVal('php_fpm_processes', 'total',
stats['total processes'])
def autoconf(self):
"""Implements Munin Plugin Auto-Configuration Option.
@return: True if plugin can be auto-configured, False otherwise.
"""
fpminfo = PHPfpmInfo(self._host, self._port, self._user, self._password,
self._monpath, self._ssl)
return fpminfo is not None
def main():
sys.exit(muninMain(MuninPHPfpmPlugin))
if __name__ == "__main__":
main()
|
aouyar/PyMunin
|
pymunin/plugins/phpfpmstats.py
|
Python
|
gpl-3.0
| 5,175
|
import hashlib
import logging
from collections import defaultdict
from xml.parsers.expat import ExpatError
import requests
from django import http
from django.conf import settings
from django.core.cache import cache
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.db.models import Q, Count
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from jsonview.decorators import json_view
import xmltodict
from airmozilla.base.utils import (
paginate,
get_base_url,
prepare_vidly_video_url,
)
from airmozilla.main.models import Event, VidlySubmission, Template
from airmozilla.manage import forms
from airmozilla.manage import vidly
from airmozilla.manage import archiver
from airmozilla.manage import videoinfo
from airmozilla.main.tasks import (
create_all_timestamp_pictures,
create_all_event_pictures,
)
from .decorators import superuser_required
@superuser_required
def vidly_media(request):
events = Event.objects.filter(
Q(template__name__contains='Vid.ly') |
Q(pk__in=VidlySubmission.objects.all()
.values_list('event_id', flat=True))
)
q_event = request.GET.get('event', '')
status = request.GET.get('status')
repeated = request.GET.get('repeated') == 'event'
repeats = {}
if status:
if status not in ('New', 'Processing', 'Finished', 'Error'):
return http.HttpResponseBadRequest("Invalid 'status' value")
# make a list of all tags -> events
_tags = {}
for event in events:
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
continue
_tags[environment['tag']] = event.id
event_ids = []
for tag in vidly.medialist(status):
try:
event_ids.append(_tags[tag])
except KeyError:
# it's on vid.ly but not in this database
logging.debug("Unknown event with tag=%r", tag)
events = events.filter(id__in=event_ids)
elif repeated:
repeats = dict(
(x['event_id'], x['event__id__count'])
for x in
VidlySubmission.objects
.values('event_id')
.annotate(Count('event__id'))
.filter(event__id__count__gt=1)
)
events = Event.objects.filter(id__in=repeats.keys())
if q_event:
events = events.filter(
Q(title__icontains=q_event) | Q(slug__iexact=q_event)
)
def get_repeats(event):
return repeats[event.id]
events = events.order_by('-start_time')
events = events.select_related('template')
paged = paginate(events, request.GET.get('page'), 15)
submissions = defaultdict(list)
for submission in VidlySubmission.objects.filter(event__in=paged):
submissions[submission.event_id].append(submission)
vidly_resubmit_form = forms.VidlyResubmitForm()
context = {
'paginate': paged,
'status': status,
'vidly_resubmit_form': vidly_resubmit_form,
'repeated': repeated,
'get_repeats': get_repeats,
'submissions': submissions,
'event': q_event,
}
return render(request, 'manage/vidly_media.html', context)
@superuser_required
@json_view
def vidly_media_status(request):
context = {}
if request.GET.get('tag'):
tag = request.GET.get('tag')
else:
if not request.GET.get('id'):
return http.HttpResponseBadRequest("No 'id'")
event = get_object_or_404(Event, pk=request.GET['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
# perhaps it has a VidlySubmission anyway
submissions = (
VidlySubmission.objects
.exclude(tag__isnull=True)
.filter(event=event).order_by('-submission_time')
)
for submission in submissions[:1]:
environment = {'tag': submission.tag}
break
else:
return {}
tag = environment['tag']
cache_key = 'vidly-query-{md5}'.format(
md5=hashlib.md5(tag.encode('utf8')).hexdigest().strip())
force = request.GET.get('refresh', False)
if force:
results = None # force a refresh
else:
results = cache.get(cache_key)
if not results:
results = vidly.query(tag).get(tag, {})
expires = 60
# if it's healthy we might as well cache a bit
# longer because this is potentially used a lot
if results.get('Status') == 'Finished':
expires = 60 * 60
if results:
cache.set(cache_key, results, expires)
context['status'] = results.get('Status')
return context
@superuser_required
@json_view
def vidly_media_info(request):
def as_fields(result):
return [
{'key': a, 'value': b}
for (a, b)
in sorted(result.items())
]
if not request.GET.get('id'):
return http.HttpResponseBadRequest("No 'id'")
event = get_object_or_404(Event, pk=request.GET['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
# perhaps it has a VidlySubmission anyway
submissions = (
VidlySubmission.objects
.exclude(tag__isnull=True)
.filter(event=event).order_by('-submission_time')
)
for submission in submissions[:1]:
environment = {'tag': submission.tag}
break
if not environment.get('tag') or environment.get('tag') == 'None':
return {'fields': as_fields({
'*Note*': 'Not a valid tag in template',
'*Template contents*': unicode(environment),
})}
else:
tag = environment['tag']
cache_key = 'vidly-query-%s' % tag
force = request.GET.get('refresh', False)
if force:
results = None # force a refresh
else:
results = cache.get(cache_key)
if not results:
all_results = vidly.query(tag)
if tag not in all_results:
return {
'ERRORS': ['Tag (%s) not found in Vid.ly' % tag]
}
results = all_results[tag]
cache.set(cache_key, results, 60)
data = {'fields': as_fields(results)}
is_hd = results.get('IsHD', False)
if is_hd == 'false':
is_hd = False
data['past_submission'] = {
'url': results['SourceFile'],
'email': results['UserEmail'],
'hd': bool(is_hd),
'token_protection': event.privacy != Event.PRIVACY_PUBLIC,
}
if request.GET.get('past_submission_info'):
qs = (
VidlySubmission.objects
.filter(event=event)
.order_by('-submission_time')
)
for submission in qs[:1]:
if event.privacy != Event.PRIVACY_PUBLIC:
# forced
token_protection = True
else:
# whatever it was before
token_protection = submission.token_protection
data['past_submission'] = {
'url': submission.url,
'email': submission.email,
'hd': submission.hd,
'token_protection': token_protection,
}
return data
@require_POST
@superuser_required
def vidly_media_resubmit(request):
if request.POST.get('cancel'):
return redirect(reverse('manage:vidly_media') + '?status=Error')
form = forms.VidlyResubmitForm(data=request.POST)
if not form.is_valid():
return http.HttpResponse(str(form.errors))
event = get_object_or_404(Event, pk=form.cleaned_data['id'])
environment = event.template_environment or {}
if not environment.get('tag') or environment.get('tag') == 'None':
raise ValueError("Not a valid tag in template")
if event.privacy != Event.PRIVACY_PUBLIC:
token_protection = True # no choice
else:
token_protection = form.cleaned_data['token_protection']
base_url = get_base_url(request)
webhook_url = base_url + reverse('manage:vidly_media_webhook')
old_tag = environment['tag']
url = prepare_vidly_video_url(form.cleaned_data['url'])
shortcode, error = vidly.add_media(
url=url,
hd=form.cleaned_data['hd'],
token_protection=token_protection,
notify_url=webhook_url,
)
VidlySubmission.objects.create(
event=event,
url=url,
token_protection=token_protection,
hd=form.cleaned_data['hd'],
tag=shortcode,
submission_error=error
)
if error:
messages.warning(
request,
"Media could not be re-submitted:\n<br>\n%s" % error
)
else:
messages.success(
request,
"Event re-submitted to use tag '%s'" % shortcode
)
vidly.delete_media(old_tag)
event.template_environment['tag'] = shortcode
event.status = Event.STATUS_PROCESSING
event.save()
cache_key = 'vidly-query-%s' % old_tag
cache.delete(cache_key)
return redirect(reverse('manage:vidly_media') + '?status=Error')
# Note that this view is publicly available.
# That means we can't trust the content but we can take it as a hint.
@csrf_exempt
@require_POST
def vidly_media_webhook(request):
if not request.POST.get('xml'):
return http.HttpResponseBadRequest("no 'xml'")
# We can expect three pieces of XML.
# 1) That the media was submitted
# https://bug1135808.bugzilla.mozilla.org/attachment.cgi?id=8568143
# 2) That the media failed processing
# https://bug1135808.bugzilla.mozilla.org/attachment.cgi?id=8568190
# 3) That the media succeeded processing
# https://bug1135808.bugzilla.mozilla.org/attachment.cgi?id=8568149
#
# If it's case number 1, just ignore it and do nothing.
# If it's case number 2 or number 3, take it as a hint you can't trust
# and kick off the auto-archive procedure.
xml_string = request.POST['xml'].strip()
try:
struct = xmltodict.parse(xml_string)
except ExpatError:
return http.HttpResponseBadRequest("Bad 'xml'")
try:
struct['Response']['Result']['Task']
archiver.auto_archive()
except KeyError:
# If it doesn't have a "Result" or "Task", it was just a notification
# that the media was added.
pass
return http.HttpResponse('OK\n')
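# --- Illustrative sketch, not part of the original module -------------------
# The webhook above only cares whether the XML payload contains a
# Response/Result/Task element. This helper, using two made-up minimal
# payloads, shows how xmltodict.parse() exposes that structure; the element
# names inside <Task> are assumptions for illustration only.
def _webhook_payload_examples():  # pragma: no cover
    submitted_only = xmltodict.parse(
        '<Response><Message>Submitted</Message></Response>'
    )
    finished = xmltodict.parse(
        '<Response><Result><Task><Status>Finished</Status></Task></Result></Response>'
    )
    # Case 1: no Result/Task, so the webhook treats it as a notification and ignores it.
    assert 'Result' not in submitted_only['Response']
    # Cases 2 and 3: Result/Task present, so archiver.auto_archive() gets kicked off.
    assert finished['Response']['Result']['Task']['Status'] == 'Finished'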
@superuser_required
def vidly_media_timings(request):
context = {
}
return render(request, 'manage/vidly_media_timings.html', context)
@superuser_required
@json_view
def vidly_media_timings_data(request):
points = VidlySubmission.get_recent_points(100)
slope_and_intercept = VidlySubmission.get_general_least_square_slope(
points=points
)
if slope_and_intercept is None:
# This is the bootstrapping case when you don't have enough data.
# It only applies if you have an empty database.
slope, intercept = None, None
else:
slope, intercept = slope_and_intercept
context = {
'points': points,
'slope': slope,
'intercept': intercept,
}
return context
@superuser_required
def legacy_video_migration(request): # pragma: no cover
"""for one off mass vid.ly submission"""
class VideoURLError(Exception):
pass
def redirect_recurse(url):
"""return the URL only when it's not redirecting.
Raise an error on all other statuses >= 400
"""
response = requests.head(url)
if response.status_code in (301, 302):
return redirect_recurse(response.headers['Location'])
elif response.status_code >= 400:
raise VideoURLError('{} => {}'.format(
url, response.status_code
))
return url
if request.method == 'POST':
events = Event.objects.filter(id__in=request.POST.getlist('ids'))
template, = Template.objects.filter(default_archive_template=True)
for event in events:
try:
url = event.template_environment['url']
url = redirect_recurse(url)
base_url = get_base_url(request)
webhook_url = base_url + reverse('manage:vidly_media_webhook')
url = prepare_vidly_video_url(url)
token_protection = event.privacy != Event.PRIVACY_PUBLIC
shortcode, error = vidly.add_media(
url,
hd=True,
token_protection=token_protection,
notify_url=webhook_url,
)
VidlySubmission.objects.create(
event=event,
url=url,
token_protection=token_protection,
hd=True,
tag=shortcode,
submission_error=error
)
event.template_environment = {
'tag': shortcode,
}
if shortcode:
event.template = template
event.archive_time = None
event.status = Event.STATUS_PROCESSING
event.save()
videoinfo.fetch_duration(
event,
video_url=url,
save=True,
verbose=settings.DEBUG
)
if Event.objects.get(id=event.id).duration:
create_all_event_pictures.delay(
event.id,
video_url=url,
)
create_all_timestamp_pictures.delay(
event.id,
video_url=url,
)
except Exception as exception:
error = str(exception)
messages.error(
request,
'Failed to submit "{}". Error: {}'.format(
event.title,
error,
)
)
messages.success(
request,
'Submitted {} events for Vid.ly processing'.format(
events.count()
)
)
return redirect('manage:legacy_video_migration')
search = request.GET.get('search', 'http://videos.mozilla.org/')
if search:
events = Event.objects.filter(
template_environment__icontains=search
)
else:
events = Event.objects.none()
context = {
'events': events,
'search': search,
}
return render(request, 'manage/legacy_video_migration.html', context)
|
blossomica/airmozilla
|
airmozilla/manage/views/vidly_media.py
|
Python
|
bsd-3-clause
| 15,188
|
#!/usr/bin/python
# _*_ coding: utf-8 _*_
s = '271828'
def chr2num(s):
return {'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8,
'9':9}[s]
def str2int(s):
return reduce(lambda x, y: x * 10 + y, map(chr2num, s))
print str2int(s)
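# Illustrative trace, not part of the original script: for s = '271828' the
# reduce() folds left to right as ((((2*10+7)*10+1)*10+8)*10+2)*10+8 = 271828.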
|
louistin/fullstack
|
Python/functional_programming/higher_order_function/map_reduce_20161026.py
|
Python
|
mit
| 265
|
import os
import zipfile
from io import BytesIO
from django.test import TestCase
from django.core.files import File
from django.http import HttpResponse
from django.test.client import RequestFactory
from zipview.views import BaseZipView
class ZipView(BaseZipView):
"""Test ZipView basic implementation."""
_files = None
def get_files(self):
if self._files is None:
dirname = os.path.dirname(__file__)
self._files = [
File(
open(os.path.join(dirname, 'files', 'test_file.txt'), 'rb'),
name='files/test_file.txt',
),
File(
open(os.path.join(dirname, 'files', 'test_file.odt'), 'rb'),
name='files/test_file.odt',
),
]
return self._files
class ZipViewTests(TestCase):
def setUp(self):
self.view = ZipView()
self.request = RequestFactory()
def test_response_type(self):
response = self.view.get(self.request)
self.assertTrue(isinstance(response, HttpResponse))
def test_response_params(self):
response = self.view.get(self.request)
self.assertEqual(response['Content-Type'], 'application/zip')
self.assertEqual(response['Content-Disposition'], 'attachment; filename=download.zip')
def test_response_content_length(self):
response = self.view.get(self.request)
self.assertEqual(response['Content-Length'], '19819')
def test_valid_zipfile(self):
response = self.view.get(self.request)
content = BytesIO(response.content)
self.assertTrue(zipfile.is_zipfile(content))
zip_file = zipfile.ZipFile(content)
self.assertEqual(
zip_file.namelist(),
['files/test_file.txt', 'files/test_file.odt'])
def test_custom_archive_name(self):
self.view.get_archive_name = lambda request: 'toto.zip'
response = self.view.get(self.request)
self.assertEqual(response['Content-Disposition'], 'attachment; filename=toto.zip')
|
thibault/django-zipview
|
zipview/tests/test_views.py
|
Python
|
mit
| 2,101
|
# -*- coding: utf-8 -*-
"""Markdown r/w models """
class MarkdownItem:
"""Models anything that can be written in Markdown"""
TYPES = ["text", "url", "image", "title"]
ATTRIBUTES = ["ref", "size"]
def __init__(self, text, item_type, attributes=None):
"""
:param text: Text property to write
:param item_type: Type of item
:param attributes: Extra param, like url, ref ... Each key MUST be in
MarkdownItem.ATTRIBUTES
"""
self.text = str(text)
self.type = item_type
self.attributes = attributes
def to_markdown(self):
"""Converts to markdown
:return: item in markdown format
"""
if self.type == "text":
return self.text
elif self.type == "url" or self.type == "image":
return "[" + self.text + "](" + self.attributes["ref"] + ")"
elif self.type == "title":
return "#" * int(self.attributes["size"]) + " " + self.text
return None
def __str__(self):
return self.to_markdown()
class MarkdownTable:
"""Models and writes a table to .md"""
def __init__(self, labels, table):
"""
:param labels: Column names
:param table: table data
"""
self.labels = labels
self.table = table
@staticmethod
def _get_row(items):
"""
:param items: array
:return: markdown-formatted array
"""
items = [
str(item)
for item in items
] # convert to strings
return "|" + "|".join(items) + "|"
def _get_header(self):
"""Gets header of table
:return: markdown-formatted header"""
out = self._get_row(self.labels)
out += "\n"
out += self._get_row(["---"] * len(self.labels)) # line below headers
return out
def to_markdown(self):
"""Converts to markdown
:return: item in markdown format
"""
out = self._get_header()
out += "\n"
for row in self.table:
out += self._get_row(row)
out += "\n"
return out
def __str__(self):
return self.to_markdown()
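# --- Illustrative usage sketch, not part of the original module -------------
# A quick demonstration of the two classes above; the sample values are made
# up purely for illustration.
if __name__ == "__main__":
    title = MarkdownItem("Results", "title", {"size": 2})
    link = MarkdownItem("hal docs", "url", {"ref": "https://example.com"})
    table = MarkdownTable(["name", "score"], [["alice", 3], ["bob", 5]])
    print(title)  # -> ## Results
    print(link)   # -> [hal docs](https://example.com)
    print(table)  # -> |name|score| header, |---|---| separator, then one row per entry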
|
sirfoga/hal
|
hal/streams/markdown.py
|
Python
|
apache-2.0
| 2,222
|
#!/usr/bin/env python
"""
A linting tool to check for xss vulnerabilities.
"""
if __name__ == "__main__":
from xsslint.main import main
main()
|
a-parhom/edx-platform
|
scripts/xsslint/xss_linter.py
|
Python
|
agpl-3.0
| 153
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
# Copyright (c) 2013 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import fnmatch
import logging
import re
from datetime import datetime
from optparse import make_option
from webkitpy.tool import steps
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.buildbot import BuildBot
from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.user import User
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import Command
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.port import platform_options, configuration_options
_log = logging.getLogger(__name__)
class SuggestReviewers(AbstractSequencedCommand):
name = "suggest-reviewers"
help_text = "Suggest reviewers for a patch based on recent changes to the modified files."
steps = [
steps.SuggestReviewers,
]
def _prepare_state(self, options, args, tool):
options.suggest_reviewers = True
class BugsToCommit(Command):
name = "bugs-to-commit"
help_text = "List bugs in the commit-queue"
def execute(self, options, args, tool):
# FIXME: This command is poorly named. It's fetching the commit-queue list here. The name implies it's fetching pending-commit (all r+'d patches).
bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue()
for bug_id in bug_ids:
print "%s" % bug_id
class PatchesInCommitQueue(Command):
name = "patches-in-commit-queue"
help_text = "List patches in the commit-queue"
def execute(self, options, args, tool):
patches = tool.bugs.queries.fetch_patches_from_commit_queue()
_log.info("Patches in commit queue:")
for patch in patches:
print patch.url()
class PatchesToCommitQueue(Command):
name = "patches-to-commit-queue"
help_text = "Patches which should be added to the commit queue"
def __init__(self):
options = [
make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
]
Command.__init__(self, options=options)
@staticmethod
def _needs_commit_queue(patch):
if patch.commit_queue() == "+": # If it's already cq+, ignore the patch.
_log.info("%s already has cq=%s" % (patch.id(), patch.commit_queue()))
return False
        # We only need to worry about patches from contributors who are not yet committers.
committer_record = CommitterList().committer_by_email(patch.attacher_email())
if committer_record:
_log.info("%s committer = %s" % (patch.id(), committer_record))
return not committer_record
def execute(self, options, args, tool):
patches = tool.bugs.queries.fetch_patches_from_pending_commit_list()
patches_needing_cq = filter(self._needs_commit_queue, patches)
if options.bugs:
bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq)
bugs_needing_cq = sorted(set(bugs_needing_cq))
for bug_id in bugs_needing_cq:
print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
else:
for patch in patches_needing_cq:
print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")
class PatchesToReview(Command):
name = "patches-to-review"
help_text = "List bugs which have attachments pending review"
def __init__(self):
options = [
make_option("--all", action="store_true",
help="Show all bugs regardless of who is on CC (it might take a while)"),
make_option("--include-cq-denied", action="store_true",
help="By default, r? patches with cq- are omitted unless this option is set"),
make_option("--cc-email",
help="Specifies the email on the CC field (defaults to your bugzilla login email)"),
]
Command.__init__(self, options=options)
def _print_report(self, report, cc_email, print_all):
if print_all:
print "Bugs with attachments pending review:"
else:
print "Bugs with attachments pending review that has %s in the CC list:" % cc_email
print "http://webkit.org/b/bugid Description (age in days)"
for row in report:
print "%s (%d)" % (row[1], row[0])
print "Total: %d" % len(report)
def _generate_report(self, bugs, include_cq_denied):
report = []
for bug in bugs:
patch = bug.unreviewed_patches()[-1]
if not include_cq_denied and patch.commit_queue() == "-":
continue
age_in_days = (datetime.today() - patch.attach_date()).days
report.append((age_in_days, "http://webkit.org/b/%-7s %s" % (bug.id(), bug.title())))
report.sort()
return report
def execute(self, options, args, tool):
tool.bugs.authenticate()
cc_email = options.cc_email
if not cc_email and not options.all:
cc_email = tool.bugs.username
bugs = tool.bugs.queries.fetch_bugs_from_review_queue(cc_email=cc_email)
report = self._generate_report(bugs, options.include_cq_denied)
self._print_report(report, cc_email, options.all)
class WhatBroke(Command):
name = "what-broke"
help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url
def _print_builder_line(self, builder_name, max_name_width, status_message):
print "%s : %s" % (builder_name.ljust(max_name_width), status_message)
def _print_blame_information_for_builder(self, builder_status, name_width, avoid_flakey_tests=True):
builder = self._tool.buildbot.builder_with_name(builder_status["name"])
red_build = builder.build(builder_status["build_number"])
regression_window = builder.find_regression_window(red_build)
if not regression_window.failing_build():
self._print_builder_line(builder.name(), name_width, "FAIL (error loading build information)")
return
if not regression_window.build_before_failure():
self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: sometime before %s?)" % regression_window.failing_build().revision())
return
revisions = regression_window.revisions()
first_failure_message = ""
if (regression_window.failing_build() == builder.build(builder_status["build_number"])):
first_failure_message = " FIRST FAILURE, possibly a flaky test"
self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: %s%s)" % (revisions, first_failure_message))
for revision in revisions:
commit_info = self._tool.checkout().commit_info_for_revision(revision)
if commit_info:
print commit_info.blame_string(self._tool.bugs)
else:
print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
def execute(self, options, args, tool):
builder_statuses = tool.buildbot.builder_statuses()
longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses)))
failing_builders = 0
for builder_status in builder_statuses:
# If the builder is green, print OK, exit.
if builder_status["is_green"]:
continue
self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name)
failing_builders += 1
if failing_builders:
print "%s of %s are failing" % (failing_builders, pluralize("builder", len(builder_statuses)))
else:
print "All builders are passing!"
class ResultsFor(Command):
name = "results-for"
help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
argument_names = "REVISION"
def _print_layout_test_results(self, results):
if not results:
print " No results."
return
for title, files in results.parsed_results().items():
print " %s" % title
for filename in files:
print " %s" % filename
def execute(self, options, args, tool):
builders = self._tool.buildbot.builders()
for builder in builders:
print "%s:" % builder.name()
build = builder.build_for_revision(args[0], allow_failed_lookups=True)
self._print_layout_test_results(build.layout_test_results())
class FailureReason(Command):
name = "failure-reason"
help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url
def _blame_line_for_revision(self, revision):
try:
commit_info = self._tool.checkout().commit_info_for_revision(revision)
except Exception, e:
return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e)
if not commit_info:
return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision
return commit_info.blame_string(self._tool.bugs)
def _print_blame_information_for_transition(self, regression_window, failing_tests):
red_build = regression_window.failing_build()
print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests)
print "Suspect revisions:"
for revision in regression_window.revisions():
print self._blame_line_for_revision(revision)
def _explain_failures_for_builder(self, builder, start_revision):
print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision)
revision_to_test = start_revision
build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
layout_test_results = build.layout_test_results()
if not layout_test_results:
# FIXME: This could be made more user friendly.
print "Failed to load layout test results from %s; can't continue. (start revision = r%s)" % (build.results_url(), start_revision)
return 1
results_to_explain = set(layout_test_results.failing_tests())
last_build_with_results = build
print "Starting at %s" % revision_to_test
while results_to_explain:
revision_to_test -= 1
new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True)
if not new_build:
print "No build for %s" % revision_to_test
continue
build = new_build
latest_results = build.layout_test_results()
if not latest_results:
print "No results build %s (r%s)" % (build._number, build.revision())
continue
failures = set(latest_results.failing_tests())
if len(failures) >= 20:
# FIXME: We may need to move this logic into the LayoutTestResults class.
# The buildbot stops runs after 20 failures so we don't have full results to work with here.
print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
continue
fixed_results = results_to_explain - failures
if not fixed_results:
print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures))
last_build_with_results = build
continue
regression_window = RegressionWindow(build, last_build_with_results)
self._print_blame_information_for_transition(regression_window, fixed_results)
last_build_with_results = build
results_to_explain -= fixed_results
if results_to_explain:
print "Failed to explain failures: %s" % results_to_explain
return 1
print "Explained all results for %s" % builder.name()
return 0
def _builder_to_explain(self):
builder_statuses = self._tool.buildbot.builder_statuses()
red_statuses = [status for status in builder_statuses if not status["is_green"]]
print "%s failing" % (pluralize("builder", len(red_statuses)))
builder_choices = [status["name"] for status in red_statuses]
# We could offer an "All" choice here.
chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
# FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
for status in red_statuses:
if status["name"] == chosen_name:
return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
def execute(self, options, args, tool):
(builder, latest_revision) = self._builder_to_explain()
start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision
if not start_revision:
print "Revision required."
return 1
return self._explain_failures_for_builder(builder, start_revision=int(start_revision))
class FindFlakyTests(Command):
name = "find-flaky-tests"
help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url
def _find_failures(self, builder, revision):
build = builder.build_for_revision(revision, allow_failed_lookups=True)
if not build:
print "No build for %s" % revision
return (None, None)
results = build.layout_test_results()
if not results:
print "No results build %s (r%s)" % (build._number, build.revision())
return (None, None)
failures = set(results.failing_tests())
if len(failures) >= 20:
# FIXME: We may need to move this logic into the LayoutTestResults class.
# The buildbot stops runs after 20 failures so we don't have full results to work with here.
print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision())
return (None, None)
return (build, failures)
def _increment_statistics(self, flaky_tests, flaky_test_statistics):
for test in flaky_tests:
count = flaky_test_statistics.get(test, 0)
flaky_test_statistics[test] = count + 1
def _print_statistics(self, statistics):
print "=== Results ==="
print "Occurances Test name"
for value, key in sorted([(value, key) for key, value in statistics.items()]):
print "%10d %s" % (value, key)
def _walk_backwards_from(self, builder, start_revision, limit):
flaky_test_statistics = {}
all_previous_failures = set([])
one_time_previous_failures = set([])
previous_build = None
for i in range(limit):
revision = start_revision - i
print "Analyzing %s ... " % revision,
(build, failures) = self._find_failures(builder, revision)
if failures == None:
# Notice that we don't loop on the empty set!
continue
print "has %s failures" % len(failures)
flaky_tests = one_time_previous_failures - failures
if flaky_tests:
print "Flaky tests: %s %s" % (sorted(flaky_tests),
previous_build.results_url())
self._increment_statistics(flaky_tests, flaky_test_statistics)
one_time_previous_failures = failures - all_previous_failures
all_previous_failures = failures
previous_build = build
self._print_statistics(flaky_test_statistics)
def _builder_to_analyze(self):
statuses = self._tool.buildbot.builder_statuses()
choices = [status["name"] for status in statuses]
chosen_name = self._tool.user.prompt_with_list("Which builder to analyze:", choices)
for status in statuses:
if status["name"] == chosen_name:
return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
def execute(self, options, args, tool):
(builder, latest_revision) = self._builder_to_analyze()
limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000
return self._walk_backwards_from(builder, latest_revision, limit=int(limit))
class TreeStatus(Command):
name = "tree-status"
help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
and displays the status of each builder."""
def execute(self, options, args, tool):
for builder in tool.buildbot.builder_statuses():
status_string = "ok" if builder["is_green"] else "FAIL"
print "%s : %s" % (status_string.ljust(4), builder["name"])
class CrashLog(Command):
name = "crash-log"
help_text = "Print the newest crash log for the given process"
long_help = """Finds the newest crash log matching the given process name
and PID and prints it to stdout."""
argument_names = "PROCESS_NAME [PID]"
def execute(self, options, args, tool):
crash_logs = CrashLogs(tool)
pid = None
if len(args) > 1:
pid = int(args[1])
print crash_logs.find_newest_log(args[0], pid)
class PrintExpectations(Command):
name = 'print-expectations'
help_text = 'Print the expected result for the given test(s) on the given port(s)'
def __init__(self):
options = [
make_option('--all', action='store_true', default=False,
help='display the expectations for *all* tests'),
make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
make_option('--csv', action='store_true', default=False,
help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
make_option('-f', '--full', action='store_true', default=False,
help='Print a full TestExpectations-style line for every match'),
make_option('--paths', action='store_true', default=False,
help='display the paths for all applicable expectation files'),
] + platform_options(use_globs=True)
Command.__init__(self, options=options)
self._expectation_models = {}
def execute(self, options, args, tool):
if not options.paths and not args and not options.all:
print "You must either specify one or more test paths or --all."
return
if options.platform:
port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
if not port_names:
default_port = tool.port_factory.get(options.platform)
if default_port:
port_names = [default_port.name()]
else:
print "No port names match '%s'" % options.platform
return
else:
default_port = tool.port_factory.get(port_names[0])
else:
default_port = tool.port_factory.get(options=options)
port_names = [default_port.name()]
if options.paths:
files = default_port.expectations_files()
layout_tests_dir = default_port.layout_tests_dir()
for file in files:
if file.startswith(layout_tests_dir):
file = file.replace(layout_tests_dir, 'LayoutTests')
print file
return
tests = set(default_port.tests(args))
for port_name in port_names:
model = self._model(options, port_name, tests)
tests_to_print = self._filter_tests(options, model, tests)
lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
if port_name != port_names[0]:
print
print '\n'.join(self._format_lines(options, port_name, lines))
def _filter_tests(self, options, model, tests):
filtered_tests = set()
if options.include_keyword:
for keyword in options.include_keyword:
filtered_tests.update(model.get_test_set_for_keyword(keyword))
else:
filtered_tests = tests
for keyword in options.exclude_keyword:
filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
return filtered_tests
def _format_lines(self, options, port_name, lines):
output = []
if options.csv:
for line in lines:
output.append("%s,%s" % (port_name, line.to_csv()))
elif lines:
include_modifiers = options.full
include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
output.append("// For %s" % port_name)
for line in lines:
output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
return output
def _model(self, options, port_name, tests):
port = self._tool.port_factory.get(port_name, options)
return TestExpectations(port, tests).model()
class PrintBaselines(Command):
name = 'print-baselines'
help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
def __init__(self):
options = [
make_option('--all', action='store_true', default=False,
help='display the baselines for *all* tests'),
make_option('--csv', action='store_true', default=False,
help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
make_option('--include-virtual-tests', action='store_true',
help='Include virtual tests'),
] + platform_options(use_globs=True)
Command.__init__(self, options=options)
self._platform_regexp = re.compile('platform/([^\/]+)/(.+)')
def execute(self, options, args, tool):
if not args and not options.all:
print "You must either specify one or more test paths or --all."
return
default_port = tool.port_factory.get()
if options.platform:
port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
if not port_names:
print "No port names match '%s'" % options.platform
else:
port_names = [default_port.name()]
if options.include_virtual_tests:
tests = sorted(default_port.tests(args))
else:
# FIXME: make real_tests() a public method.
tests = sorted(default_port._real_tests(args))
for port_name in port_names:
if port_name != port_names[0]:
print
if not options.csv:
print "// For %s" % port_name
port = tool.port_factory.get(port_name)
for test_name in tests:
self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))
def _print_baselines(self, options, port_name, test_name, baselines):
for extension in sorted(baselines.keys()):
baseline_location = baselines[extension]
if baseline_location:
if options.csv:
print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
extension[1:], baseline_location, self._platform_for_path(baseline_location))
else:
print baseline_location
def _platform_for_path(self, relpath):
platform_matchobj = self._platform_regexp.match(relpath)
if platform_matchobj:
return platform_matchobj.group(1)
return None
class FindResolvedBugs(Command):
name = "find-resolved-bugs"
help_text = "Collect the RESOLVED bugs in the given TestExpectations file"
argument_names = "TEST_EXPECTATIONS_FILE"
def execute(self, options, args, tool):
filename = args[0]
if not tool.filesystem.isfile(filename):
print "The given path is not a file, please pass a valid path."
return
ids = set()
inputfile = tool.filesystem.open_text_file_for_reading(filename)
for line in inputfile:
result = re.search("(https://bugs\.webkit\.org/show_bug\.cgi\?id=|webkit\.org/b/)([0-9]+)", line)
if result:
ids.add(result.group(2))
inputfile.close()
resolved_ids = set()
num_of_bugs = len(ids)
bugzilla = Bugzilla()
for i, bugid in enumerate(ids, start=1):
bug = bugzilla.fetch_bug(bugid)
print "Checking bug %s \t [%d/%d]" % (bugid, i, num_of_bugs)
if not bug.is_open():
resolved_ids.add(bugid)
print "Resolved bugs in %s :" % (filename)
for bugid in resolved_ids:
print "https://bugs.webkit.org/show_bug.cgi?id=%s" % (bugid)
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/tool/commands/queries.py
|
Python
|
bsd-3-clause
| 27,953
|
import random
nsamples = 100
for sample in range(nsamples):
x, y = random.gauss(0.0, 1.0), random.gauss(0.0, 1.0)
print x, y
|
vinhqdang/my_mooc
|
MOOC-work/coursera/FINISHED/Statistical Mechanics Algorithms and Computations/Week 4/programs_lecture_4/gauss_2d.py
|
Python
|
mit
| 134
|
from django.apps import AppConfig
class StaticpagesConfig(AppConfig):
name = 'staticpages'
|
Datateknologerna-vid-Abo-Akademi/date-website
|
staticpages/apps.py
|
Python
|
cc0-1.0
| 97
|
"""
This is the go division class.
It represents a playing division in a go tournament. In an (or the) open
division, all players play even games against each other. In a handicap
division, player play against each other with the higher ranked player
taking white, and giving the lower ranked player a number of handicap
stones equal to the different in their ranks.
In some tournaments, the number of handicap stones are further modified
(AGA City League, ACGA CGL).
A players in a division is represented by a graph. Each player in the
division is represented by a node. The graph is initialized into a
clique. Each edge represent a possible game that can be played in the
tournament in this division. The division class also hold another graph
to represent the games that has been played, which will be empty
initially.
As each game has been play, the edge that represent that game in the
clique is severed, and the edge is added to the graph that represent the
games that has been played.
"""
import networkx as nx
from GoPlayer import Player
from GoMatch import Match
from pprint import pprint
from fake_player_data import player_list
from random import choice
import unittest
G = nx.Graph()
G.add_nodes_from(player_list)
class Division(object):
"""Division Class
It holds all the players in the division, and all the possible
matches that has been played by players of this division and can be
played by players of this division
"""
def __init__(self, player_list=[]):
self.players= nx.Graph()
self.possible_matches = nx.Graph()
self.played_matches = nx.Graph()
if player_list:
if len(player_list) % 2 == 1:
"""Add a player to represent a "by"
Since there is an odd number of players, go is played
between 2 people.
"""
player_list.append(Player())
player_dict = dict(enumerate(player_list))
self.players.add_nodes_from(player_list)
for player in self.players:
for other_player in self.players:
if player != other_player:
self.possible_matches.add_edge(player,
other_player)
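    # --- Illustrative sketch, not part of the original class ----------------
    # The module docstring describes severing an edge from the clique of
    # possible matches and adding it to the played-matches graph once a game
    # has been played. The class does not implement that step here; a minimal
    # sketch of such a method, added purely for illustration, could look like
    # this:
    def record_played_match(self, player_a, player_b):
        """Move the edge for a finished game from possible to played."""
        if self.possible_matches.has_edge(player_a, player_b):
            self.possible_matches.remove_edge(player_a, player_b)
        self.played_matches.add_edge(player_a, player_b)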
########################################################################
class DivisionTest(unittest.TestCase):
def setUp(self):
"""Create a clique graph for players in the division"""
self.division_graph = nx.Graph()
self.division_graph.add_nodes_from(player_list)
for i in range(len(player_list)):
for j in range(i + 1, len(player_list)):
self.division_graph.add_edge(player_list[i], player_list[j],
object=Match(player_list[i], player_list[j]))
def test_unittest_setUp(self):
self.assertTrue(len(self.division_graph.nodes()) == len(player_list))
self.assertTrue(len(self.division_graph.edges()) ==
sum(range(len(player_list))))
def test_start_up(self):
a_division = Division(player_list)
if __name__ == "__main__":
unittest.main()
|
unyth/tournament_graph
|
TournamentDivision.py
|
Python
|
mit
| 3,205
|
import random
from plugin import Plugin
class Flatter(Plugin):
def help_text(self, bot):
return bot.translate("flatter_help")
def on_msg(self, bot, user_nick, host, channel, message):
if message.lower().startswith(bot.translate("flatter_cmd")):
if len(message.split()) >= 2:
if bot.getlanguage() == "de":
bot.send_message(channel, message.split()[1] + ", " + random.choice(list(open('lists/flattery.txt'))), user_nick)
elif bot.getlanguage() == "en":
# Source http://www.pickuplinesgalore.com/cheesy.html
bot.send_message(channel, message.split()[1] + ", " + random.choice(list(open('lists/flattery_en.txt'))), user_nick)
|
k4cg/Rezeptionistin
|
plugins/flatter.py
|
Python
|
mit
| 687
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks, analog
import numpy as np
class qa_random_uniform_source(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_byte(self):
minimum = 0
maximum = 5
seed = 3
n_items = 10000
rnd_src = analog.random_uniform_source_b(minimum, maximum, seed)
head = blocks.head(1, n_items)
snk = blocks.vector_sink_b(1)
self.tb.connect(rnd_src, head, snk)
# set up fg
self.tb.run()
# check data
res = snk.data()
        self.assertGreaterEqual(np.min(res), minimum)
self.assertLess(np.max(res), maximum)
def test_002_short(self):
minimum = 42
maximum = 1025
seed = 3
n_items = 10000
rnd_src = analog.random_uniform_source_s(minimum, maximum, seed)
head = blocks.head(2, n_items)
snk = blocks.vector_sink_s(1)
self.tb.connect(rnd_src, head, snk)
# set up fg
self.tb.run()
# check data
res = snk.data()
        self.assertGreaterEqual(np.min(res), minimum)
self.assertLess(np.max(res), maximum)
def test_003_int(self):
minimum = 2 ** 12 - 2
maximum = 2 ** 17 + 5
seed = 3
n_items = 10000
rnd_src = analog.random_uniform_source_i(minimum, maximum, seed)
head = blocks.head(4, n_items)
snk = blocks.vector_sink_i(1)
self.tb.connect(rnd_src, head, snk)
# set up fg
self.tb.run()
# check data
res = snk.data()
# plt.hist(res)
# plt.show()
self.assertGreaterEqual(np.min(res), minimum)
self.assertLess(np.max(res), maximum)
if __name__ == '__main__':
gr_unittest.run(qa_random_uniform_source, "qa_random_uniform_source.xml")
|
iohannez/gnuradio
|
gr-analog/python/analog/qa_random_uniform_source.py
|
Python
|
gpl-3.0
| 2,736
|
import Entity
import pygame
import Tile
import Keyboard
import random
import Client
import gui
import copy
class EntityTreasure(Entity.Entity):
def __init__(self,level, x, y,value,image=None):
super(EntityTreasure,self).__init__(level,x,y)
self.value = value
        self.description = "Some treasure"
self.basicFont = pygame.font.SysFont(None, 32)
self.image = image
self.blocksPath = False
try:
self.image = pygame.transform.scale(self.image, (32, 32))
except:
print "nope!",self.image
print self.x,self.y
    def setDescription(self, desc):
        self.description = desc
def setValue(self,value):
self.value = value
"""Determins if the entity has collided
@Params:
None
@Return:
hasCollided(boolean): if the entity has collided
"""
def hasCollided(self,xa, ya):
xMin = -1
xMax = 33
yMin = -1
yMax = 33
if self.isSolidTile(xa, ya, xMin, yMin):
return True
if self.isSolidTile(xa, ya, xMin+2, yMin):
return True
if self.isSolidTile(xa, ya, xMin+2, yMin+2):
return True
if self.isSolidTile(xa, ya, xMax, yMin):
return True
if self.isSolidTile(xa, ya, xMax-2, yMin):
return True
if self.isSolidTile(xa, ya, xMax-2, yMin+2):
return True
if self.isSolidTile(xa, ya, xMax, yMax):
return True
if self.isSolidTile(xa, ya, xMax-2, yMax):
return True
if self.isSolidTile(xa, ya, xMax-2, yMax-2):
return True
if self.isSolidTile(xa, ya, xMin, yMax):
return True
if self.isSolidTile(xa, ya, xMin+2, yMax):
return True
if self.isSolidTile(xa, ya, xMin+2, yMax-2):
return True
return False
"""Updates logic associated with entity
@Params:
None
    @Return:
None
"""
def tick(self):
super(EntityTreasure,self).tick()
self.hasCollided(0,0)
if self.entityCollidedWith!=None:
if self.entityCollidedWith.canPickUpTreasure==True:
self.entityCollidedWith.inHand = self
self.level.entities.remove(self)
self.entityCollidedWith = None
"""Renders the entity to the screen
@Params:
        screen(pygame.Surface): surface to draw to
xoff(int): x offset of screen
yoff(int): y offset of screen
@Return:
None
"""
def render(self,screen,xoff,yoff):
if self.image!=None:
screen.blit(self.image, (self.x-xoff,self.y-yoff))
else:
text = self.basicFont.render(str(self.value), True, (0,0,0))
textpos = text.get_rect(center=(self.x-xoff,self.y-yoff))
screen.blit(text, textpos)
|
helloworldC2/VirtualRobot
|
EntityTreasure.py
|
Python
|
mit
| 3,359
|
"""
Logs key data from a Fronius inverter to a CSV file for later analysis.
peter.marks@pobox.com
"""
import requests
import json
import datetime
import time
# Set this to the IP address of your inverter
host = "192.168.0.112"
# number of seconds between samples, set to zero to run once and exit
sample_seconds = 60 * 5
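# Each sample is appended to a file named YYYY_MM_DD.csv as a line of the form
#   HH:MM:SS<TAB><watts>
# (see write_to_logfile() and watts_generated() below).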
def main():
print("started")
while True:
try:
watts = watts_generated()
now = time.strftime("%H:%M:%S")
line = "%s\t%s\n" % (now, watts)
# print(line)
write_to_logfile(line)
except requests.exceptions.ConnectTimeout:
print("Connect timeout at %s" % time.strftime("%H:%M:%S"))
if sample_seconds > 0:
time.sleep(sample_seconds)
else:
return
def write_to_logfile(line):
today = time.strftime("%Y_%m_%d")
file_name = today + ".csv"
out_file = open(file_name, "a")
out_file.write(line)
out_file.close()
def watts_generated():
url = "http://" + host + "/solar_api/v1/GetInverterRealtimeData.cgi?Scope=System"
r = requests.get(url, timeout=2)
json_data = r.json()
result = json_data["Body"]["Data"]["PAC"]["Values"]["1"]
return result
if __name__ == "__main__":
main()
|
peterbmarks/froniusLogger
|
froniusLogger.py
|
Python
|
apache-2.0
| 1,273
|
"""
This example shows how to work with the Hydrogen radial wavefunctions.
"""
from sympy import var, pprint, Integral, oo, Eq
from sympy.physics.hydrogen import R_nl
print "Hydrogen radial wavefunctions:"
var("r a")
print "R_{21}:"
pprint(R_nl(2, 1, a, r))
print "R_{60}:"
pprint(R_nl(6, 0, a, r))
print "Normalization:"
i = Integral(R_nl(1, 0, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
i = Integral(R_nl(2, 0, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
i = Integral(R_nl(2, 1, 1, r)**2 * r**2, (r, 0, oo))
pprint(Eq(i, i.doit()))
|
pernici/sympy
|
examples/advanced/hydrogen.py
|
Python
|
bsd-3-clause
| 558
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.fcoe_networks import FcoeNetworks
from hpOneView.resources.resource import ResourceClient
class FcoeNetworksTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._fcoe_networks = FcoeNetworks(self.connection)
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._fcoe_networks.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'create')
def test_create_should_use_given_values(self, mock_create):
resource = {
'name': 'vsan1',
'vlanId': '201',
'connectionTemplateUri': None,
'type': 'fcoe-networkV2',
}
resource_rest_call = resource.copy()
mock_create.return_value = {}
self._fcoe_networks.create(resource, 10)
mock_create.assert_called_once_with(resource_rest_call, timeout=10)
@mock.patch.object(ResourceClient, 'create')
def test_create_should_use_default_values(self, mock_create):
resource = {
'name': 'OneViewSDK Test FCoE Network',
}
resource_with_default_values = {
'name': 'OneViewSDK Test FCoE Network',
'type': 'fcoe-network',
}
mock_create.return_value = {}
self._fcoe_networks.create(resource)
mock_create.assert_called_once_with(resource_with_default_values, timeout=-1)
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_given_values(self, mock_update):
resource = {
'name': 'vsan1',
'vlanId': '201',
'connectionTemplateUri': None,
'type': 'fcoe-networkV2',
}
resource_rest_call = resource.copy()
mock_update.return_value = {}
self._fcoe_networks.update(resource, timeout=12)
mock_update.assert_called_once_with(resource_rest_call, timeout=12)
@mock.patch.object(ResourceClient, 'update')
def test_update_should_use_default_values(self, mock_update):
resource = {
'name': 'OneViewSDK Test FCoE Network',
}
resource_with_default_values = {
'name': 'OneViewSDK Test FCoE Network',
'type': 'fcoe-network',
}
mock_update.return_value = {}
self._fcoe_networks.update(resource)
mock_update.assert_called_once_with(resource_with_default_values, timeout=-1)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once(self, mock_delete):
id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
self._fcoe_networks.delete(id, force=False, timeout=50)
mock_delete.assert_called_once_with(id, force=False, timeout=50)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._fcoe_networks.get_by('name', 'OneViewSDK Test FCoE Network')
mock_get_by.assert_called_once_with('name', 'OneViewSDK Test FCoE Network')
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._fcoe_networks.get('3518be0e-17c1-4189-8f81-83f3724f6155')
mock_get.assert_called_once_with('3518be0e-17c1-4189-8f81-83f3724f6155')
@mock.patch.object(ResourceClient, 'get')
def test_get_with_uri_called_once(self, mock_get):
uri = '/rest/fcoe-networks/3518be0e-17c1-4189-8f81-83f3724f6155'
self._fcoe_networks.get(uri)
mock_get.assert_called_once_with(uri)
|
danielreed/python-hpOneView
|
tests/unit/resources/networking/test_fcoe_networks.py
|
Python
|
mit
| 4,952
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import re
import os
import select
import sys
import subprocess
from color import Coloring
from command import Command, MirrorSafeCommand
_CAN_COLOR = [
'branch',
'diff',
'grep',
'log',
]
class ForallColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'forall')
self.project = self.printer('project', attr='bold')
class Forall(Command, MirrorSafeCommand):
common = False
helpSummary = "Run a shell command in each project"
helpUsage = """
%prog [<project>...] -c <command> [<arg>...]
"""
helpDescription = """
Executes the same shell command in each project.
Output Formatting
-----------------
The -p option causes '%prog' to bind pipes to the command's stdin,
stdout and stderr streams, and pipe all output into a continuous
stream that is displayed in a single pager session. Project headings
are inserted before the output of each command is displayed. If the
command produces no output in a project, no heading is displayed.
The formatting convention used by -p is very suitable for some
types of searching, e.g. `repo forall -p -c git log -SFoo` will
print all commits that add or remove references to Foo.
The -v option causes '%prog' to display stderr messages if a
command produces output only on stderr. Normally the -p option
causes command output to be suppressed until the command produces
at least one byte of output on stdout.
Environment
-----------
pwd is the project's working directory. If the current client is
a mirror client, then pwd is the Git repository.
REPO_PROJECT is set to the unique name of the project.
REPO_PATH is the path relative to the root of the client.
REPO_REMOTE is the name of the remote system from the manifest.
REPO_LREV is the name of the revision from the manifest, translated
to a local tracking branch. If you need to pass the manifest
revision to a locally executed git command, use REPO_LREV.
REPO_RREV is the name of the revision from the manifest, exactly
as written in the manifest.
shell positional arguments ($1, $2, .., $#) are set to any arguments
following <command>.
Unless -p is used, stdin, stdout, stderr are inherited from the
terminal and are not redirected.
"""
def _Options(self, p):
def cmd(option, opt_str, value, parser):
setattr(parser.values, option.dest, list(parser.rargs))
while parser.rargs:
del parser.rargs[0]
p.add_option('-c', '--command',
help='Command (and arguments) to execute',
dest='command',
action='callback',
callback=cmd)
g = p.add_option_group('Output')
g.add_option('-p',
dest='project_header', action='store_true',
help='Show project headers before output')
g.add_option('-v', '--verbose',
dest='verbose', action='store_true',
help='Show command error messages')
def WantPager(self, opt):
return opt.project_header
def Execute(self, opt, args):
if not opt.command:
self.Usage()
cmd = [opt.command[0]]
shell = True
if re.compile(r'^[a-z0-9A-Z_/\.-]+$').match(cmd[0]):
shell = False
if shell:
cmd.append(cmd[0])
cmd.extend(opt.command[1:])
if opt.project_header \
and not shell \
and cmd[0] == 'git':
# If this is a direct git command that can enable colorized
# output and the user prefers coloring, add --color into the
# command line because we are going to wrap the command into
# a pipe and git won't know coloring should activate.
#
for cn in cmd[1:]:
if not cn.startswith('-'):
break
if cn in _CAN_COLOR:
class ColorCmd(Coloring):
def __init__(self, config, cmd):
Coloring.__init__(self, config, cmd)
if ColorCmd(self.manifest.manifestProject.config, cn).is_on:
cmd.insert(cmd.index(cn) + 1, '--color')
mirror = self.manifest.IsMirror
out = ForallColoring(self.manifest.manifestProject.config)
out.redirect(sys.stdout)
rc = 0
first = True
for project in self.GetProjects(args):
env = dict(os.environ.iteritems())
def setenv(name, val):
if val is None:
val = ''
env[name] = val
setenv('REPO_PROJECT', project.name)
setenv('REPO_PATH', project.relpath)
setenv('REPO_REMOTE', project.remote.name)
setenv('REPO_LREV', project.GetRevisionId())
setenv('REPO_RREV', project.revisionExpr)
if mirror:
setenv('GIT_DIR', project.gitdir)
cwd = project.gitdir
else:
cwd = project.worktree
if not os.path.exists(cwd):
if (opt.project_header and opt.verbose) \
or not opt.project_header:
print >>sys.stderr, 'skipping %s/' % project.relpath
continue
if opt.project_header:
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
else:
stdin = None
stdout = None
stderr = None
p = subprocess.Popen(cmd,
cwd = cwd,
shell = shell,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
if opt.project_header:
class sfd(object):
def __init__(self, fd, dest):
self.fd = fd
self.dest = dest
def fileno(self):
return self.fd.fileno()
empty = True
didout = False
errbuf = ''
p.stdin.close()
s_in = [sfd(p.stdout, sys.stdout),
sfd(p.stderr, sys.stderr)]
for s in s_in:
flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
while s_in:
in_ready, out_ready, err_ready = select.select(s_in, [], [])
for s in in_ready:
buf = s.fd.read(4096)
if not buf:
s.fd.close()
s_in.remove(s)
continue
if not opt.verbose:
if s.fd == p.stdout:
didout = True
else:
errbuf += buf
continue
if empty:
if first:
first = False
else:
out.nl()
out.project('project %s/', project.relpath)
out.nl()
out.flush()
if errbuf:
sys.stderr.write(errbuf)
sys.stderr.flush()
errbuf = ''
empty = False
s.dest.write(buf)
s.dest.flush()
r = p.wait()
if r != 0 and r != rc:
rc = r
if rc != 0:
sys.exit(rc)
|
abstrakraft/repo
|
subcmds/forall.py
|
Python
|
apache-2.0
| 7,450
|
from bokeh.layouts import layout
from bokeh.models import Div, RangeSlider, Spinner
from bokeh.plotting import figure, show
# prepare some data
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [4, 5, 5, 7, 2, 6, 4, 9, 1, 3]
# create plot with circle glyphs
p = figure(x_range=(1, 9), width=500, height=250)
points = p.circle(x=x, y=y, size=30, fill_color="#21a7df")
# set up textarea (div)
div = Div(
text="""
<p>Select the circle's size using this control element:</p>
""",
width=200,
height=30,
)
# set up spinner
spinner = Spinner(
title="Circle size",
low=0,
high=60,
step=5,
value=points.glyph.size,
width=200,
)
spinner.js_link("value", points.glyph, "size")
# set up RangeSlider
range_slider = RangeSlider(
title="Adjust x-axis range",
start=0,
end=10,
step=1,
value=(p.x_range.start, p.x_range.end),
)
range_slider.js_link("value", p.x_range, "start", attr_selector=0)
range_slider.js_link("value", p.x_range, "end", attr_selector=1)
# create layout
layout = layout(
[
[div, spinner],
[range_slider],
[p],
]
)
# show result
show(layout)
|
bokeh/bokeh
|
sphinx/source/docs/first_steps/examples/first_steps_9_widgets.py
|
Python
|
bsd-3-clause
| 1,151
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobfeasible - Check current job feasibility for queued job
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.jobfeasible import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/jobfeasible.py
|
Python
|
gpl-2.0
| 1,099
|
import unittest
from sklearn.ensemble import RandomForestClassifier
from ParamSklearn.components.feature_preprocessing.feature_agglomeration import FeatureAgglomeration
from ParamSklearn.util import _test_preprocessing, PreprocessingTestCase, \
get_dataset
import sklearn.metrics
class FeatureAgglomerationComponentTest(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(FeatureAgglomeration)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(3):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=False)
configuration_space = FeatureAgglomeration.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = FeatureAgglomeration(random_state=1,
**{hp_name: default[hp_name] for
hp_name in default})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RandomForestClassifier(random_state=1)
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.8026715)
def test_preprocessing_dtype(self):
super(FeatureAgglomerationComponentTest,
self)._test_preprocessing_dtype(FeatureAgglomeration,
test_sparse=False)
|
automl/paramsklearn
|
tests/components/feature_preprocessing/test_feature_agglomeration.py
|
Python
|
bsd-3-clause
| 1,946
|
from JumpScale import j
class CodeExecutor:
def __init__(self):
pass
def evalFile(self,path):
content=j.system.fs.fileGetContents(path)
content=self.eval(content)
j.system.fs.writeFile(path,content)
def eval(self,code):
return j.tools.text.eval(code)
def _tostr(self,result):
return j.tools.text.tostr(result)
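# Minimal usage sketch (illustration only; assumes a JumpScale environment that
# provides the global `j` object used above):
#
#   executor = CodeExecutor()
#   result = executor.eval(some_code_string)    # delegates to j.tools.text.eval
#   executor.evalFile("/tmp/snippet.py")        # evaluates the file and rewrites it in place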
|
Jumpscale/jumpscale6_core
|
lib/JumpScale/baselib/codeexecutor/CodeExecutor.py
|
Python
|
bsd-2-clause
| 378
|
"""
Simple utility module to generate random passwords
"""
import os
import string
DEFAULT_PASSWORD_LENGTH = 12
ALLOWED_PASSWORD_CHARS = string.ascii_letters + string.digits + string.punctuation
def random_password(length=DEFAULT_PASSWORD_LENGTH):
"""
Returns a random password of `length` characters
"""
pw = ''
    for i in range(length):
        # os.urandom(1) returns one random byte; ord() converts it to an int index
        idx = ord(os.urandom(1)) % len(ALLOWED_PASSWORD_CHARS)
        pw += ALLOWED_PASSWORD_CHARS[idx]
return pw
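# Example usage (illustrative only, not part of the original module):
#
#   >>> from pepperstack.utils.cred import random_password
#   >>> len(random_password())
#   12
#   >>> len(random_password(20))
#   20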
|
Korrigan/pepperstack
|
pepperstack/utils/cred.py
|
Python
|
mit
| 481
|
from setuptools import setup, find_packages
setup(
name="dish",
version="0.0.0",
author="James J. Porter",
author_email="porterjamesj@gmail.com",
description="distributed shell",
license="MIT",
url="https://github.com/porterjamesj/dish",
packages=find_packages(),
install_requires=[
'ipython-cluster-helper == 0.2.19',
'ipython >= 2.0.0',
'logbook',
'pyzmq',
'cloud'
]
)
|
LabAdvComp/dish
|
setup.py
|
Python
|
mit
| 452
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import socket
import traceback
import karesansui
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.utils import is_uuid, is_int, karesansui_database_exists
from karesansui.lib.utils import generate_phrase, generate_uuid, string_from_uuid
from karesansui.lib.file.k2v import K2V
from karesansui.lib.crypt import sha1encrypt, sha1compare
from karesansui.lib.const import MACHINE_ATTRIBUTE, MACHINE_HYPERVISOR
from karesansui.db import get_engine, get_metadata, get_session
from karesansui.db.model.user import User
from karesansui.db.model.notebook import Notebook
from karesansui.db.model.tag import Tag
from karesansui.db.model.machine import Machine
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_CHAR, CHECK_MIN, CHECK_MAX, CHECK_ONLYSPACE
from karesansui.lib.const import \
DEFAULT_LANGS, USER_LIST_RANGE, \
EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, \
USER_MIN_LENGTH, USER_MAX_LENGTH, \
PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH, \
LANGUAGES_MIN_LENGTH, LANGUAGES_MAX_LENGTH, \
ID_MIN_LENGTH, ID_MAX_LENGTH
from karesansui.lib.utils import is_param, is_empty
def validates_user(obj):
checker = Checker()
check = True
_ = obj._
checker.errors = []
if not is_param(obj.input, 'nickname'):
check = False
checker.add_error(_('"%s" is required.') % _('User Name'))
else:
check = checker.check_username(
_('User Name'),
obj.input.nickname,
CHECK_EMPTY | CHECK_LENGTH | CHECK_ONLYSPACE,
min = USER_MIN_LENGTH,
max = USER_MAX_LENGTH,
) and check
if not is_param(obj.input, 'email'):
check = False
checker.add_error(_('"%s" is required.') % _('Mail Address'))
else:
check = checker.check_mailaddress(
_('Mail Address'),
obj.input.email,
CHECK_EMPTY | CHECK_LENGTH | CHECK_VALID,
min = EMAIL_MIN_LENGTH,
max = EMAIL_MAX_LENGTH,
) and check
_password_flag = True
if not is_param(obj.input, 'password'):
check = False
_password_flag = False
checker.add_error(_('"%s" is required.') % _('Password'))
if not is_param(obj.input, 'retype'):
check = False
_password_flag = False
checker.add_error(_('"%s" is required.') % _('Retype'))
if _password_flag == True:
if not is_empty(obj.input.password) or \
not is_empty(obj.input.retype):
check = checker.check_password(
_('Password'),
obj.input.password,
                obj.input.retype,
CHECK_EMPTY | CHECK_LENGTH,
min = PASSWORD_MIN_LENGTH,
max = PASSWORD_MAX_LENGTH,
) and check
check = checker.check_languages(
_('Language'),
obj.input.languages,
CHECK_EMPTY | CHECK_VALID | CHECK_LENGTH,
min = LANGUAGES_MIN_LENGTH,
max = LANGUAGES_MAX_LENGTH,
) and check
obj.view.alert = checker.errors
return check
class Init(Rest):
def _GET(self, *param, **params):
self.view.database_bind = karesansui.config['database.bind']
self.view.default_locale = karesansui.config['application.default.locale']
self.view.locales = DEFAULT_LANGS.keys()
if karesansui_database_exists() is True:
return web.tempredirect("/", absolute=False)
if self.is_mode_input():
return True
else:
return True
return True
def _POST(self, *param, **params):
if not validates_user(self):
return web.badrequest(self.view.alert)
engine = get_engine()
metadata = get_metadata()
session = get_session()
try:
metadata.drop_all()
metadata.tables['machine2jobgroup'].create()
metadata.create_all()
except Exception, e:
traceback.format_exc()
raise Exception('Initializing/Updating a database error - %s' % ''.join(e.args))
(password, salt) = sha1encrypt(self.input.password)
user = User(u"%s" % self.input.email,
unicode(password),
unicode(salt),
u"%s" % self.input.nickname,
u"%s" % self.input.languages,
)
session.add(user)
session.commit()
# Tag Table set.
tag = Tag(u"default")
session.add(tag)
session.commit()
# Machine Table set.
#user = session.query(User).filter(User.email == self.input.email).first()
uuid = string_from_uuid(generate_uuid())
fqdn = socket.gethostname()
notebook = Notebook(u"", u"")
machine = Machine(user,
user,
u"%s" % uuid,
u"%s" % fqdn,
MACHINE_ATTRIBUTE['HOST'],
MACHINE_HYPERVISOR['REAL'],
notebook,
[tag],
u"%s" % fqdn,
u'icon-guest1.png',
False,
None,
)
session.add(machine)
session.commit()
session.close()
return web.created(None)
urls = ('/init/?(\.part)?$', Init,)
|
karesansui/karesansui
|
karesansui/gadget/init.py
|
Python
|
mit
| 6,792
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Widget to display simulation data of a CSDF graph.
author: Sander Giesselink
"""
import sys
from PyQt5.QtWidgets import QWidget, QGraphicsItem, QPushButton, QVBoxLayout, QMenu, QAction, QInputDialog, QMessageBox
from PyQt5.QtCore import QRectF, QRect, QPointF, QPoint, Qt, QVariant
from PyQt5.QtGui import QColor, QPainter, QBrush, QPainterPath, QLinearGradient, QFont, QContextMenuEvent
from collections import Counter
import schemastyle
class Node(QGraphicsItem):
def __init__(self, widget, view, nodeName):
super().__init__()
self.ioWidth = 10
self.ioHeight = 10
self.ioHeightDifference = 10
self.nodeBodyWidth = 80
self.maxNameLength = 6
self.calculateNodeColors(QColor(Qt.red))
self.lastPos = QPointF(0, 0)
self.yTranslationLeftIO = 0
self.yTranslationRightIO = 0
self.snappingIsOn = True
self.showNeutralIO = False
self.nodeFunction = 'func'
self.clashCode = 'ccode'
self.widget = widget
self.view = view
self.nodeName = nodeName
self.nodeNameDisplayed = ''
self.edgeList = []
self.ioList = []
#Add 2x IO ('left' = left, 'right' = right /,/ 0 = neutral, 1 = input, 2 is output)
self.addNewIO('left', 0)
self.addNewIO('right', 0)
#Set flags for selecting, moving and enabling a position change event
self.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsGeometryChanges)
self.setAcceptHoverEvents(True)
self.hover = False
def boundingRect(self):
#Used for collision detection and repaint
return QRectF(0, 0, self.nodeBodyWidth + 3, self.nodeBodyHeight + 3)
def shape(self):
# Determines the collision area
path = QPainterPath()
path.addRect(0, 0, self.nodeBodyWidth, self.nodeBodyHeight)
return path
def paint(self, painter, option, widget):
lod = option.levelOfDetailFromTransform(painter.worldTransform())
self.paintNodeBody(painter, lod)
if lod > 0.2:
self.paintNodeIO(painter, lod)
if lod > 0.4:
self.paintNodeName(painter)
def paintNodeBody(self, painter, lod):
painter.setPen(schemastyle.NODE_BACKGROUND_COLOR)
brush = QBrush(schemastyle.NODE_BACKGROUND_COLOR)
if self.hover:
brush = QBrush(schemastyle.NODE_BACKGROUND_COLOR)
if QGraphicsItem.isSelected(self):
brush = QBrush(schemastyle.NODE_BACKGROUND_COLOR)
painter.setBrush(brush)
if lod > 0.1:
painter.setPen(schemastyle.NODE_SHADOW_COLOR)
painter.setBrush(QBrush(schemastyle.NODE_SHADOW_COLOR))
painter.drawRoundedRect(3, 3, self.nodeBodyWidth, self.nodeBodyHeight, 5, 5)
painter.setPen(schemastyle.NODE_BACKGROUND_COLOR)
painter.setBrush(QBrush(schemastyle.NODE_BACKGROUND_COLOR))
painter.drawRoundedRect(0, 0, self.nodeBodyWidth, self.nodeBodyHeight, 5, 5)
else:
painter.drawRect(0, 0, self.nodeBodyWidth, self.nodeBodyHeight)
def paintNodeIO(self, painter, lod):
#Draw all IO
for i in range(0, len(self.ioList)):
#Center io if one side contains less io
yTranslation = 0
if self.ioList[i][3] == 'left':
yTranslation = self.yTranslationLeftIO
else:
yTranslation = self.yTranslationRightIO
# self.ioList[i][4] is state of socket: 0 neutral 1 input 2 output
# self.ioList[i][5]: is hover
painter.setPen(schemastyle.SOCKET_NEUTRAL_COLOR)
brush = QBrush(schemastyle.SOCKET_NEUTRAL_COLOR)
painter.setBrush(brush)
#Don't paint neutral IO if disabled
if self.ioList[i][4] != 0:
if self.ioList[i][3] == 'left':
rect = QRect(self.ioList[i][0] + 5, self.ioList[i][1] + yTranslation + 1, self.ioWidth-2, self.ioHeight-2)
else:
rect = QRect(self.ioList[i][0] - 5, self.ioList[i][1] + yTranslation + 1, self.ioWidth-2, self.ioHeight-2)
painter.drawEllipse(rect)
#Paint IO name
if lod > 0.4:
painter.setFont(QFont("Arial", 6))
if self.ioList[i][3] == 'left':
painter.drawText(self.getIONameRect(i, yTranslation, self.ioList[i][3]), Qt.AlignLeft, str(self.ioList[i][6]))
else:
painter.drawText(self.getIONameRect(i, yTranslation, self.ioList[i][3]), Qt.AlignRight, str(self.ioList[i][6]))
painter.setPen(Qt.black)
def paintNodeName(self, painter):
if self.nodeNameDisplayed == '':
self.setNodeName()
font = QFont("Arial", 12)
font.setItalic(True)
painter.setFont(font)
rect = QRectF(0, 0, self.nodeBodyWidth, self.nodeBodyHeight)
painter.setPen(schemastyle.NODE_TEXT_COLOR)
painter.drawText(rect, Qt.AlignCenter, self.nodeNameDisplayed)
def mousePressEvent(self, event):
self.mouseIsOnIO(event.pos(), True)
super().mousePressEvent(event)
self.update()
#Must be done after super().mousePressEvent(event) in order to
#flag the node again after clicking on an input/output
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemIsMovable, True)
def mouseMoveEvent(self, event):
#Code for ShiftModifier goes here
self.update()
super().mouseMoveEvent(event)
def itemChange(self, change, value):
# Move selected nodes and edges to the front, untill unselected
if change == QGraphicsItem.ItemSelectedChange:
if QGraphicsItem.isSelected(self):
#Unselected (since the flag is not updated yet)
self.setZValue(0)
self.setZValueEdges(1)
else:
#Selected
self.setZValue(4)
self.setZValueEdges(5)
#If the position of the node changes -> calculate position change
#and move edges with the node
newPos = value
if change == QGraphicsItem.ItemPositionChange:
if self.snappingIsOn:
newPos = self.snapToGrid(newPos)
posChange = newPos - self.lastPos
# Due to the grid snapping, only process when node actually moved
if not posChange.isNull():
self.moveEdges(posChange)
self.lastPos = newPos
return super(Node, self).itemChange(change, newPos)
def snapToGrid(self, position):
#Return position of closest grid point
gridSizeX = 40
gridSizeY = 10
curPos = QPoint(position.x(), position.y())
gridPos = QPoint(round(curPos.x() / gridSizeX) * gridSizeX, round(curPos.y() / gridSizeY) * gridSizeY)
return gridPos
def mouseReleaseEvent(self, event):
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemIsMovable, True)
super().mouseReleaseEvent(event)
self.update()
def hoverMoveEvent(self, event):
#Don't execute when the nodeBody is selected in order to prevent unselecting the nodeBody
if not QGraphicsItem.isSelected(self):
self.mouseIsOnIO(event.pos())
super().hoverMoveEvent(event)
self.update()
        #Must be done after super().hoverMoveEvent(event) in order to
        #flag the node again after hovering over an input/output
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QGraphicsItem.ItemIsMovable, True)
def hoverEnterEvent(self, event):
self.setCursor(Qt.PointingHandCursor)
super().hoverEnterEvent(event)
self.update()
def hoverLeaveEvent(self, event):
self.hover = False
self.setHoveringToFalse()
self.setCursor(Qt.ArrowCursor)
super().hoverLeaveEvent(event)
self.update()
def contextMenuEvent(self, event):
print('node menu triggered')
# pos = event.scenePos()
# point = self.view.mapFromScene(pos)
# point = self.view.mapToGlobal(point)
# self.menu.exec(point)
def getIOPoint(self, sideIndex, side):
#Gets the point from where the IO rectangle is drawn
addWidthForRightSide = 0
if side == 'right':
addWidthForRightSide = self.nodeBodyWidth - self.ioWidth
ioPoint = QPointF(addWidthForRightSide, sideIndex * (self.ioHeightDifference + self.ioHeight) + self.ioHeight / 2)
return ioPoint
def getIOPointForEdge(self, side, ioType):
#Gets the point where an edge can connect to the IO
addedX = 0
addedY = 0
if side == 'right':
#Add x translation if the IO lies on the right side of the node
addedX = self.nodeBodyWidth
#Add y translation for the exact IO position relative to the node
ioIndex = self.getLengthRightSide()
addedY = (ioIndex - 1) * (self.ioHeightDifference + self.ioHeight) + self.ioHeight / 2 + (self.ioHeight / 2) + self.yTranslationRightIO
else:
ioIndex = self.getLengthLeftSide()
addedY = (ioIndex - 1) * (self.ioHeightDifference + self.ioHeight) + self.ioHeight / 2 + (self.ioHeight / 2) + self.yTranslationLeftIO
#Returns the calculated point of the IO
ioPoint = QPointF(self.pos().x() + addedX, self.pos().y() + addedY)
return ioPoint
def addNewIO(self, side, ioType):
if side == 'left':
i = self.getLengthLeftSide()
else:
i = self.getLengthRightSide()
#---newIO = (ioPoint.x, ioPoint.y, hasEdge, side, ioType, mouseHover, name)---
newIO = (self.getIOPoint(i, side).x(), self.getIOPoint(i, side).y(), False, side, ioType, False, '')
self.ioList.append(newIO)
#Update the nodeBodyHeight
self.updateNode()
def setIOType(self, side, ioType, name):
        #Update the type parameter of the IO
i = self.getLastIOSide(side)
self.ioList.insert(i, (self.ioList[i][0], self.ioList[i][1], self.ioList[i][2], self.ioList[i][3], ioType, self.ioList[i][5], name))
del self.ioList[i + 1]
def mouseIsOnIO(self, mousePos, click = False):
#Returns the IO that the mouse is on
for i in range(0, len(self.ioList)):
#Adjust if IO is centered on a side
if self.ioList[i][3] == 'left':
yTranslation = self.yTranslationLeftIO
else:
yTranslation = self.yTranslationRightIO
#Get point of IO
IOPoint = QPointF(self.ioList[i][0], self.ioList[i][1] + yTranslation)
#If mouse is over IO -> return IO
if mousePos.x() > IOPoint.x() and mousePos.x() < IOPoint.x() + self.ioWidth:
if mousePos.y() > IOPoint.y() and mousePos.y() < IOPoint.y() + self.ioHeight:
# entry point for drawing graphs.......
# if click:
# print('mouse on IO: ' + str(i) + ' (' + str(self.ioList[i][3]) + ', ' + str(self.ioList[i][4]) + ')')
                    #Update the hover parameter of the IO
self.ioList.insert(i, (self.ioList[i][0], self.ioList[i][1], self.ioList[i][2], self.ioList[i][3], self.ioList[i][4], True, self.ioList[i][6]))
del self.ioList[i + 1]
self.setFlag(QGraphicsItem.ItemIsSelectable, False)
self.setFlag(QGraphicsItem.ItemIsMovable, False)
self.hover = False
return i
#If no IO is found under the mouse -> make sure hovering is enabled and return -1
self.hover = True
self.setHoveringToFalse()
return -1
def setHoveringToFalse(self):
for i in range(0, len(self.ioList)):
#Set all hover parameters to false
self.ioList.insert(i, (self.ioList[i][0], self.ioList[i][1], self.ioList[i][2], self.ioList[i][3], self.ioList[i][4], False, self.ioList[i][6]))
del self.ioList[i + 1]
def updateNode(self):
        #Update the dimensional values of the node and its IO
self.calculateNodeBodyHeight()
def calculateNodeBodyHeight(self):
#Get how many inputs/outputs are on each side
ioOnLeftSide = self.getLengthLeftSide()
ioOnRightSide = self.getLengthRightSide()
#Pick the longest side
if ioOnLeftSide > ioOnRightSide:
longestSide = ioOnLeftSide
else:
longestSide = ioOnRightSide
#Make node smaller when the neutral IO is hidden
if not self.showNeutralIO:
longestSide = longestSide - 1
#Set nodeBodyHeight based on longest io side
self.nodeBodyHeight = (longestSide * (self.ioHeightDifference + self.ioHeight))
def getNodeBodyHeigth(self):
return self.nodeBodyHeight
def getLengthLeftSide(self):
countSides = Counter(elem[3] for elem in self.ioList)
return countSides['left']
def getLengthRightSide(self):
countSides = Counter(elem[3] for elem in self.ioList)
return countSides['right']
def getLastIOSide(self, side):
#Returns the index of the last IO on a side
ioIndex = 0
for i in reversed(range(len(self.ioList))):
if side in self.ioList[i]:
ioIndex = i
break
return ioIndex
def setNodeName(self):
#Determine the displayed name of the node and its location once
self.nodeNameDisplayed = self.nodeName
if len(self.nodeName) > self.maxNameLength:
#Cutoff text if the name is too long
self.nodeNameDisplayed = self.nodeName[:self.maxNameLength]
self.nodeNameDisplayed += '..'
def getRoundedRectPath(self, i, yTranslation, side):
        rect = QRect(self.ioList[i][0], self.ioList[i][1] + yTranslation, self.ioWidth, self.ioHeight)
        path = QPainterPath()
        path.setFillRule(Qt.WindingFill)
path.addRoundedRect(self.ioList[i][0], self.ioList[i][1] + yTranslation, self.ioWidth, self.ioHeight, 2, 2)
#Remove rounded edges on left or right side
if side == 'left':
path.addRect(self.ioList[i][0], self.ioList[i][1] + yTranslation, 2, 2)
path.addRect(self.ioList[i][0], self.ioList[i][1] + yTranslation + self.ioHeight - 2, 2, 2)
else:
path.addRect(self.ioList[i][0] + self.ioWidth - 2, self.ioList[i][1] + yTranslation, 2, 2)
path.addRect(self.ioList[i][0] + self.ioWidth - 2, self.ioList[i][1] + yTranslation + self.ioHeight - 2, 2, 2)
return path
def getIONameRect(self, i, yTranslation, side):
if side == 'left':
rect = QRectF(self.ioList[i][0] + self.ioWidth + 2, self.ioList[i][1] + yTranslation, self.ioWidth, self.ioHeight)
else:
rect = QRectF(self.ioList[i][0] - self.ioWidth - 2, self.ioList[i][1] + yTranslation, self.ioWidth, self.ioHeight)
return rect
def addEdge(self, edge, edgeSide):
#Add new edge with: (reference to edge, 'begin' or 'end')
newEdge = (edge, edgeSide)
self.edgeList.append(newEdge)
def moveEdges(self, posChange, side = 'both'):
#Move edges connected to node
if len(self.edgeList) > 0:
for i in range(len(self.edgeList)):
if 'begin' in self.edgeList[i]:
#Only move edge side if the entire edge is moved or the specified side is moved
if side == 'both' or side == self.ioList[i][3]:
self.edgeList[i][0].moveEdge(posChange, 'begin')
else:
if side == 'both' or side == self.ioList[i][3]:
self.edgeList[i][0].moveEdge(posChange, 'end')
def setZValueEdges(self, zValue):
for i in range(len(self.edgeList)):
self.edgeList[i][0].setZValueEdge(zValue)
def calculateNodeColors(self, color):
#Calculate all node colors based on a given color
r = color.red()
g = color.green()
b = color.blue()
if r < 60:
r = 60
if g < 60:
g = 60
if b < 60:
b = 60
self.nodeBodyColor = QColor(r, g, b)
self.nodeBodyColorGradient = QColor(r - 30, g - 30, b - 30)
self.nodeBodyColorSelected = QColor(r - 60, g - 60, b - 60)
self.nodeBodyColorHover = QColor(r - 30, g - 30, b - 30)
#Colors of IO (fixed colors)
self.nodeInputColor = QColor(230, 230, 230)
self.nodeInputColorHover = QColor(255, 255, 255)
self.nodeOutputColor = QColor(120, 120, 120)
self.nodeOutputColorHover = QColor(80, 80, 80)
self.nodeNeutralColor = QColor(180, 180, 180, 100)
self.nodeNeutralColorHover = QColor(180, 180, 180)
|
rinsewester/SchemaViz
|
node.py
|
Python
|
mit
| 17,791
|
# This challenge is super big, and it's impossible to solve with IDA alone.
# However, we are sure that most of the code is just garbage - you can't have
# a 100-point challenge with that much non-garbage code. Therefore the idea is
# to use GDB along with hardware breakpoints to find out where each byte is
# verified, and then run that single part of code inside angr to solve the
# password.
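#
# Rough sketch of the GDB step described above (standard GDB commands; the
# watched address is a placeholder, not taken from this binary):
#
#   (gdb) rwatch *<address-of-a-password-byte>   # hardware read watchpoint
#   (gdb) continue                               # stops where that byte is checked
#   (gdb) x/10i $pc                              # inspect the comparison code
#
# The addresses recovered this way are what feed calc_one_byte() below as
# start_addr, load_addr1/load_addr2 and cmp_addr.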
from angr.procedures.stubs.UserHook import UserHook
import angr
def prepare_state(state, known_passwords):
state = state.copy()
password = [ ]
for i in xrange(0, len(known_passwords) + 1):
password.append(state.se.BVS('password_%d' % i, 8))
state.memory.store(0xd0000000 + i, password[-1])
for i, char in enumerate(known_passwords):
state.add_constraints(password[i] == ord(char))
state.memory.store(0x6a3b7c, state.se.BVV(0, 32))
state.memory.store(0x6a3b80, state.se.BVV(0, 32))
state.regs.rbp = 0xffffffff00000000
state.memory.store(state.regs.rbp-0x148, state.se.BVV(0xd0000100, 64), endness=state.arch.memory_endness)
state.memory.store(state.regs.rbp-0x140, state.se.BVV(0xd0000100, 64), endness=state.arch.memory_endness)
return state, password
#
# A bunch of hooks so that I don't have to take care of the following code snippet:
# .text:0000000000457294 mov r8, [rbp+var_150]
# .text:000000000045729B mov r8, [r8]
# .text:000000000045729E mov r8, [r8+8]
#
# I can definitely set it up easily with angr, but I was too lazy - which is proved to be
# a mistake soon after...
def hook_rsi(state):
state.regs.rsi = 0xd0000000
def hook_r8(state):
state.regs.r8 = 0xd0000000
def hook_rdi(state):
state.regs.rdi = 0xd0000000
# Calculate the next byte of the password
def calc_one_byte(p, known_passwords, hook_func, start_addr, load_addr1, load_addr2, cmp_flag_reg, cmp_addr):
byte_pos = len(known_passwords)
p.hook(load_addr1, UserHook(user_func=hook_func, length=14))
p.hook(load_addr2, UserHook(user_func=hook_func, length=14))
state = p.factory.blank_state(addr=start_addr)
state, password = prepare_state(state, known_passwords)
sm = p.factory.simgr(state, immutable=False)
sm.step(4)
sm.step(size=cmp_addr - load_addr2)
s0 = sm.active[0].copy()
s0.add_constraints(getattr(s0.regs, cmp_flag_reg) == 0x1)
candidates = s0.se.eval_upto(password[byte_pos], 256)
# assert len(candidates) == 1
return chr(candidates[0])
def main():
p = angr.Project("counter", load_options={'auto_load_libs': False})
# I got the first letter from gdb and IDA...
# First letter is 'S'. I found it out at 0x43d2c6
known_passwords = [ 'S' ]
# Let's figure out the second letter
# Get the second char
c = calc_one_byte(p, known_passwords, hook_rsi, 0x43e099, 0x43e0a8, 0x43e10a, "r11", 0x43e175)
# Second char: chr(116) == 't'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x43ee79, 0x43ee8c, 0x43eed3, "rbx", 0x43ef38)
# Third char: chr(52) == '4'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x43fd06, 0x43fd17, 0x43fd6e, "r11", 0x43fde5)
# Fourth char: chr(116) == 't'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x440a94, 0x440aa7, 0x440b0a, "rbx", 0x440b74)
# Fifth char: chr(49) == '1'
known_passwords += [ c ]
# Why are there so many characters? I was expecting 5 at most...
c = calc_one_byte(p, known_passwords, hook_rsi, 0x4418e2, 0x4418f1, 0x441942, "r10", 0x441994)
# Sixth char: chr(99) == 'c'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44268e, 0x44269f, 0x4426d2, "rbx", 0x44274e)
# Seventh char: chr(95) == '_'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rsi, 0x4433a5, 0x4433b4, 0x4433eb, "r11", 0x443466)
# Eighth char: chr(52) == '4'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x444194, 0x4441a5, 0x444208, "r11", 0x444260)
# Ninth char: chr(110) == 'n'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x444f51, 0x444f62, 0x444fa9, "r11", 0x445001)
# Tenth char: chr(52) == '4'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x445ddc, 0x445ded, 0x445e34, "rbx", 0x445e95)
# 11th char: chr(108) == 'l'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x446bfa, 0x446c0d, 0x446c64, "rbx", 0x446cd6)
# chr(121) == 'y'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rsi, 0x4479c4, 0x4479d3, 0x447a0a, "r10", 0x447a7a)
# chr(83) == 'S'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x44877f, 0x448792, 0x4487cd, "rbx", 0x44883f)
# chr(49) == '1'
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x449513, 0x449524, 0x44957b, "r11", 0x4495ee)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x44a29d, 0x44a2b0, 0x44a2ff, "rbx", 0x44a357)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44b0e8, 0x44b0f9, 0x44b140, "r11", 0x44b1b3)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rsi, 0x44bded, 0x44bdfc, 0x44be4d, "r10", 0x44bebb)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44cc4f, 0x44cc60, 0x44ccaf, "r11", 0x44ccfb)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44d99f, 0x44d9b0, 0x44da07, "r11", 0x44da72)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44e89a, 0x44e8ab, 0x44e8f4, "r10", 0x44e94a)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x44f67e, 0x44f68f, 0x44f6f2, "r11", 0x44f765)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x4504fe, 0x45050f, 0x450566, "r11", 0x4505bf)
known_passwords += [ c ]
# So many letters!!!!!!!!
c = calc_one_byte(p, known_passwords, hook_r8, 0x4511fe, 0x451211, 0x451268, "r14", 0x4512cd)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x4520d7, 0x4520ea, 0x452117, "r11", 0x45216f)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rsi, 0x452e82, 0x452e91, 0x452ed5, "r11", 0x452f50)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rsi, 0x453d28, 0x453d3a, 0x453d71, "r11", 0x453de6)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x454a39, 0x454a4c, 0x454a95, "r11", 0x454ae7)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x4557f9, 0x45580a, 0x455853, "r11", 0x4558c8)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_rdi, 0x45660a, 0x45661b, 0x456648, "r11", 0x4566a3)
known_passwords += [ c ]
c = calc_one_byte(p, known_passwords, hook_r8, 0x457281, 0x457294, 0x4572cf, "rbx", 0x457314)
known_passwords += [ c ]
# The last one must be '4'...
known_passwords += [ '4' ]
password = "".join(known_passwords)
print "Flag: EKO{%s}" % password
return password
def test():
assert main() == 'St4t1c_4n4lyS1s_randomstring1234'
if __name__ == "__main__":
main()
|
Ruide/angr-dev
|
angr-doc/examples/ekopartyctf2015_rev100/solve.py
|
Python
|
bsd-2-clause
| 7,445
|
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('--timing', action='store_true', default=False,
help='display per-test execution time (implies --verbose)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._options = options
if self._options.timing or self._options.debug_rwt_logging:
self._options.verbose = True
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("Placing test results in %s" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_results.total - run_results.expected_skips,
run_results.expected - run_results.expected_skips,
run_results.unexpected)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in DumpRenderTree.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
if unexpected_slow_tests:
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
if slow_tests:
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
if timeout_or_crash_tests:
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
min_seconds_to_print = 15
timings = []
for directory in stats:
rounded_time = round(stats[directory]['total_time'], 1)
if rounded_time > min_seconds_to_print:
timings.append((directory, rounded_time, stats[directory]['num_tests']))
if not timings:
return
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
for timing in timings:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)
        std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total, expected, unexpected):
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or unexpected:
self.writeln("")
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected." % expected
else:
summary = "The test ran as expected."
else:
summary = "%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str)
else:
summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_completed, self.num_tests, test_name, suffix)
def print_started_test(self, test_name):
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected, self._options.verbose,
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
for test_name, result_message in self._completed_tests:
self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, verbose, timing, test_run_time):
exp_string = ' unexpectedly' if not expected else ''
timing_string = ' %.4fs' % test_run_time if timing else ''
if result_type == test_expectations.PASS:
return ' passed%s%s' % (exp_string, timing_string)
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
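# --- Illustrative sketch added by the editor; not part of the original file.
# Recomputes, in isolation, the summary statistics that
# _print_statistics_for_test_timings reports (median, mean, standard
# deviation, 90th/99th percentiles). The sample timings are made up.
def _example_timing_statistics(timings=(0.8, 1.2, 0.5, 2.0, 0.9)):
    import math
    timings = sorted(timings)
    n = len(timings)
    if n % 2 == 1:
        median = timings[(n - 1) // 2]
    else:
        median = (timings[n // 2 - 1] + timings[n // 2]) / 2.0
    mean = sum(timings) / float(n)
    std_deviation = math.sqrt(sum((t - mean) ** 2 for t in timings) / n)
    return {'median': median, 'mean': mean, 'std_deviation': std_deviation,
            '90th': timings[int(.9 * n)], '99th': timings[int(.99 * n)]}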
|
espadrine/opera
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing.py
|
Python
|
bsd-3-clause
| 18,691
|
from setuptools import setup
from setuptools.command.test import test as TestCommand
import os
import sys
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
def read(fname):
"""
Utility function to read the README file.
Used for the long_description. It's nice, because now 1) we have a
top level README file and 2) it's easier to type in the README file
than to put a raw string in below ...
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
tests_require = [
'coverage==3.7.1',
'flake8==2.1.0',
'mock==1.0.1',
'nose==1.3.1',
'requests==2.2.1',
'tox==1.7.1',
]
install_requires = [
'six==1.6.1',
'tornado==3.2.1',
]
setup(
author="Asim Ihsan",
author_email="asim.ihsan@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
cmdclass={'test': Tox},
description="An HTTP-controlled process runner and results gatherer",
entry_points={
'console_scripts': [
'vocalsalad = vocalsalad.server:main',
],
},
install_requires=install_requires,
keywords="http command process execute",
license="MIT",
long_description=read('README.md'),
name="vocalsalad",
package_dir={'': 'vocalsalad'},
tests_require=tests_require,
url="https://github.com/asimihsan/vocalsalad",
version="0.0.1",
zip_safe=False,
)
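# --- Illustrative note added by the editor; not part of the original file. ---
# With the Tox command registered through cmdclass above, the test suite is
# expected to be driven via setuptools, along the lines of:
#
#   pip install -e .
#   python setup.py test    # calls tox.cmdline([]) and exits with its status
#
# The environments actually exercised depend on a tox.ini that is not shown
# here.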
|
asimihsan/vocalsalad
|
setup.py
|
Python
|
mit
| 1,756
|
# Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Universal functions as `Operator` and `Functional`."""
from __future__ import absolute_import
from .ufunc_ops import *
__all__ = ()
__all__ = ufunc_ops.__all__
|
kohr-h/odl
|
odl/ufunc_ops/__init__.py
|
Python
|
mpl-2.0
| 442
|
from tests.utils import should_throw
from tests.utils.registry import register
from wallace.db import SqlTable, SqlModel, String, DataType, Integer
from wallace.errors import SetupError, ValidationError
class SuperTable(SqlTable):
pass
class SuperModel(SqlModel):
pass
@register
@should_throw(SetupError, 401)
def test_pk_reqd():
class MyModel(SuperModel):
pass
MyModel.new()
@register
def test_pk():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
filed3 = String()
fields = sorted(list(MyModel._cbs_primary_key_fields))
assert fields == ['field1', 'field2']
@register
def test_pks_trickle_down():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
filed3 = String()
class SubModel(MyModel):
field4 = String(pk=True)
field5 = String()
fields = sorted(list(SubModel._cbs_primary_key_fields))
assert fields == ['field1', 'field2', 'field4']
@register
def test_pks_override():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
filed3 = String()
class SubModel(MyModel):
field2 = String()
field4 = String(pk=True)
field5 = String()
fields = sorted(list(SubModel._cbs_primary_key_fields))
assert fields == ['field1', 'field4']
@register
def test_pk_type_override():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
class SubModel(MyModel):
field2 = Integer(pk=True)
assert isinstance(SubModel.__dict__['field2'], Integer)
@register
def test_compare_pk():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
inst = MyModel.construct(new=False, field1='abc')
assert inst.primary_key == {'field1': 'abc'}
inst.field1 = 'xyz'
assert inst.primary_key == {'field1': 'abc'}
@register
@should_throw(ValidationError, 404)
def test_error_if_pk_field_missing():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
field3 = String()
inst = MyModel.construct(new=False, field1='abc', field3='xyz')
inst.primary_key
@register
@should_throw(ValidationError, 404)
def test_error_if_pk_field_empty_str():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
inst = MyModel.construct(new=False, field1='abc', field2='')
inst.primary_key
@register
@should_throw(ValidationError, 404)
def test_error_if_pk_field_null():
class MyTable(SuperTable):
table_name = 'my_sql_table'
class MyModel(SuperModel):
table = MyTable
field1 = String(pk=True)
field2 = String(pk=True)
inst = MyModel.construct(new=False, field1='abc', field2=None)
inst.primary_key
|
csira/wallace
|
tests/cases/models/sql/pk.py
|
Python
|
bsd-3-clause
| 3,520
|
import sys
print(sys.platform)
print('Hello hosvik!')
|
Hosvik/Hosvik
|
src/main.py
|
Python
|
apache-2.0
| 55
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow collective Ops."""
from tensorflow.python.ops import gen_collective_ops
def all_reduce(t,
group_size,
group_key,
instance_key,
merge_op='Add',
final_op='Id',
subdiv_offsets=(0,),
communication_hint='auto',
timeout=0):
"""Reduces tensors collectively, across devices.
Args:
t: the tensor to be reduced.
group_size: the total number of tensors to be collectively reduced.
Each must reside on a different device. Should be a positive integer.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
merge_op: string naming the binary Op to be applied to compute each
partial reduction.
final_op: string naming the unary Op to be applied to each fully
reduced value. Can be 'Id' for no operation.
subdiv_offsets: a list of integer offsets into the tensor at which each
independent subdivision should begin. Use [0] if no subdivision should
be done.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: a float. If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the distributed reduction.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if group_size < 1:
raise ValueError('Parameter `group_size` to all_reduce must be at least 1. '
f'Received: {group_size}.')
return gen_collective_ops.collective_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
merge_op=merge_op,
final_op=final_op,
subdiv_offsets=subdiv_offsets,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
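# --- Illustrative sketch added by the editor; not part of the original
# module. One way all_reduce could be exercised across two logical CPU
# devices: both calls are traced into the same tf.function so the collective
# group of size 2 can complete. Device names, keys and values below are
# hypothetical, and the logical devices are assumed to have been configured
# beforehand via tf.config.
#
#   import tensorflow as tf
#   from tensorflow.python.ops import collective_ops
#
#   @tf.function
#   def reduce_pair():
#     with tf.device('/device:CPU:0'):
#       r0 = collective_ops.all_reduce(
#           tf.constant([1.0]), group_size=2, group_key=1, instance_key=1)
#     with tf.device('/device:CPU:1'):
#       r1 = collective_ops.all_reduce(
#           tf.constant([3.0]), group_size=2, group_key=1, instance_key=1)
#     return r0, r1  # both reduce to [4.0] once every group member has run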
def all_reduce_v2(t,
group_size,
group_key,
instance_key,
merge_op='Add',
final_op='Id',
communication_hint='auto',
timeout=0,
ordering_token=None,
max_subdivs_per_device=-1,
name=None):
"""Reduces tensors collectively, across devices.
Args:
t: the tensor to be reduced.
group_size: an int32 tensor. The total number of tensors to be collectively
reduced. Each must reside on a different device. Should be a positive
integer.
group_key: an int32 tensor identifying the group of devices.
instance_key: an int32 tensor identifying the participating group of Ops.
merge_op: string naming the binary Op to be applied to compute each partial
reduction.
final_op: string naming the unary Op to be applied to each fully reduced
value. Can be 'Id' for no operation.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: a float. If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
ordering_token: a resource tensor on the same device as the op to order
the collectives in a per-device manner by auto control dependency.
      This argument can be omitted when there is one collective Op per
`tf.function`, or when explicit control dependency is used instead of
auto control dependency.
    max_subdivs_per_device: int specifying the maximum number of subdivisions a
      tensor on a device can be divided into. The runtime uses this constraint
      to parallelize processing of each per-device tensor. Setting to -1
      disables subdivision and reverts to the previous behavior of not
      sub-dividing the tensor. Setting to 0 uses system defaults.
name: name of the Op.
Returns:
An Op implementing the distributed reduction.
"""
if ordering_token is not None:
ordering_token = [ordering_token]
else:
ordering_token = []
return gen_collective_ops.collective_reduce_v2(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
merge_op=merge_op,
final_op=final_op,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout,
ordering_token=ordering_token,
max_subdivs_per_device=max_subdivs_per_device,
name=name)
def all_gather(t,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0):
"""Accumulates tensors collectively, across devices, along first dimension.
Args:
t: the tensor to participate in the accumulation.
group_size: the total number of tensors to be collectively accumulated.
Each must reside on a different device. Should be a positive integer.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: a float. If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the distributed operation.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if group_size < 1:
raise ValueError('Parameter `group_size` to all_gather must be at least 1.'
f' Received: {group_size}.')
return gen_collective_ops.collective_gather(
t,
shape=[0],
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
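# --- Illustrative note added by the editor; not part of the original module.
# all_gather concatenates the participating tensors along their first
# dimension: with group_size=2 and a per-device input of shape [1, 3], every
# participant receives a tensor of shape [2, 3]. The call pattern mirrors the
# all_reduce sketch above, with one call per device inside a single graph.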
def all_gather_v2(t,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0,
ordering_token=None,
name=None):
"""Accumulates tensors collectively, across devices, along first dimension.
Args:
t: the tensor to participate in the accumulation.
group_size: an int32 tensor, the total number of tensors to be collectively
accumulated. Each must reside on a different device. Should be a positive
integer.
group_key: an int32 tensor identifying the group of devices.
instance_key: an int32 tensor identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: a float. If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
ordering_token: a resource tensor on the same device as the op to order
the collectives in a per-device manner by auto control dependency.
      This argument can be omitted when there is one collective Op per
`tf.function`, or when explicit control dependency is used instead of
auto control dependency.
name: name of the Op.
Returns:
An Op implementing the distributed operation.
"""
if ordering_token is not None:
ordering_token = [ordering_token]
else:
ordering_token = []
return gen_collective_ops.collective_gather_v2(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout,
ordering_token=ordering_token,
name=name)
def broadcast_send(t,
shape,
dtype,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0):
"""Broadcasts one tensor to a group of others, across devices.
Args:
t: the tensor to be sent.
shape: the shape of the tensor being sent, which must agree with t.
dtype: the type of the tensor being sent, which must agree with t.
group_size: one plus the number of receiving tensors, i.e. the total
number of devices participating. Each tensor must reside on a
different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: If set to a non zero, set a completion timeout to detect staleness.
If the timer goes off, a DeadlineExceededError is raised.
The timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the distributed broadcast send.
Raises:
ValueError: if any of the input parameter constraints are not met.
Note that the shape and dtype arguments appear redundant since they
  should be obtainable from t. There are two reasons for including
them. First, the shape and type of tensors passed via broadcast must
be known ahead of time in their most specific form so that the receive
side can allocate memory for the operation and shape/type inference can
carry forward from there. Including the same declarations on the
send side clarifies a commitment already made. Secondly, having nearly
identical use syntax for send and receive sides may simplify tool-driven
generation of broadcast.
"""
if group_size <= 1:
raise ValueError(
'Parameter `group_size` to broadcast_send must be at least 2. '
f'Received: {group_size}.')
if t.shape != shape:
raise ValueError(
'Shape of broadcast_send tensor `t` not equal to declared shape. '
f'Received {t.shape}, expected {shape}.')
if t.dtype != dtype:
raise ValueError(
'Type of broadcast_send tensor `t` not equal to declared type. '
f'Received {t.dtype}, expected {dtype}.')
return gen_collective_ops.collective_bcast_send(
t,
shape=shape,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
def broadcast_send_v2(t,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0):
"""Broadcasts one tensor to a group of others, across devices.
Args:
t: the tensor to be sent.
group_size: an int32 tensor. One plus the number of receiving tensors, i.e.
the total number of devices participating. Each tensor must reside on a
different device.
group_key: an int32 tensor identifying the group of devices.
instance_key: an int32 tensor identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: If set to a non zero, set a completion timeout to detect staleness.
If the timer goes off, a DeadlineExceededError is raised.
The timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the distributed broadcast send.
"""
return gen_collective_ops.collective_bcast_send_v2(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
def broadcast_recv(shape,
dtype,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0):
"""Receives a broadcasts tensor, across devices.
Args:
shape: Shape of the tensor to be received.
dtype: Type of the tensor to be received.
group_size: one plus the number of receiving tensors, i.e. the total
number of devices participating. Each tensor must reside on a
different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: If set to a non zero, set a completion timeout to detect staleness.
If the timer goes off, a DeadlineExceededError is raised.
The timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the broadcast receive.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if group_size <= 1:
raise ValueError(
        'Parameter `group_size` to broadcast_recv must be at least 2. '
f'Received: {group_size}.')
return gen_collective_ops.collective_bcast_recv(
shape=shape,
T=dtype,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
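# --- Illustrative sketch added by the editor; not part of the original
# module. broadcast_send and broadcast_recv are used as a matched pair: the
# sender's declared shape/dtype must agree with the tensor it sends, and each
# receiver declares the same shape/dtype so memory can be allocated ahead of
# time. A hypothetical group of two devices, built inside one graph:
#
#   with tf.device('/device:CPU:0'):
#     sent = collective_ops.broadcast_send(
#         tf.constant([[1.0, 2.0]]), shape=[1, 2], dtype=tf.float32,
#         group_size=2, group_key=2, instance_key=2)
#   with tf.device('/device:CPU:1'):
#     received = collective_ops.broadcast_recv(
#         shape=[1, 2], dtype=tf.float32,
#         group_size=2, group_key=2, instance_key=2)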
def broadcast_recv_v2(shape,
dtype,
group_size,
group_key,
instance_key,
communication_hint='auto',
timeout=0):
"""Receives a broadcasts tensor, across devices.
Args:
shape: an int tensor. Shape of the tensor to be received.
dtype: Type of the tensor to be received.
group_size: an int32 tensor. One plus the number of receiving tensors, i.e.
the total number of devices participating. Each tensor must reside on a
different device.
group_key: an int32 tensor identifying the group of devices.
instance_key: an int32 tensor identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout: If set to a non zero, set a completion timeout to detect staleness.
If the timer goes off, a DeadlineExceededError is raised.
The timeout value in seconds. This feature is experimental.
Returns:
An Op implementing the broadcast receive.
"""
return gen_collective_ops.collective_bcast_recv_v2(
T=dtype,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
shape=shape,
communication_hint=communication_hint.lower(),
timeout_seconds=timeout)
def initialize_communicator(group_key,
rank,
group_size,
communication_hint='auto',
timeout_seconds=0):
"""Initializes a collective communicator.
This creates a collective communicator, which represents membership to a
collective group identified by the group_key. It should be called once per
member of the group, and each member needs to be on a different device.
It blocks until all members of the group run this op.
Communicators of a group can only be initialized once. Trying to initialize
communicators for an existing group key will result in an error.
Args:
group_key: an int32 `tf.Tensor` identifying the group.
rank: an `tf.Tensor` specifying the rank of this device in the group. If
specified, the rank is required to be unique in the group.
group_size: an int32 `tf.Tensor`. The size of the group.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
timeout_seconds: If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
Returns:
A resource `tf.Tensor`.
"""
return gen_collective_ops.collective_initialize_communicator(
group_key=group_key,
rank=rank,
group_size=group_size,
communication_hint=communication_hint,
timeout_seconds=timeout_seconds)
def all_reduce_v3(communicator,
t,
reduction='Add',
group_assignment=None,
timeout_seconds=None):
"""Reduces tensors mutually.
Args:
communicator: the resource `tf.Tensor` returned from
`initialize_communicator`.
t: the `tf.Tensor` to be reduced.
reduction: a string. The name of the operation to reduce the values.
      Accepted values are `"min"`, `"max"`, `"mul"`, `"add"`.
group_assignment: Optional int32 `tf.Tensor` with shape [num_groups,
num_ranks_per_group]. `group_assignment[i]` represents the ranks in the
`ith` subgroup.
timeout_seconds: If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
Returns:
The reduced `tf.Tensor`.
"""
if group_assignment is None:
group_assignment = []
return gen_collective_ops.collective_reduce_v3(
communicator=communicator,
input=t,
group_assignment=group_assignment,
reduction=reduction,
timeout_seconds=timeout_seconds)
def all_to_all_v3(communicator, t, group_assignment=None, timeout_seconds=None):
"""Exchanges tensors mutually.
Args:
communicator: the resource `tf.Tensor` returned from
`initialize_communicator`.
t: a `tf.Tensor`. The first dimension should have the length as the size of
the group. `t[i]` is sent to `rank i` within the group.
group_assignment: Optional int32 `tf.Tensor` with shape [num_groups,
num_ranks_per_group]. `group_assignment[i]` represents the ranks in the
`ith` subgroup.
timeout_seconds: If set to a non zero, set a completion timeout to detect
staleness. If the timer goes off, a DeadlineExceededError is raised. The
timeout value in seconds. This feature is experimental.
Returns:
a `tf.Tensor`. `t[i]` is sent from `rank i` within the group.
"""
if group_assignment is None:
group_assignment = []
return gen_collective_ops.collective_all_to_all_v3(
communicator=communicator,
input=t,
group_assignment=group_assignment,
timeout_seconds=timeout_seconds)
|
Intel-Corporation/tensorflow
|
tensorflow/python/ops/collective_ops.py
|
Python
|
apache-2.0
| 19,889
|
# -*- coding: utf-8 -*-
#
# Django Test Utils documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 3 16:36:58 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Test Utils'
copyright = u'2009, Eric Holscher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoTestUtilsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoTestUtils.tex', ur'Django Test Utils Documentation',
ur'Eric Holscher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
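# Editor's note (illustrative, not part of the original configuration): with
# this conf.py the HTML documentation is typically built from the docs
# directory with something like:
#
#   sphinx-build -b html source build/html
#
# The 'sphinxdoc' theme, static path and intersphinx mapping above are picked
# up automatically from this file.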
|
acdha/django-test-utils
|
docs/source/conf.py
|
Python
|
mit
| 6,540
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from erpnext.setup.utils import get_company_currency
from erpnext.accounts.party import get_party_details
from erpnext.stock.get_item_details import get_conversion_factor
from erpnext.controllers.stock_controller import StockController
class BuyingController(StockController):
def __setup__(self):
if hasattr(self, "taxes"):
self.print_templates = {
"taxes": "templates/print_formats/includes/taxes.html"
}
def get_feed(self):
return _("From {0} | {1} {2}").format(self.supplier_name, self.currency,
self.grand_total)
def validate(self):
super(BuyingController, self).validate()
if getattr(self, "supplier", None) and not self.supplier_name:
self.supplier_name = frappe.db.get_value("Supplier",
self.supplier, "supplier_name")
self.is_item_table_empty()
self.set_qty_as_per_stock_uom()
self.validate_stock_or_nonstock_items()
self.validate_warehouse()
def set_missing_values(self, for_validate=False):
super(BuyingController, self).set_missing_values(for_validate)
self.set_supplier_from_item_default()
self.set_price_list_currency("Buying")
# set contact and address details for supplier, if they are not mentioned
if getattr(self, "supplier", None):
self.update_if_missing(get_party_details(self.supplier, party_type="Supplier"))
self.set_missing_item_details()
def set_supplier_from_item_default(self):
if self.meta.get_field("supplier") and not self.supplier:
for d in self.get("items"):
supplier = frappe.db.get_value("Item", d.item_code, "default_supplier")
if supplier:
self.supplier = supplier
break
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get("items") if getattr(d, "warehouse", None)]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_stock_or_nonstock_items(self):
if self.meta.get_field("taxes") and not self.get_stock_items():
tax_for_valuation = [d.account_head for d in self.get("taxes")
if d.category in ["Valuation", "Valuation and Total"]]
if tax_for_valuation:
frappe.throw(_("Tax Category can not be 'Valuation' or 'Valuation and Total' as all items are non-stock items"))
def set_total_in_words(self):
from frappe.utils import money_in_words
company_currency = get_company_currency(self.company)
if self.meta.get_field("base_in_words"):
self.base_in_words = money_in_words(self.base_grand_total, company_currency)
if self.meta.get_field("in_words"):
self.in_words = money_in_words(self.grand_total, self.currency)
# update valuation rate
def update_valuation_rate(self, parentfield):
"""
item_tax_amount is the total tax amount applied on that item
stored for valuation
TODO: rename item_tax_amount to valuation_tax_amount
"""
stock_items = self.get_stock_items()
stock_items_qty, stock_items_amount = 0, 0
last_stock_item_idx = 1
for d in self.get(parentfield):
if d.item_code and d.item_code in stock_items:
stock_items_qty += flt(d.qty)
stock_items_amount += flt(d.base_net_amount)
last_stock_item_idx = d.idx
total_valuation_amount = sum([flt(d.base_tax_amount_after_discount_amount) for d in self.get("taxes")
if d.category in ["Valuation", "Valuation and Total"]])
valuation_amount_adjustment = total_valuation_amount
for i, item in enumerate(self.get(parentfield)):
if item.item_code and item.qty and item.item_code in stock_items:
item_proportion = flt(item.base_net_amount) / stock_items_amount if stock_items_amount \
else flt(item.qty) / stock_items_qty
if i == (last_stock_item_idx - 1):
item.item_tax_amount = flt(valuation_amount_adjustment,
self.precision("item_tax_amount", item))
else:
item.item_tax_amount = flt(item_proportion * total_valuation_amount,
self.precision("item_tax_amount", item))
valuation_amount_adjustment -= item.item_tax_amount
self.round_floats_in(item)
item.conversion_factor = get_conversion_factor(item.item_code, item.uom).get("conversion_factor") or 1.0
qty_in_stock_uom = flt(item.qty * item.conversion_factor)
rm_supp_cost = flt(item.rm_supp_cost) if self.doctype=="Purchase Receipt" else 0.0
landed_cost_voucher_amount = flt(item.landed_cost_voucher_amount) \
if self.doctype == "Purchase Receipt" else 0.0
item.valuation_rate = ((item.base_net_amount + item.item_tax_amount + rm_supp_cost
+ landed_cost_voucher_amount) / qty_in_stock_uom)
else:
item.valuation_rate = 0.0
def validate_for_subcontracting(self):
if not self.is_subcontracted and self.sub_contracted_items:
frappe.throw(_("Please enter 'Is Subcontracted' as Yes or No"))
if self.is_subcontracted == "Yes":
if self.doctype == "Purchase Receipt" and not self.supplier_warehouse:
frappe.throw(_("Supplier Warehouse mandatory for sub-contracted Purchase Receipt"))
for item in self.get("items"):
if item in self.sub_contracted_items and not item.bom:
frappe.throw(_("Please select BOM in BOM field for Item {0}").format(item.item_code))
else:
for item in self.get("items"):
if item.bom:
item.bom = None
def create_raw_materials_supplied(self, raw_material_table):
if self.is_subcontracted=="Yes":
parent_items = []
for item in self.get("items"):
if self.doctype == "Purchase Receipt":
item.rm_supp_cost = 0.0
if item.item_code in self.sub_contracted_items:
self.update_raw_materials_supplied(item, raw_material_table)
if [item.item_code, item.name] not in parent_items:
parent_items.append([item.item_code, item.name])
self.cleanup_raw_materials_supplied(parent_items, raw_material_table)
elif self.doctype == "Purchase Receipt":
for item in self.get("items"):
item.rm_supp_cost = 0.0
def update_raw_materials_supplied(self, item, raw_material_table):
bom_items = self.get_items_from_bom(item.item_code, item.bom)
raw_materials_cost = 0
for bom_item in bom_items:
# check if exists
exists = 0
for d in self.get(raw_material_table):
if d.main_item_code == item.item_code and d.rm_item_code == bom_item.item_code \
and d.reference_name == item.name:
rm, exists = d, 1
break
if not exists:
rm = self.append(raw_material_table, {})
required_qty = flt(bom_item.qty_consumed_per_unit) * flt(item.qty) * flt(item.conversion_factor)
rm.reference_name = item.name
rm.bom_detail_no = bom_item.name
rm.main_item_code = item.item_code
rm.rm_item_code = bom_item.item_code
rm.stock_uom = bom_item.stock_uom
rm.required_qty = required_qty
rm.conversion_factor = item.conversion_factor
if self.doctype == "Purchase Receipt":
rm.consumed_qty = required_qty
rm.description = bom_item.description
if item.batch_no and not rm.batch_no:
rm.batch_no = item.batch_no
# get raw materials rate
if self.doctype == "Purchase Receipt":
from erpnext.stock.utils import get_incoming_rate
rm.rate = get_incoming_rate({
"item_code": bom_item.item_code,
"warehouse": self.supplier_warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"qty": -1 * required_qty,
"serial_no": rm.serial_no
})
if not rm.rate:
from erpnext.stock.stock_ledger import get_valuation_rate
rm.rate = get_valuation_rate(bom_item.item_code, self.supplier_warehouse)
else:
rm.rate = bom_item.rate
rm.amount = required_qty * flt(rm.rate)
raw_materials_cost += flt(rm.amount)
if self.doctype == "Purchase Receipt":
item.rm_supp_cost = raw_materials_cost
def cleanup_raw_materials_supplied(self, parent_items, raw_material_table):
"""Remove all those child items which are no longer present in main item table"""
delete_list = []
for d in self.get(raw_material_table):
if [d.main_item_code, d.reference_name] not in parent_items:
# mark for deletion from doclist
delete_list.append(d)
# delete from doclist
if delete_list:
rm_supplied_details = self.get(raw_material_table)
self.set(raw_material_table, [])
for d in rm_supplied_details:
if d not in delete_list:
self.append(raw_material_table, d)
def get_items_from_bom(self, item_code, bom):
bom_items = frappe.db.sql("""select t2.item_code,
ifnull(t2.qty, 0) / ifnull(t1.quantity, 1) as qty_consumed_per_unit,
t2.rate, t2.stock_uom, t2.name, t2.description
from `tabBOM` t1, `tabBOM Item` t2, tabItem t3
where t2.parent = t1.name and t1.item = %s
and t1.docstatus = 1 and t1.is_active = 1 and t1.name = %s
and t2.item_code = t3.name and ifnull(t3.is_stock_item, 'No') = 'Yes'""", (item_code, bom), as_dict=1)
if not bom_items:
msgprint(_("Specified BOM {0} does not exist for Item {1}").format(bom, item_code), raise_exception=1)
return bom_items
@property
def sub_contracted_items(self):
if not hasattr(self, "_sub_contracted_items"):
self._sub_contracted_items = []
item_codes = list(set(item.item_code for item in
self.get("items")))
if item_codes:
self._sub_contracted_items = [r[0] for r in frappe.db.sql("""select name
from `tabItem` where name in (%s) and is_sub_contracted_item='Yes'""" % \
(", ".join((["%s"]*len(item_codes))),), item_codes)]
return self._sub_contracted_items
@property
def purchase_items(self):
if not hasattr(self, "_purchase_items"):
self._purchase_items = []
item_codes = list(set(item.item_code for item in
self.get("items")))
if item_codes:
self._purchase_items = [r[0] for r in frappe.db.sql("""select name
from `tabItem` where name in (%s) and is_purchase_item='Yes'""" % \
(", ".join((["%s"]*len(item_codes))),), item_codes)]
return self._purchase_items
def is_item_table_empty(self):
if not len(self.get("items")):
frappe.throw(_("Item table can not be blank"))
def set_qty_as_per_stock_uom(self):
for d in self.get("items"):
if d.meta.get_field("stock_qty"):
if not d.conversion_factor:
frappe.throw(_("Row {0}: Conversion Factor is mandatory").format(d.idx))
d.stock_qty = flt(d.qty) * flt(d.conversion_factor)
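# --- Illustrative sketch added by the editor; not part of the original
# controller. update_valuation_rate spreads the total "Valuation" tax over the
# stock items in proportion to base_net_amount and pushes the rounding
# remainder onto the last stock item. The same arithmetic in isolation, with
# made-up numbers:
#
#   items = [{'base_net_amount': 300.0}, {'base_net_amount': 100.0}]
#   total_valuation_tax = 50.0
#   total_amount = sum(d['base_net_amount'] for d in items)
#   remaining = total_valuation_tax
#   for i, d in enumerate(items):
#       if i == len(items) - 1:
#           d['item_tax_amount'] = remaining                      # 12.5
#       else:
#           d['item_tax_amount'] = round(
#               d['base_net_amount'] / total_amount * total_valuation_tax, 2)
#           remaining -= d['item_tax_amount']                     # 37.5 allocated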
|
ThiagoGarciaAlves/erpnext
|
erpnext/controllers/buying_controller.py
|
Python
|
agpl-3.0
| 10,394
|
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import base64
import jinja2
import os.path
import pathlib
import random as rand
import shutil
import subprocess
import tempfile
import textwrap
###############################################################################
# Helper Functions
###############################################################################
def to_rgb(color):
color = color.lstrip('#')
r, g, b = map(lambda x: int(x, 16), [color[:2], color[2:4], color[4:]])
return 'rgb({},{},{})'.format(r, g, b)
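# Editor's note (illustrative): to_rgb('#ffcc00') returns 'rgb(255,204,0)';
# the leading '#' is optional because lstrip('#') removes it when present.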
def copy_fonts(*fonts):
"""
Copy the fonts to the home directory.
    Necessary in order to use the fonts during the png conversion.
"""
root = pathlib.Path(os.path.expanduser('~')) / '.fonts/racovimge'
if not root.exists():
root.mkdir(parents=True)
for font in fonts:
new_path = root / font.split('/')[-1]
if not new_path.exists():
shutil.copy(font, str(new_path))
def to_png(image):
_, path = tempfile.mkstemp(suffix='.svg')
with open(path, 'w') as file:
file.write(image)
outpath = path.replace('.svg', '.png')
subprocess.call(['rsvg', path, outpath])
with open(outpath, 'rb') as file:
data = file.read()
pathlib.Path(path).unlink()
pathlib.Path(outpath).unlink()
return data
def wrap(text, width):
if not isinstance(text, str):
return text
return textwrap.wrap(
text, break_long_words=False, break_on_hyphens=False, width=width)
###############################################################################
# Jinja2 setup
###############################################################################
env = jinja2.Environment(loader=jinja2.PackageLoader('racovimge'))
env.filters['wrap'] = wrap
env.filters['rgb'] = to_rgb
###############################################################################
# Templates and Color Schemes
###############################################################################
ROOT = pathlib.Path(__file__).parent
templates = [i.stem for i in (ROOT / 'templates').glob('*.svg')]
with (ROOT / 'colors.txt').open() as file:
color_schemes = [i.split() for i in file.read().split('\n')]
fonts = ROOT / 'fonts'
fonts = [i for i in fonts.glob('*.*') if i.suffix in ('.ttf', '.otf')]
fonts = [str(i.resolve()) for i in fonts]
###############################################################################
# Covers
###############################################################################
def random(
title, author, *,
templates=templates, schemes=color_schemes, fonts=fonts,
font_size=120, font_size_author=70):
template = rand.choice(templates)
colors = rand.choice(schemes)
font = rand.choice(fonts)
return cover(
title, author, template=template, colors=colors, font=font,
font_size=font_size, font_size_author=font_size_author)
def cover(
title, author, *, template, colors, font,
font_size=120, font_size_author=70):
authors = [author] if isinstance(author, str) else author
authors = authors[:3] if authors else []
clr1, clr2, clr3, clr4, clr5 = colors
font_mimetypes = dict(
otf='font/opentype',
ttf='application/x-font-ttf')
font = pathlib.Path(font)
with font.open('rb') as file:
font_data = file.read()
font_data = base64.b64encode(font_data).decode('utf-8')
font_name = font.stem
font_type = font_mimetypes[font.suffix.lstrip('.')]
image = env.get_template(template + '.svg').render(
title=title, authors=authors,
font=font_name, font_type=font_type, font_data=font_data,
color1=clr1, color2=clr2, color3=clr3, color4=clr4, color5=clr5,
font_size=font_size, font_size_author=font_size_author)
return image
def png_random(*args, **kwargs):
copy_fonts(*kwargs.get('fonts', fonts))
return to_png(random(*args, **kwargs))
def png_cover(*args, **kwargs):
copy_fonts(kwargs['font'])
return to_png(cover(*args, **kwargs))
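# --- Illustrative sketch added by the editor; not part of the original
# module. Generating a random SVG cover from the bundled templates, fonts and
# color schemes (the title and author strings are made up):
#
#   import racovimge
#
#   svg = racovimge.random('A Hypothetical Title', 'Jane Doe')
#   with open('cover.svg', 'w') as f:
#       f.write(svg)
#
# png_random() returns raw PNG bytes instead and additionally requires the
# external `rsvg` command used by to_png() to be available on the system.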
|
anqxyr/racovimge
|
racovimge/racovimge.py
|
Python
|
mit
| 4,222
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
import re
import urllib
import urllib2
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from owslib.csw import CatalogueServiceWeb, namespaces
from owslib.util import http_post
from urlparse import urlparse
from lxml import etree
from geonode.catalogue.backends.base import BaseCatalogueBackend
logger = logging.getLogger(__name__)
TIMEOUT = 10
METADATA_FORMATS = {
'Atom': (
'atom:entry',
'http://www.w3.org/2005/Atom'),
'DIF': (
'dif:DIF',
'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/'),
'Dublin Core': (
'csw:Record',
'http://www.opengis.net/cat/csw/2.0.2'),
'ebRIM': (
'rim:RegistryObject',
'urn:oasis:names:tc:ebxml-regrep:xsd:rim:3.0'),
'FGDC': (
'fgdc:metadata',
'http://www.opengis.net/cat/csw/csdgm'),
'ISO': (
'gmd:MD_Metadata',
'http://www.isotc211.org/2005/gmd')}
class Catalogue(CatalogueServiceWeb):
def __init__(self, *args, **kwargs):
self.url = kwargs['URL']
self.user = None
self.password = None
self.type = kwargs['ENGINE'].split('.')[-1]
self.local = False
self._group_ids = {}
self._operation_ids = {}
self.connected = False
skip_caps = kwargs.get('skip_caps', True)
CatalogueServiceWeb.__init__(self, url=self.url, skip_caps=skip_caps)
upurl = urlparse(self.url)
self.base = '%s://%s/' % (upurl.scheme, upurl.netloc)
# User and Password are optional
        if 'USER' in kwargs:
self.user = kwargs['USER']
if 'PASSWORD' in kwargs:
self.password = kwargs['PASSWORD']
def __enter__(self, *args, **kwargs):
self.login()
return self
def __exit__(self, *args, **kwargs):
self.logout()
def login(self):
if self.type == 'geonetwork':
url = "%sgeonetwork/srv/en/xml.user.login" % self.base
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain"
}
post = urllib.urlencode({
"username": self.user,
"password": self.password
})
request = urllib2.Request(url, post, headers)
self.opener = urllib2.build_opener(
urllib2.HTTPCookieProcessor(),
urllib2.HTTPRedirectHandler())
response = self.opener.open(request)
doc = etree.fromstring(response.read())
assert doc.tag == 'ok', "GeoNetwork login failed!"
self.connected = True
def logout(self):
if self.type == 'geonetwork':
url = "%sgeonetwork/srv/en/xml.user.logout" % self.base
request = urllib2.Request(url)
response = self.opener.open(request) # noqa
self.connected = False
def get_by_uuid(self, uuid):
try:
self.getrecordbyid([uuid], outputschema=namespaces["gmd"])
except BaseException:
return None
if hasattr(self, 'records'):
if len(self.records) < 1:
return None
record = self.records.values()[0]
record.keywords = []
if hasattr(
record,
'identification') and hasattr(
record.identification,
'keywords'):
for kw in record.identification.keywords:
record.keywords.extend(kw['keywords'])
return record
else:
return None
def url_for_uuid(self, uuid, outputschema):
return "%s?%s" % (self.url, urllib.urlencode({
"request": "GetRecordById",
"service": "CSW",
"version": "2.0.2",
"id": uuid,
"outputschema": outputschema,
"elementsetname": "full"
}))
def urls_for_uuid(self, uuid):
"""returns list of valid GetRecordById URLs for a given record"""
urls = []
for mformat in self.formats:
urls.append(
('text/xml',
mformat,
self.url_for_uuid(
uuid,
METADATA_FORMATS[mformat][1])))
return urls
def csw_gen_xml(self, layer, template):
id_pname = 'dc:identifier'
if self.type == 'deegree':
id_pname = 'apiso:Identifier'
tpl = get_template(template)
ctx = Context({'layer': layer,
'SITEURL': settings.SITEURL[:-1],
'id_pname': id_pname,
'LICENSES_METADATA': getattr(settings,
'LICENSES',
dict()).get('METADATA',
'never')})
md_doc = tpl.render(ctx)
return md_doc
def csw_gen_anytext(self, xml):
""" get all element data from an XML document """
xml = etree.fromstring(xml)
return ' '.join([value.strip() for value in xml.xpath('//text()')])
def csw_request(self, layer, template):
md_doc = self.csw_gen_xml(layer, template).encode('utf-8')
if self.type == 'geonetwork':
headers = {
"Content-Type": "application/xml; charset=UTF-8",
"Accept": "text/plain"
}
request = urllib2.Request(self.url, md_doc, headers)
response = self.urlopen(request)
else:
response = http_post(self.url, md_doc, timeout=TIMEOUT)
return response
def create_from_layer(self, layer):
response = self.csw_request(layer, "catalogue/transaction_insert.xml")
# TODO: Parse response, check for error report
if self.type == 'geonetwork':
# set layer.uuid based on what GeoNetwork returns
# this is needed for inserting FGDC metadata in GN
exml = etree.fromstring(response.read())
identifier = exml.find(
'{%s}InsertResult/{%s}BriefRecord/identifier' %
(namespaces['csw'], namespaces['csw'])).text
layer.uuid = identifier
# Turn on the "view" permission (aka publish) for
# the "all" group in GeoNetwork so that the layer
# will be searchable via CSW without admin login.
# all other privileges are set to False for all
# groups.
self.set_metadata_privs(layer.uuid, {"all": {"view": True}})
return self.url_for_uuid(layer.uuid, namespaces['gmd'])
def delete_layer(self, layer):
response = self.csw_request(layer, "catalogue/transaction_delete.xml") # noqa
# TODO: Parse response, check for error report
def update_layer(self, layer):
tmpl = 'catalogue/transaction_update.xml'
if self.type == 'geonetwork':
tmpl = 'catalogue/transaction_update_gn.xml'
response = self.csw_request(layer, tmpl) # noqa
# TODO: Parse response, check for error report
def set_metadata_privs(self, uuid, privileges):
"""
set the full set of geonetwork privileges on the item with the
specified uuid based on the dictionary given of the form:
{
'group_name1': {'operation1': True, 'operation2': True, ...},
'group_name2': ...
}
all unspecified operations and operations for unspecified groups
are set to False.
"""
# XXX This is a fairly ugly workaround that makes
# requests similar to those made by the GeoNetwork
# admin based on the recommendation here:
# http://bit.ly/ccVEU7
if self.type == 'geonetwork':
get_dbid_url = '%sgeonetwork/srv/en/portal.search.present?%s' % \
(self.base, urllib.urlencode({'uuid': uuid}))
# get the id of the data.
request = urllib2.Request(get_dbid_url)
response = self.urlopen(request)
doc = etree.fromstring(response.read())
data_dbid = doc.find(
'metadata/{http://www.fao.org/geonetwork}info/id').text
# update group and operation info if needed
if len(self._group_ids) == 0:
self._group_ids = self._geonetwork_get_group_ids()
if len(self._operation_ids) == 0:
self._operation_ids = self._geonetwork_get_operation_ids()
# build params that represent the privilege configuration
priv_params = {
"id": data_dbid, # "uuid": layer.uuid, # you can say this instead in newer versions of GN
}
for group, privs in privileges.items():
group_id = self._group_ids[group.lower()]
for op, state in privs.items():
if state is not True:
continue
op_id = self._operation_ids[op.lower()]
priv_params['_%s_%s' % (group_id, op_id)] = 'on'
# update all privileges
update_privs_url = "%sgeonetwork/srv/en/metadata.admin?%s" % (
self.base, urllib.urlencode(priv_params))
request = urllib2.Request(update_privs_url)
response = self.urlopen(request)
# TODO: check for error report
def _geonetwork_get_group_ids(self):
"""
helper to fetch the set of geonetwork
groups.
"""
# get the ids of the groups.
get_groups_url = "%sgeonetwork/srv/en/xml.info?%s" % (
self.base, urllib.urlencode({'type': 'groups'}))
request = urllib2.Request(get_groups_url)
response = self.urlopen(request)
doc = etree.fromstring(response.read())
groups = {}
for gp in doc.findall('groups/group'):
groups[gp.find('name').text.lower()] = gp.attrib['id']
return groups
def _geonetwork_get_operation_ids(self):
"""
helper to fetch the set of geonetwork
'operations' (privileges)
"""
# get the ids of the operations
get_ops_url = "%sgeonetwork/srv/en/xml.info?%s" % (
self.base, urllib.urlencode({'type': 'operations'}))
request = urllib2.Request(get_ops_url)
response = self.urlopen(request)
doc = etree.fromstring(response.read())
ops = {}
for op in doc.findall('operations/operation'):
ops[op.find('name').text.lower()] = op.attrib['id']
return ops
def urlopen(self, request):
if self.opener is None:
raise Exception("No URL opener defined in geonetwork module!!")
else:
return self.opener.open(request)
def search(self, keywords, startposition, maxrecords, bbox):
"""CSW search wrapper"""
formats = []
for f in self.formats:
formats.append(METADATA_FORMATS[f][0])
return self.getrecords(typenames=' '.join(formats),
keywords=keywords,
startposition=startposition,
maxrecords=maxrecords,
bbox=bbox,
outputschema='http://www.isotc211.org/2005/gmd',
esn='full')
def normalize_bbox(self, bbox):
"""
fix bbox axis order
GeoNetwork accepts x/y
pycsw accepts y/x
"""
if self.type == 'geonetwork':
return bbox
else: # swap coords per standard
return [bbox[1], bbox[0], bbox[3], bbox[2]]
def metadatarecord2dict(self, rec):
"""
accepts a node representing a catalogue result
record and builds a POD structure representing
the search result.
"""
if rec is None:
return None
# Let owslib do some parsing for us...
result = {}
result['uuid'] = rec.identifier
result['title'] = rec.identification.title
result['abstract'] = rec.identification.abstract
keywords = []
for kw in rec.identification.keywords:
keywords.extend(kw['keywords'])
result['keywords'] = keywords
# XXX needs indexing ? how
result['attribution'] = {'title': '', 'href': ''}
result['name'] = result['uuid']
result['bbox'] = {
'minx': rec.identification.bbox.minx,
'maxx': rec.identification.bbox.maxx,
'miny': rec.identification.bbox.miny,
'maxy': rec.identification.bbox.maxy
}
# locate all distribution links
result['download_links'] = self.extract_links(rec)
# construct the link to the Catalogue metadata record (not
# self-indexed)
result['metadata_links'] = [
("text/xml",
"ISO",
self.url_for_uuid(
rec.identifier,
'http://www.isotc211.org/2005/gmd'))]
return result
def extract_links(self, rec):
# fetch all distribution links
links = []
# extract subset of description value for user-friendly display
format_re = re.compile(r".*\((.*)(\s*Format*\s*)\).*?")
if not hasattr(rec, 'distribution'):
return None
if not hasattr(rec.distribution, 'online'):
return None
for link_el in rec.distribution.online:
if link_el.protocol == 'WWW:DOWNLOAD-1.0-http--download':
try:
extension = link_el.name.split('.')[-1]
format = format_re.match(link_el.description).groups()[0]
href = link_el.url
links.append((extension, format, href))
except BaseException:
pass
return links
class CatalogueBackend(BaseCatalogueBackend):
def __init__(self, *args, **kwargs):
self.catalogue = Catalogue(*args, **kwargs)
def get_record(self, uuid):
with self.catalogue:
rec = self.catalogue.get_by_uuid(uuid)
if rec is not None:
rec.links = dict()
rec.links['metadata'] = self.catalogue.urls_for_uuid(uuid)
rec.links['download'] = self.catalogue.extract_links(rec)
return rec
def search_records(self, keywords, start, limit, bbox):
with self.catalogue:
bbox = self.catalogue.normalize_bbox(bbox)
self.catalogue.search(keywords, start + 1, limit, bbox)
# build results into JSON for API
results = [
self.catalogue.metadatarecord2dict(doc)
for v, doc in self.catalogue.records.iteritems()]
result = {'rows': results,
'total': self.catalogue.results['matches'],
'next_page': self.catalogue.results.get('nextrecord', 0)}
return result
def remove_record(self, uuid):
with self.catalogue:
catalogue_record = self.catalogue.get_by_uuid(uuid)
if catalogue_record is None:
return
try:
# this is a bit hacky, delete_layer expects an instance of the layer
# model but it just passes it to a Django template so a dict works
# too.
self.catalogue.delete_layer({"uuid": uuid})
except BaseException:
logger.exception(
'Couldn\'t delete Catalogue record during cleanup()')
def create_record(self, item):
with self.catalogue:
record = self.catalogue.get_by_uuid(item.uuid)
if record is None:
md_link = self.catalogue.create_from_layer(item)
item.metadata_links = [("text/xml", "ISO", md_link)]
else:
self.catalogue.update_layer(item)
|
piensa/geonode
|
geonode/catalogue/backends/generic.py
|
Python
|
gpl-3.0
| 16,971
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AsegurarmiautoComItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
hanjihun/Car
|
asegurarmiauto_com/asegurarmiauto_com/items.py
|
Python
|
mit
| 296
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ModerationsConfig(AppConfig):
name = 'moderations'
|
CareerVillage/slack-moderation
|
src/moderations/apps.py
|
Python
|
mit
| 138
|
#!/usr/bin/env python3
import os
import sys
import redis
app_path = os.path.realpath('%s/../../..' % os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, '%s/lib' % app_path)
import settings
def get_db(conf):
return redis.StrictRedis(host=conf['host'], port=int(conf['port']), db=int(conf['id']))
if __name__ == '__main__':
import argparse
argparser = argparse.ArgumentParser(description="RedisDB clean-up utility")
argparser.add_argument('clean_what', metavar="ACTION",
help="what item group should be cleaned (session, concordance)")
args = argparser.parse_args()
patterns = {
'session': 'session:*',
'concordance': 'concordance:*'
}
if not args.clean_what in patterns:
raise ValueError('Unknown action: %s' % args.clean_what)
settings.load('%s/conf/config.xml' % app_path)
db = get_db(settings.get('plugins', 'db'))
keys = db.keys(patterns[args.clean_what])
i = 0
for key in keys:
db.delete(key)
print(('deleted: %s' % key))
i += 1
print(('Finished deleting %d keys' % i))
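# Editor's usage note (invocation path is an assumption): run from the command
# line, e.g. "python3 cleanup.py session" removes every key matching 'session:*'
# from the Redis database configured in conf/config.xml, and
# "python3 cleanup.py concordance" does the same for 'concordance:*'.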
|
czcorpus/kontext
|
lib/plugins/redis_db/cleanup.py
|
Python
|
gpl-2.0
| 1,129
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the autocomplete example."""
import unittest
import apache_beam as beam
from apache_beam.examples.complete import autocomplete
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
class AutocompleteTest(unittest.TestCase):
WORDS = ['this', 'this', 'that', 'to', 'to', 'to']
def test_top_prefixes(self):
p = TestPipeline()
words = p | beam.Create(self.WORDS)
result = words | autocomplete.TopPerPrefix(5)
# values must be hashable for now
result = result | beam.Map(lambda (k, vs): (k, tuple(vs)))
assert_that(result, equal_to(
[
('t', ((3, 'to'), (2, 'this'), (1, 'that'))),
('to', ((3, 'to'), )),
('th', ((2, 'this'), (1, 'that'))),
('thi', ((2, 'this'), )),
('this', ((2, 'this'), )),
('tha', ((1, 'that'), )),
('that', ((1, 'that'), )),
]))
p.run()
if __name__ == '__main__':
unittest.main()
|
vikkyrk/incubator-beam
|
sdks/python/apache_beam/examples/complete/autocomplete_test.py
|
Python
|
apache-2.0
| 1,833
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
from abc import ABCMeta, abstractmethod
from rbnics.sampling import ParameterSpaceSubset
from rbnics.utils.io import Folders
# Implementation of a class containing an offline/online decomposition of ROM for parametrized problems
class ReductionMethod(object, metaclass=ABCMeta):
def __init__(self, folder_prefix):
# I/O
self.folder_prefix = folder_prefix
self.folder = Folders()
# $$ OFFLINE DATA STRUCTURES $$ #
# Maximum reduced order space dimension to be used for the stopping criterion in the basis selection
self.Nmax = 0
# Tolerance to be used for the stopping criterion in the basis selection
self.tol = 0.
# Training set
self.training_set = ParameterSpaceSubset()
# I/O
self.folder["training_set"] = os.path.join(self.folder_prefix, "training_set")
# $$ ERROR ANALYSIS AND SPEEDUP ANALYSIS DATA STRUCTURES $$ #
# Testing set
self.testing_set = ParameterSpaceSubset()
# I/O
self.folder["testing_set"] = os.path.join(self.folder_prefix, "testing_set")
self.folder["error_analysis"] = os.path.join(self.folder_prefix, "error_analysis")
self.folder["speedup_analysis"] = os.path.join(self.folder_prefix, "speedup_analysis")
# OFFLINE: set maximum reduced space dimension (stopping criterion)
def set_Nmax(self, Nmax, **kwargs):
self.Nmax = Nmax
# OFFLINE: set tolerance (stopping criterion)
def set_tolerance(self, tol, **kwargs):
self.tol = tol
# OFFLINE: set the elements in the training set.
def initialize_training_set(self, mu_range, ntrain, enable_import=True, sampling=None, **kwargs):
# Create I/O folder
self.folder["training_set"].create()
# Test if can import
import_successful = False
if enable_import:
self.training_set.clear()
try:
self.training_set.load(self.folder["training_set"], "training_set")
except OSError:
import_successful = False
else:
import_successful = (len(self.training_set) == ntrain)
if not import_successful:
self.training_set.generate(mu_range, ntrain, sampling)
# Export
self.training_set.save(self.folder["training_set"], "training_set")
return import_successful
# ERROR ANALYSIS: set the elements in the testing set.
def initialize_testing_set(self, mu_range, ntest, enable_import=False, sampling=None, **kwargs):
# Create I/O folder
self.folder["testing_set"].create()
# Test if can import
import_successful = False
if enable_import:
self.testing_set.clear()
try:
self.testing_set.load(self.folder["testing_set"], "testing_set")
except OSError:
import_successful = False
else:
import_successful = (len(self.testing_set) == ntest)
if not import_successful:
self.testing_set.generate(mu_range, ntest, sampling)
# Export
self.testing_set.save(self.folder["testing_set"], "testing_set")
return import_successful
# Perform the offline phase of the reduced order model
@abstractmethod
def offline(self):
raise NotImplementedError("Please implement the offline phase of the reduced order model.")
# Initialize data structures required for the offline phase
def _init_offline(self):
pass
# Finalize data structures required after the offline phase
def _finalize_offline(self):
pass
# Compute the error of the reduced order approximation with respect to the full order one
# over the testing set
@abstractmethod
def error_analysis(self, N_generator=None, filename=None, **kwargs):
raise NotImplementedError("Please implement the error analysis of the reduced order model.")
# Initialize data structures required for the error analysis phase
def _init_error_analysis(self, **kwargs):
pass
# Finalize data structures required after the error analysis phase
def _finalize_error_analysis(self, **kwargs):
pass
# Compute the speedup analysis of the reduced order approximation with respect to the full order one
# over the testing set
@abstractmethod
def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
raise NotImplementedError("Please implement the speedup analysis of the reduced order model.")
# Initialize data structures required for the speedup analysis phase
def _init_speedup_analysis(self, **kwargs):
pass
# Finalize data structures required after the speedup analysis phase
def _finalize_speedup_analysis(self, **kwargs):
pass
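# Editor's illustrative sketch (not part of RBniCS): a hypothetical minimal
# concrete subclass showing which abstract methods must be overridden before a
# reduction method can be instantiated; the bodies here are placeholders only.
class _ExampleReductionMethod(ReductionMethod):
    def offline(self):
        # a real implementation would enrich the reduced basis over the
        # training set until self.Nmax or self.tol is reached
        pass
    def error_analysis(self, N_generator=None, filename=None, **kwargs):
        # a real implementation would compare reduced and full order solutions
        # over self.testing_set
        pass
    def speedup_analysis(self, N_generator=None, filename=None, **kwargs):
        # a real implementation would time reduced versus full order solves
        # over self.testing_set
        pass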
|
mathLab/RBniCS
|
rbnics/reduction_methods/base/reduction_method.py
|
Python
|
lgpl-3.0
| 4,987
|
# Copyright (C) 2008 Guild of Writers PyPRP Project Team
# See the file AUTHORS for more info about the team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Please see the file LICENSE for the full license.
import struct,StringIO,cStringIO,time
import Image,ImageFilter
from prp_HexDump import *
class tColor:
def __init__(self,r=0,g=0,b=0):
self.r=r
self.g=g
self.b=b
class tImage:
def __init__(self,w=0,h=0):
self.w=w
self.h=h
self.data=None #RGBA image
self.rawdata=cStringIO.StringIO() #compressed raw image
self.alpha=tColor(255,0,255)
self.hasalpha=0
def read(self,buf):
self.rawdata.write(buf.read((self.w * self.h)*4))
def write(self,buf):
self.rawdata.seek(0)
buf.write(self.rawdata.read())
def show(self):
self.toRGBA()
self.applyAlpha()
im = Image.new("RGBA",(self.w,self.h))
self.data.seek(0)
im.fromstring(self.data.read())
im.show()
def resize(self,w,h,blur=False):
im = Image.new("RGBA",(self.w,self.h))
self.data.seek(0)
im.fromstring(self.data.read())
im2=im.resize((w,h),Image.ANTIALIAS)
if blur:
im3 = im2.filter(ImageFilter.SMOOTH)
else:
im3 = im2
self.data=cStringIO.StringIO()
self.data.write(im3.tostring())
#print self.data.tell(),w,h
self.w=w
self.h=h
def resize_alphamult(self,w,h,alphamult=1.0,blur=False):
im = Image.new("RGBA",(self.w,self.h))
self.data.seek(0)
im.fromstring(self.data.read())
im2=im.resize((w,h),Image.ANTIALIAS)
if blur and w > 2 and h > 2:
# No point in blurring if there's only 2 pixels left :)
# Besides that, it gives trouble if you do it with less than 2 pixels
im3 = im2.filter(ImageFilter.BLUR)
else:
im3 = im2
self.data=cStringIO.StringIO()
self.data.write(im3.tostring())
self.w=w
self.h=h
if not float(alphamult) == 1.0: # No point in doing this for alphamults of 1.0 exactly....
self.alphamult(alphamult)
def alphamult(self,value):
# Multiplies the alpha value for all pixels in this image with the given value
if float(value) < 0.0:
value = 0.0
aux=cStringIO.StringIO()
self.data.seek(0)
w = self.data.read(4)
while w!="": #RGBA
r,g,b,a = struct.unpack("BBBB",w)
a = float(a) * float(value)
#RGBA
if a > 255:
a = 255
aux.write(struct.pack("BBBB",r,g,b,a))
w = self.data.read(4)
self.data=aux
def save(self,name):
self.toRGBA()
#self.fromRGBA()
#self.toRGBA()
if name[-4:]!=".png":
self.applyAlpha()
im = Image.new("RGBA",(self.w,self.h))
#self.data.read()
#print self.data.tell(), self.w, self.h, self.w*self.h*4
self.data.seek(0)
im.fromstring(self.data.read())
im.save(name)
def applyAlpha(self,bitbased=0):
if not self.hasalpha:
return
aux=cStringIO.StringIO()
self.data.seek(0)
w = self.data.read(4)
while w!="": #RGBA
r,g,b,a = struct.unpack("BBBB",w)
if bitbased:
if a<128:
r=self.alpha.r
g=self.alpha.g
b=self.alpha.b
a=0
else:
a=255
else:
#alpha
r=((r * a) / 255) + ((self.alpha.r * (255-a)) / 255)
g=((g * a) / 255) + ((self.alpha.g * (255-a)) / 255)
b=((b * a) / 255) + ((self.alpha.b * (255-a)) / 255)
#RGBA
aux.write(struct.pack("BBBB",r,g,b,a))
w = self.data.read(4)
self.data=aux
def convert(self):
self.data=cStringIO.StringIO()
self.rawdata.seek(0)
w = self.rawdata.read(4)
while w!="": #BGRA
b,g,r,a = struct.unpack("BBBB",w)
#RGBA
self.data.write(struct.pack("BBBB",r,g,b,a))
if a!=255:
self.hasalpha=1
w = self.rawdata.read(4)
def iconvert(self):
self.rawdata=cStringIO.StringIO()
self.data.seek(0)
w = self.data.read(4)
while w!="": #RGBA
b,g,r,a = struct.unpack("BBBB",w)
#BGRA
self.rawdata.write(struct.pack("BBBB",r,g,b,a))
w = self.data.read(4)
def toRGBA(self):
self.convert()
def fromRGBA(self):
self.iconvert()
def set(self):
self.fromRGBA()
class tDxtImage(tImage):
def __init__(self,w,h,type):
tImage.__init__(self,w,h)
self.type=type
if type==1:
self.texel=8
else:
self.texel=16
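# Editor's note, worked size example based on read() below: a 256x256 type-1
# (DXT1) level occupies 256*256/2 = 32768 bytes of raw data, while a level with
# 16-byte texels occupies 256*256 = 65536 bytes.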
def read(self,buf):
if self.w<=2 or self.h<=2:
if self.w==0:
self.w=1
if self.h==0:
self.h=1
self.rawdata.write(buf.read((self.w*self.h)*4))
return
if (self.w % 4) or (self.h % 4):
raise RuntimeError, "Invalid DXT size %ix%i"%(self.w,self.h)
#size=((self.w*self.h)*self.texel)/16
texels=(self.w*self.h)/self.texel
size=(self.w*self.h)
if self.type==1:
size=size/2
self.rawdata.write(buf.read(size))
def toRGBA(self):
if self.w<=2 or self.h<=2:
self.convert()
return
self.rawdata.seek(0)
y=0
self.data=cStringIO.StringIO()
#self.data.truncate(self.h * self.w * 4)
#print "chicki",self.data.tell()
for i in range((self.h * self.w)/4):
self.data.write(" ")
assert(self.data.tell()==(self.h * self.w)*4)
self.data.seek(0)
horn = (self.w)*4
hell = (self.w-4)*4
while y<self.h:
x=0
#print y,self.h,self.w, self.data.tell()
#start=time.clock()
while x<self.w:
self.texel2rgba(self.rawdata,x,y,hell)
x+=4
if x<self.w:
self.data.seek(-3 * horn,1)
#end = time.clock()
#print "time %0.3f" % (end-start)
y+=4
#self.data.seek(3*horn,1)
def fromRGBA(self):
if self.w<=2 or self.h<=2:
self.iconvert()
return
if (self.w % 4) or (self.h % 4):
raise RuntimeError, "Invalid DXT size %ix%i"%(self.w,self.h)
self.data.seek(0)
self.rawdata=cStringIO.StringIO()
horn = (self.w)*4
hell = (self.w-4) * 4
y=0
while y<self.h:
x=0
while x<self.w:
self.rgba2texel(self.data,hell)
x+=4
if x<self.w:
self.data.seek(-3 * horn,1)
y+=4
#self.data.seek(3*horn,1)
def texel2rgba(self,texel,x,y,w):
alpha=None
if self.type!=1:
alpha=self.getAlpha(texel)
self.hasalpha=1
u64, = struct.unpack("<Q",texel.read(8))
#print "<%X" %u64
#a = 255
c0 = u64 & 0xFFFF
u64 = u64>>16
c1 = u64 & 0xFFFF
u64 = u64>>16
b0 = (c0 & 0x1f) << 3
g0 = ((c0>>5) & 0x3f) << 2
r0 = ((c0>>11) & 0x1f) << 3
b1 = (c1 & 0x1f) << 3
g1 = ((c1>>5) & 0x3f) << 2
r1 = ((c1>>11) & 0x1f) << 3
#print b0,g0,r0,b1,g1,r1
r=[]
g=[]
b=[]
a=[]
r.append(r0)
r.append(r1)
g.append(g0)
g.append(g1)
b.append(b0)
b.append(b1)
a.append(255)
a.append(255)
if self.type!=1 or c0>c1:
max=2
else:
max=1
for i in range(max):
bi=(((max-i) * b0) + ((i+1)*b1))/(max+1)
gi=(((max-i) * g0) + ((i+1)*g1))/(max+1)
ri=(((max-i) * r0) + ((i+1)*r1))/(max+1)
r.append(ri)
g.append(gi)
b.append(bi)
a.append(255)
if max==1:
r.append(ri)
g.append(gi)
b.append(bi)
a.append(0)
self.hasalpha=1
#mx=x
#my=y
for i in range(4):
for e in range(4):
test=u64 & 0x03
u64 = u64>>2
rf = r[test]
gf = g[test]
bf = b[test]
if alpha==None:
af = a[test]
else:
af = alpha[(i*4)+e]
#print b,g,r #RGBA
self.data.write(struct.pack("BBBB",rf,gf,bf,af))
#mx=mx+1
if i!=3:
self.data.seek(w,1)
#my=my+1
#mx=x
def getAlpha(self,alpha):
#alpha0, alpha1 = struct.unpack("BB",alpha.read(2))
u64, = struct.unpack("<Q",alpha.read(8))
alpha0 = u64 & 0xFF
u64 = u64>>8
alpha1 = u64 & 0xFF
u64 = u64>>8
a=[]
a.append(alpha0)
a.append(alpha1)
if alpha0 > alpha1:
max=6
else:
max=4
for i in range(max):
ai=(((max-i) * alpha0) + ((i+1)*alpha1))/(max+1)
a.append(ai)
if max==4:
a.append(0)
a.append(255)
result=[]
for i in range(16):
test=u64 & 0x07
u64 = u64>>3
res=a[test]
result.append(res)
return result
def rgba2texel(self,input,w):
r=[]
g=[]
b=[]
a=[]
alpha=0
for i in range(4):
for e in range(4):
#print input.tell(),self.w,self.h,w
ri,gi,bi,ai = struct.unpack("BBBB",input.read(4))
r.append(ri)
g.append(gi)
b.append(bi)
a.append(ai)
if self.type!=1 or (alpha==0 and ai<128):
self.hasalpha=1
alpha=1
if i!=3:
input.seek(w,1)
if self.type!=1:
self.writeAlpha(a)
#self.type=1
maxi=0
for i in range(16):
for e in range(16):
d=self.distance(r[i],r[e],g[i],g[e],b[i],b[e])
if d>=maxi:
maxi=d
r0=r[i]
r1=r[e]
g0=g[i]
g1=g[e]
b0=b[i]
b1=b[e]
c0 = r0>>3
c0 = c0<<6
c0 |= g0>>2
c0 = c0<<5
c0 |= b0>>3
c1 = r1>>3
c1 = c1<<6
c1 |= g1>>2
c1 = c1<<5
c1 |= b1>>3
#print b0,g0,r0,b1,g1,r1
#check
if c0==c1:
if c0==0x00:
c1=0xFF
r1=0xFF
g1=0xFF
b1=0xFF
else:
c1=0x00
r1=0x00
g1=0x00
b1=0x00
rt=[]
gt=[]
bt=[]
if (not alpha) or self.type!=1:
aa = max(c0,c1)
bb = min(c0,c1)
maxi=2
else:
aa = min(c0,c1)
bb = max(c0,c1)
maxi=1
if aa==c0:
rt.append(r0)
rt.append(r1)
gt.append(g0)
gt.append(g1)
bt.append(b0)
bt.append(b1)
else:
rt.append(r1)
rt.append(r0)
gt.append(g1)
gt.append(g0)
bt.append(b1)
bt.append(b0)
c0 = 0L
c1 = 0L
c0 |= aa
c1 |= bb
#assert(c0>c1)
u64 = 0L
u64 |= c0
#print "%X" %u64
#u64 = u64 << 16
u64 |= c1<<16
#print "%X" %u64
#u64 = u64 << 2
#print "%X" %u64
#print ">%X %X %X" %(u64,c0,c1)
for i in range(maxi):
bi=(((maxi-i) * bt[0]) + ((i+1)*bt[1]))/(maxi+1)
gi=(((maxi-i) * gt[0]) + ((i+1)*gt[1]))/(maxi+1)
ri=(((maxi-i) * rt[0]) + ((i+1)*rt[1]))/(maxi+1)
rt.append(ri)
gt.append(gi)
bt.append(bi)
if maxi==1:
rt.append(ri)
gt.append(gi)
bt.append(bi)
for i in range(16):
mini=255 ** 2 + 255 ** 2 + 255 ** 2
if maxi==1 and a[i]<128:
result=3L
else:
for e in range(maxi+1,-1,-1):
#print r[i],rt[e],g[i],gt[e],b[i]
d=self.distance(r[i],rt[e],g[i],gt[e],b[i],bt[e])
if d<=mini:
result=0L
result |=e
mini=d
u64 |= result << ((2*i)+32)
#if i!=15:
# u64 = u64 << 2
#print ">%X" %u64
self.rawdata.write(struct.pack("<Q",u64))
def distance(self,a1,a2,b1,b2,c1,c2):
return pow(a1-a2,2) + pow(b1-b2,2) + pow(c1-c2,2)
def writeAlpha(self,alpha):
amin=255
amax=0
max=6
for a in alpha:
if a==0:
max=4
elif a<amin:
amin=a
if a==255:
max=4
elif a>amax:
amax=a
a0=0L
a1=0L
if max==6:
a0 |=amax
a1 |=amin
else:
a0 |=amin
a1 |=amax
u64 = 0L
u64 |= a0
#u64 = u64<<8
u64 |= a1<<8
#u64 = u64<<3
if not a0>a1:
max=4
t=[]
t.append(a0)
t.append(a1)
for i in range(max):
ai=(((max-i) * a0) + ((i+1)*a1))/(max+1)
t.append(ai)
if max==4:
t.append(0)
t.append(255)
w=0
x=0
for a in alpha:
dis=255
for e in range(7,-1,-1):
if abs(a-t[e])<=dis:
dis=abs(a-t[e])
w=0L
w|=e
x=x+1
u64 |= w << ((3*(x-1))+16)
#if x!=16:
# u64 = u64<<3
self.rawdata.write(struct.pack("<Q",u64))
class tJpgImage(tImage):
def __init__(self,w,h,type=1):
tImage.__init__(self,w,h)
self.type=type
self.extra=None
self.jpg1size=0
self.jpg2size=0
# 0x00 - jpg1 RGB + jpg2 R=Alpha
# 0x01 - RLE RGB + jpg2 R=Alpha
# 0x02 - jpg1 RGB + RLE alpha 0x00FF0000
# 0x03 - RLE RGB + RLE alpha 0x00FF0000
def read(self,buf):
self.type, = struct.unpack("B",buf.read(1))
self.rawdata=cStringIO.StringIO()
self.rawdata.write(struct.pack("B",self.type))
if self.type not in [0x00,0x01,0x02,0x03]:
raise "type is %02X" %(self.type)
#The image
if self.type & 0x01: #RLE 1
count=1
while count:
count,color = struct.unpack("<II",buf.read(8))
self.rawdata.write(struct.pack("<II",count,color))
else: #JPG 1
self.jpg1size, = struct.unpack("<I",buf.read(4))
self.rawdata.write(struct.pack("<I",self.jpg1size))
self.rawdata.write(buf.read(self.jpg1size))
#The alpha channel encoded in the red channel
if self.type & 0x02: #RLE 2
count=1
while count:
count,color = struct.unpack("<II",buf.read(8))
self.rawdata.write(struct.pack("<II",count,color))
else: #JPG 1
self.jpg2size, = struct.unpack("<I",buf.read(4))
self.rawdata.write(struct.pack("<I",self.jpg2size))
self.rawdata.write(buf.read(self.jpg2size))
def toRGBA(self):
self.rawdata.seek(0)
self.data=cStringIO.StringIO()
aux=cStringIO.StringIO()
self.type, = struct.unpack("B",self.rawdata.read(1))
#Note: It does not make much sense to store images with flags 0x03,
#which is one of the reasons why so few examples of it have been found.
if self.type & 0x01: #RLE
count=1
tcount=0
while count:
count, = struct.unpack("<I",self.rawdata.read(4))
tcount=tcount + count
b,g,r,a = struct.unpack("BBBB",self.rawdata.read(4))
for i in range(count):
aux.write(struct.pack("BBB",r,g,b))
assert(tcount==self.w * self.h)
else: #JPG
self.jpg1size, = struct.unpack("<I",self.rawdata.read(4))
jpg1=cStringIO.StringIO()
jpg1.write(self.rawdata.read(self.jpg1size))
jpg1.seek(0)
me=Image.open(jpg1)
#me.show()
aux.write(me.tostring())
del jpg1
del me
aux.seek(0)
self.hasalpha=0
if self.type & 0x02: #RLE
count=1
tcount=0
while count:
count, = struct.unpack("<I",self.rawdata.read(4))
tcount=tcount + count
b,g,r,a = struct.unpack("BBBB",self.rawdata.read(4))
assert(a==0)
assert(b==0)
assert(g==0)
if r!=255:
self.hasalpha=1
for i in range(count):
rx,gx,bx = struct.unpack("BBB",aux.read(3))
self.data.write(struct.pack("BBBB",rx,gx,bx,r))
assert(tcount==self.w * self.h)
else: #JPG
self.jpg2size, = struct.unpack("<I",self.rawdata.read(4))
jpg2=cStringIO.StringIO()
jpg2.write(self.rawdata.read(self.jpg2size))
jpg2.seek(0)
me=Image.open(jpg2)
#me.show()
alpha=cStringIO.StringIO()
alpha.write(me.tostring())
alpha.seek(0)
del jpg2
del me
for i in range(self.w * self.h):
rx,gx,bx = struct.unpack("BBB",aux.read(3))
r,g,b = struct.unpack("BBB",alpha.read(3))
self.data.write(struct.pack("BBBB",rx,gx,bx,r))
if r!=255:
self.hasalpha=1
def fromRGBA(self):
self.rawdata=cStringIO.StringIO()
self.data.seek(0)
self.rawdata.write(struct.pack("B",self.type))
if self.type not in [0x00,0x01,0x02,0x03]:
raise "Unsupported type"
#the image
if self.type & 0x01: #RLE 1
count=0
ri=0
gi=0
bi=0
first=0
for i in range(self.w * self.h):
r,g,b,a = struct.unpack("BBBB",self.data.read(4))
if not first:
first=1
ri=r
gi=g
bi=b
if ri==r and gi==g and bi==b:
count=count+1
else:
self.rawdata.write(struct.pack("<I",count))
self.rawdata.write(struct.pack("BBBB",bi,gi,ri,0))
count=1
ri=r
gi=g
bi=b
if count:
self.rawdata.write(struct.pack("<I",count))
self.rawdata.write(struct.pack("BBBB",bi,gi,ri,0))
self.rawdata.write(struct.pack("<II",0,0))
else: #jpg 1
jpg1=cStringIO.StringIO()
me = Image.new("RGBA",(self.w,self.h))
me.fromstring(self.data.read(self.w * self.h * 4))
me.save(jpg1,"JPEG")
del me
jpg1.read()
self.jpg1size = jpg1.tell()
self.rawdata.write(struct.pack("<I",self.jpg1size))
jpg1.seek(0)
self.rawdata.write(jpg1.read())
del jpg1
self.data.seek(0)
#alpha channel
if self.type & 0x02: #RLE 2
count=0
ai=0
first=0
for i in range(self.w * self.h):
r,g,b,a = struct.unpack("BBBB",self.data.read(4))
if not first:
first=1
ai=a
if ai==a:
count=count+1
else:
self.rawdata.write(struct.pack("<I",count))
self.rawdata.write(struct.pack("BBBB",0,0,ai,0))
count=1
ai=a
if count:
self.rawdata.write(struct.pack("<I",count))
self.rawdata.write(struct.pack("BBBB",0,0,ai,0))
self.rawdata.write(struct.pack("<II",0,0))
else: #jpg 2
aux=cStringIO.StringIO()
for i in range(self.w * self.h):
r,g,b,a = struct.unpack("BBBB",self.data.read(4))
aux.write(struct.pack("BBBB",a,0,0,255))
aux.seek(0)
jpg1=cStringIO.StringIO()
me = Image.new("RGBA",(self.w,self.h))
me.fromstring(aux.read(self.w * self.h * 4))
me.save(jpg1,"JPEG")
del me
jpg1.read()
self.jpg2size = jpg1.tell()
self.rawdata.write(struct.pack("<I",self.jpg2size))
jpg1.seek(0)
self.rawdata.write(jpg1.read())
del jpg1
|
Jrius/PyPRP
|
PyPRP/prp_DXTConv.py
|
Python
|
gpl-2.0
| 22,490
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.models.test_results import TestResult
class TestResultsTest(unittest.TestCase):
def test_defaults(self):
result = TestResult("foo")
self.assertEqual(result.test_name, 'foo')
self.assertEqual(result.failures, [])
self.assertEqual(result.test_run_time, 0)
def test_loads(self):
result = TestResult(test_name='foo',
failures=[],
test_run_time=1.1)
s = result.dumps()
new_result = TestResult.loads(s)
self.assertIsInstance(new_result, TestResult)
self.assertEqual(new_result, result)
# Also check that != is implemented.
self.assertFalse(new_result != result)
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
|
Python
|
bsd-3-clause
| 2,300
|
import json
from collections import defaultdict
from constants import ROOT_FORM,ROOT_LEMMA,ROOT_POS,EMPTY
class Data():
'''represent dota instance of one sentence'''
current_sen = 1
def __init__(self):
self.tree = None
self.coreference = None
#self.dependency = []
self.text = None
self.tokens = []
self.amr = None
self.gold_graph = None
self.sentID = self.current_sen
self.comment = None
self.trace_dict = defaultdict(set)
self.tokens.append({'id':0,'form':ROOT_FORM,'lemma':ROOT_LEMMA,'pos':ROOT_POS,'ne':'O','rel':EMPTY})
@staticmethod
def newSen():
Data.current_sen += 1 # won't be pickled
#self.dependency.append([])
#self.tokens.append([])
def get_tokenized_sent(self):
return [tok['form'] for tok in self.tokens][1:]
def addTree( self, tree ):
self.tree = tree
def addText( self, sentence ):
self.text = sentence
def addToken( self, token, offset_begin, offset_end, lem, pop, ne ):
tok_inst = {}
tok_inst['id'] = len(self.tokens)
tok_inst['form'] = token
#tok_inst['offset_begin'] = offset_begin
#tok_inst['offset_end'] = offset_end
tok_inst['lemma'] = lem
tok_inst['pos'] = pop
tok_inst['ne'] = ne
tok_inst['rel'] = EMPTY
self.tokens.append(tok_inst)
def addCoref( self, coref_set):
self.coreference = coref_set
def addTrace(self, rel, gov, trace):
self.trace_dict[int(gov)].add((rel, int(trace)))
def addDependency( self, rel, l_index, r_index):
'''CoNLL dependency format'''
assert int(r_index) == self.tokens[int(r_index)]['id'] and int(l_index) == self.tokens[int(l_index)]['id']
self.tokens[int(r_index)]['head'] = int(l_index)
self.tokens[int(r_index)]['rel'] = rel
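# Editor's note, illustrative call: addDependency('nsubj', 2, 1) records token 2
# as the head of token 1, i.e. tokens[1]['head'] = 2 and tokens[1]['rel'] = 'nsubj'.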
def addProp(self, prd, frmset, arg, label):
self.tokens[prd]['frmset'] = frmset
if 'args' in self.tokens[prd]:
self.tokens[prd]['args'][arg]=label
else:
self.tokens[prd]['args']={arg:label}
# bi-directional
if 'pred' in self.tokens[arg]:
self.tokens[arg]['pred'][prd]=label
else:
self.tokens[arg]['pred']={prd:label}
def addAMR(self,amr):
self.amr = amr
def addComment(self,comment):
self.comment = comment
def addGoldGraph(self,gold_graph):
self.gold_graph = gold_graph
def get_ne_span(self,tags_to_merge):
pre_ne_id = None
ne_span_dict = defaultdict(list)
for tok in self.tokens:
if tok['ne'] in tags_to_merge:
if pre_ne_id is None:
ne_span_dict[tok['id']].append(tok['id'])
pre_ne_id = tok['id']
else:
ne_span_dict[pre_ne_id].append(tok['id'])
else:
pre_ne_id = None
return ne_span_dict
def printDep(self,tagged=True):
out_str = ''
for tok in self.tokens:
if 'head' in tok:
gov_id = tok['head']
if tagged:
out_str += "%s(%s-%s:%s, %s-%s:%s)\n" % (tok['rel'], self.tokens[gov_id]['form'], gov_id, self.tokens[gov_id]['pos'], tok['form'], tok['id'], tok['pos'])
else:
out_str += "%s(%s-%s, %s-%s)\n" % (tok['rel'], self.tokens[gov_id]['form'], gov_id, tok['form'], tok['id'])
return out_str
def toJSON(self):
json = {}
json['tree'] = self.tree
json['coreference'] = self.coreference
#json['dependency'] = self.dependency
json['text'] = self.text
json['tokens'] = self.tokens
json['amr'] = self.amr
return json
## def find
|
Juicechuan/AMRParsing
|
stanfordnlp/data.py
|
Python
|
gpl-2.0
| 4,053
|
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.db import models_v2
from neutron.extensions import l3
from neutron_lib import constants as l3_constants
from neutron_lib import exceptions as n_exc
from networking_cisco._i18n import _
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.db.l3 import l3_models
from networking_cisco.plugins.cisco.db.l3.l3_router_appliance_db import (
L3RouterApplianceDBMixin)
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.plugins.cisco.l3 import drivers
LOG = logging.getLogger(__name__)
DEVICE_OWNER_GLOBAL_ROUTER_GW = cisco_constants.DEVICE_OWNER_GLOBAL_ROUTER_GW
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
TENANT_HSRP_GRP_RANGE = 1
TENANT_HSRP_GRP_OFFSET = 1064
EXT_HSRP_GRP_RANGE = 1
EXT_HSRP_GRP_OFFSET = 1064
N_ROUTER_PREFIX = 'nrouter-'
DEV_NAME_LEN = 14
class TopologyNotSupportedByRouterError(n_exc.Conflict):
message = _("Requested topology cannot be supported by router.")
class ASR1kL3RouterDriver(drivers.L3RouterBaseDriver):
def create_router_precommit(self, context, router_context):
pass
def create_router_postcommit(self, context, router_context):
pass
def update_router_precommit(self, context, router_context):
pass
def update_router_postcommit(self, context, router_context):
# Whenever a gateway is added to, or removed from, a router hosted on
# a hosting device, we must ensure that a global router is running
# (for add operation) or not running (for remove operation) on that
# hosting device.
current = router_context.current
if current[HOSTING_DEVICE_ATTR] is None:
return
e_context = context.elevated()
if current['gw_port_id']:
self._conditionally_add_global_router(e_context, current)
else:
self._conditionally_remove_global_router(
e_context, router_context.original, True)
def delete_router_precommit(self, context, router_context):
pass
def delete_router_postcommit(self, context, router_context):
pass
def schedule_router_precommit(self, context, router_context):
pass
def schedule_router_postcommit(self, context, router_context):
# When the hosting device hosts a Neutron router with external
# connectivity, a "global" router (modeled as a Neutron router) must
# also run on the hosting device (outside of any VRF) to enable the
# connectivity.
current = router_context.current
if current['gw_port_id'] and current[HOSTING_DEVICE_ATTR] is not None:
self._conditionally_add_global_router(context.elevated(), current)
def unschedule_router_precommit(self, context, router_context):
pass
def unschedule_router_postcommit(self, context, router_context):
# When there is no longer any router with external gateway hosted on
# a hosting device, the global router on that hosting device can also
# be removed.
current = router_context.current
hd_id = current[HOSTING_DEVICE_ATTR]
if current['gw_port_id'] and hd_id is not None:
self._conditionally_remove_global_router(context.elevated(),
current)
def add_router_interface_precommit(self, context, r_port_context):
# Inside an ASR1k, VLAN sub-interfaces are used to connect to internal
# neutron networks. Only one such sub-interface can be created for each
# VLAN. As the VLAN sub-interface is added to the VRF representing the
# Neutron router, we must only allow one Neutron router to attach to a
# particular Neutron subnet/network.
if (r_port_context.router_context.current[routerrole.ROUTER_ROLE_ATTR]
== ROUTER_ROLE_HA_REDUNDANCY):
# redundancy routers can be exempt as we check the user visible
# routers and the request will be rejected there.
return
e_context = context.elevated()
if r_port_context.current is None:
sn = self._core_plugin.get_subnet(e_context,
r_port_context.current_subnet_id)
net_id = sn['network_id']
else:
net_id = r_port_context.current['network_id']
router_id = r_port_context.router_context.current['id']
filters = {'network_id': [net_id],
'device_owner': [bc.constants.DEVICE_OWNER_ROUTER_INTF]}
for port in self._core_plugin.get_ports(e_context, filters=filters):
device_id = port['device_id']
if device_id is None:
continue
try:
router = self._l3_plugin.get_router(e_context, device_id)
if (router[routerrole.ROUTER_ROLE_ATTR] is None and
router['id'] != router_id):
# only a single router can connect to multiple subnets
# on the same internal network
raise TopologyNotSupportedByRouterError()
except n_exc.NotFound:
if self._l3_plugin.get_ha_group(e_context, device_id):
# Since this is a port for the HA VIP address, we can
# safely ignore it
continue
else:
LOG.warning(
'Spurious router port %s prevents attachment from'
' being performed. Try attaching again later, and '
'if the operation then fails again, remove the '
'spurious port', port['id'])
raise TopologyNotSupportedByRouterError()
def add_router_interface_postcommit(self, context, r_port_context):
pass
def remove_router_interface_precommit(self, context, r_port_context):
pass
def remove_router_interface_postcommit(self, context, r_port_context):
pass
def create_floatingip_precommit(self, context, fip_context):
pass
def create_floatingip_postcommit(self, context, fip_context):
pass
def update_floatingip_precommit(self, context, fip_context):
pass
def update_floatingip_postcommit(self, context, fip_context):
pass
def delete_floatingip_precommit(self, context, fip_context):
pass
def delete_floatingip_postcommit(self, context, fip_context):
pass
def ha_interface_ip_address_needed(self, context, router, port,
ha_settings_db, ha_group_uuid):
if port['device_owner'] == bc.constants.DEVICE_OWNER_ROUTER_GW:
return False
else:
return True
def generate_ha_group_id(self, context, router, port, ha_settings_db,
ha_group_uuid):
if port['device_owner'] in {bc.constants.DEVICE_OWNER_ROUTER_GW,
DEVICE_OWNER_GLOBAL_ROUTER_GW}:
ri_name = self._router_name(router['id'])[8:DEV_NAME_LEN]
group_id = int(ri_name, 16) % TENANT_HSRP_GRP_RANGE
group_id += TENANT_HSRP_GRP_OFFSET
return group_id
else:
net_id_digits = port['network_id'][:6]
group_id = int(net_id_digits, 16) % EXT_HSRP_GRP_RANGE
group_id += EXT_HSRP_GRP_OFFSET
return group_id
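# Editor's observation, worked arithmetic with the module constants above: since
# TENANT_HSRP_GRP_RANGE and EXT_HSRP_GRP_RANGE are both 1, any value mod 1 is 0,
# so e.g. int('1a2b3c', 16) % 1 + 1064 evaluates to 1064 in either branch; larger
# *_RANGE values would spread group ids over offset..offset+range-1.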
def pre_backlog_processing(self, context):
LOG.info('Performing pre-backlog processing')
filters = {routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
global_routers = self._l3_plugin.get_routers(context, filters=filters)
if not global_routers:
LOG.debug("There are no global routers")
return
for gr in global_routers:
filters = {
HOSTING_DEVICE_ATTR: [gr[HOSTING_DEVICE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_HA_REDUNDANCY, None]
}
invert_filters = {'gw_port_id': [None]}
num_rtrs = self._l3_plugin.get_routers_count_extended(
context, filters=filters, invert_filters=invert_filters)
LOG.debug("Global router %(name)s[%(id)s] with hosting_device "
"%(hd)s has %(num)d routers with gw_port set on that "
"device",
{'name': gr['name'], 'id': gr['id'],
'hd': gr[HOSTING_DEVICE_ATTR], 'num': num_rtrs, })
if num_rtrs == 0:
LOG.info(
"Global router %(name)s[id:%(id)s] is present for "
"hosting device %(hd)s but there are no tenant or "
"redundancy routers with gateway set on that hosting "
"device. Proceeding to delete global router.",
{'name': gr['name'], 'id': gr['id'],
'hd': gr[HOSTING_DEVICE_ATTR]})
self._delete_global_router(context, gr['id'])
filters = {
#TODO(bmelande): Filter on routertype of global router
#routertype.TYPE_ATTR: [routertype_id],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
log_global_routers = self._l3_plugin.get_routers(
context, filters=filters)
if log_global_routers:
log_global_router_id = log_global_routers[0]['id']
self._delete_global_router(context, log_global_router_id,
logical=True)
def post_backlog_processing(self, context):
pass
# ---------------- Create workflow functions -----------------
def _conditionally_add_global_router(self, context, tenant_router):
# We could filter on hosting device id but we don't so we get all
# global routers for this router type. We can then use that count to
# determine which ha priority a new global router should get.
filters = {
routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
global_routers = self._l3_plugin.get_routers(
context, filters=filters)
hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
hosting_device_id = tenant_router[HOSTING_DEVICE_ATTR]
ext_nw_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
global_router = hd_to_gr_dict.get(hosting_device_id)
logical_global_router = self._get_logical_global_router(context,
tenant_router)
self._conditionally_add_auxiliary_external_gateway_port(
context, logical_global_router, ext_nw_id, tenant_router, True)
if global_router is None:
# must create global router on hosting device
global_router = self._create_global_router(
context, hosting_device_id, hd_to_gr_dict, tenant_router,
logical_global_router)
self._conditionally_add_auxiliary_external_gateway_port(
context, global_router, ext_nw_id, tenant_router)
self._l3_plugin.add_type_and_hosting_device_info(context,
global_router)
for ni in self._l3_plugin.get_notifiers(context, [global_router]):
if ni['notifier']:
ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_add_auxiliary_external_gateway_port(
self, context, global_router, ext_net_id, tenant_router,
provision_ha=False, port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
# the global router may or may not have an interface on the
# external network that the tenant router uses
filters = {
'device_id': [global_router['id']],
'device_owner': [port_type]}
ext_net_port = {
p['network_id']: p for p in
self._core_plugin.get_ports(context, filters=filters)}
if ext_net_id in ext_net_port:
# already connected to the external network, called if
# new subnets are added to the network
aux_gw_port = self._update_auxiliary_external_gateway_port(
context, global_router, ext_net_id, ext_net_port)
if provision_ha:
for subnet in aux_gw_port[ext_net_id]['fixed_ips']:
self._provision_port_ha(context, aux_gw_port[ext_net_id],
subnet, global_router)
else:
# not connected to the external network, so let's fix that
aux_gw_port = self._create_auxiliary_external_gateway_port(
context, global_router, ext_net_id, tenant_router, port_type)
if provision_ha:
for subnet in aux_gw_port['fixed_ips']:
self._provision_port_ha(context, aux_gw_port, subnet,
global_router)
def _update_auxiliary_external_gateway_port(
self, context, global_router, ext_net_id, port):
# When a new subnet is added to an external network, the auxiliary
# gateway port in the global router must be updated with the new
# subnet_id so an ip from that subnet is assigned to the gateway port
ext_network = self._core_plugin.get_network(context, ext_net_id)
fixed_ips = port[ext_net_id]['fixed_ips']
# fetch the subnets the port is currently connected to
subnet_id_list = [fixedip['subnet_id'] for fixedip in fixed_ips]
# add the new subnet
for subnet_id in ext_network['subnets']:
if subnet_id not in subnet_id_list:
fixed_ip = {'subnet_id': subnet_id}
fixed_ips.append(fixed_ip)
self._core_plugin.update_port(context, port[ext_net_id]['id'],
({'port': {'fixed_ips':
fixed_ips}}))
return port
def _create_auxiliary_external_gateway_port(
self, context, global_router, ext_net_id, tenant_router,
port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
# When a global router is connected to an external network then a
# special type of gateway port is created on that network. Such a
# port is called an auxiliary gateway port. It has an ip address on
# each subnet of the external network. A (logical) global router
# never has a traditional Neutron gateway port.
filters = {
'device_id': [tenant_router['id']],
'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_GW]}
# fetch the gateway port of the *tenant* router so we can determine
# the CIDR of that port's subnet
gw_port = self._core_plugin.get_ports(context,
filters=filters)[0]
fixed_ips = self._get_fixed_ips_subnets(context, gw_port)
global_router_id = global_router['id']
aux_gw_port = self._core_plugin.create_port(context, {
'port': {
'tenant_id': '', # intentionally not set
'network_id': ext_net_id,
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': global_router_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}})
router_port = bc.RouterPort(
port_id=aux_gw_port['id'],
router_id=global_router_id,
port_type=port_type)
context.session.add(router_port)
return aux_gw_port
def _create_global_router(
self, context, hosting_device_id, hd_to_gr_dict, tenant_router,
logical_global_router):
r_spec = {'router': {
# global routers are not tied to any tenant
'tenant_id': '',
'name': self._global_router_name(hosting_device_id),
'admin_state_up': True}}
global_router, r_hd_b_db = self._l3_plugin.do_create_router(
context, r_spec, tenant_router[routertype.TYPE_ATTR], False,
True, hosting_device_id, ROUTER_ROLE_GLOBAL)
# make the global router a redundancy router for the logical
# global router (which we treat as a hidden "user visible
# router" (how's that for a contradiction of terms! :-) )
with context.session.begin(subtransactions=True):
ha_priority = (
ha_db.DEFAULT_MASTER_PRIORITY -
len(hd_to_gr_dict) * ha_db.PRIORITY_INCREASE_STEP)
r_b_b = ha_db.RouterRedundancyBinding(
redundancy_router_id=global_router['id'],
priority=ha_priority,
user_router_id=logical_global_router['id'])
context.session.add(r_b_b)
return global_router
def _get_logical_global_router(self, context, tenant_router):
# Since HA is also enabled on the global routers on each hosting device
# those global routers need HA settings and VIPs. We represent that
# using a Neutron router that is never instantiated/hosted. That
# Neutron router is referred to as the "logical global" router.
filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
logical_global_routers = self._l3_plugin.get_routers(
context, filters=filters)
if not logical_global_routers:
# must create logical global router
logical_global_router = self._create_logical_global_router(
context, tenant_router)
else:
logical_global_router = logical_global_routers[0]
self._update_ha_redundancy_level(context, logical_global_router, 1)
return logical_global_router
def _create_logical_global_router(self, context, tenant_router):
r_spec = {'router': {
# global routers are not tied to any tenant
'tenant_id': '',
'name': self._global_router_name('', logical=True),
'admin_state_up': True,
# set auto-schedule to false to keep this router un-hosted
routertypeawarescheduler.AUTO_SCHEDULE_ATTR: False}}
# notifications should never be sent for this logical router!
logical_global_router, r_hd_b_db = (
self._l3_plugin.do_create_router(
context, r_spec, tenant_router[routertype.TYPE_ATTR],
False, True, None, ROUTER_ROLE_LOGICAL_GLOBAL))
with context.session.begin(subtransactions=True):
r_ha_s_db = ha_db.RouterHASetting(
router_id=logical_global_router['id'],
ha_type=cfg.CONF.ha.default_ha_mechanism,
redundancy_level=1,
priority=ha_db.DEFAULT_MASTER_PRIORITY,
probe_connectivity=False,
probe_target=None,
probe_interval=None)
context.session.add(r_ha_s_db)
return logical_global_router
def _get_fixed_ips_subnets(self, context, gw_port):
nw = self._core_plugin.get_network(context, gw_port['network_id'])
subnets = [{'subnet_id': s} for s in nw['subnets']]
return subnets
def _provision_port_ha(self, context, ha_port, subnet, router,
ha_binding_db=None):
ha_group_uuid = uuidutils.generate_uuid()
router_id = router['id']
with context.session.begin(subtransactions=True):
ha_subnet_group = self._get_ha_group_by_ha_port_subnet_id(
context, ha_port['id'], subnet['subnet_id'])
if ha_subnet_group is not None:
return
if ha_binding_db is None:
ha_binding_db = self._get_ha_binding(context, router_id)
group_id = self.generate_ha_group_id(
context, router,
{'device_owner': DEVICE_OWNER_GLOBAL_ROUTER_GW}, ha_binding_db,
ha_group_uuid)
r_ha_g = ha_db.RouterHAGroup(
id=ha_group_uuid,
tenant_id='',
ha_type=ha_binding_db.ha_type,
group_identity=group_id,
ha_port_id=ha_port['id'],
extra_port_id=None,
subnet_id=subnet['subnet_id'],
user_router_id=router_id,
timers_config='',
tracking_config='',
other_config='')
context.session.add(r_ha_g)
def _get_ha_binding(self, context, router_id):
with context.session.begin(subtransactions=True):
query = context.session.query(ha_db.RouterHASetting)
query = query.filter(
ha_db.RouterHASetting.router_id == router_id)
return query.first()
def _get_ha_group_by_ha_port_subnet_id(self, context, port_id, subnet_id):
with context.session.begin(subtransactions=True):
query = context.session.query(ha_db.RouterHAGroup)
query = query.filter(ha_db.RouterHAGroup.ha_port_id == port_id,
ha_db.RouterHAGroup.subnet_id == subnet_id)
try:
r_ha_g = query.one()
except (exc.NoResultFound, exc.MultipleResultsFound):
return
return r_ha_g
# ---------------- Remove workflow functions -----------------
def _conditionally_remove_global_router(self, context, tenant_router,
update_operation=False):
filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL],
HOSTING_DEVICE_ATTR: [tenant_router[HOSTING_DEVICE_ATTR]]}
global_routers = self._l3_plugin.get_routers(context,
filters=filters)
hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
if global_routers:
global_router_id = global_routers[0]['id']
if not tenant_router or not tenant_router[l3.EXTERNAL_GW_INFO]:
# let l3 plugin's periodic backlog processing take care of the
# clean up of the global router
return
ext_net_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
routertype_id = tenant_router[routertype.TYPE_ATTR]
hd_id = tenant_router[HOSTING_DEVICE_ATTR]
global_router = hd_to_gr_dict.get(hd_id)
port_deleted = self._conditionally_remove_auxiliary_gateway_port(
context, global_router_id, ext_net_id, routertype_id, hd_id,
update_operation)
if port_deleted is False:
# since no auxiliary gateway port was deleted we can
# abort now, as the auxiliary gateway port count cannot
# have reached zero
return
filters = {
'device_id': [global_router_id],
'device_owner': [DEVICE_OWNER_GLOBAL_ROUTER_GW]}
num_aux_gw_ports = self._core_plugin.get_ports_count(
context, filters=filters)
if num_aux_gw_ports == 0:
# global router not needed any more so we delete it
self._delete_global_router(context, global_router_id)
do_notify = False
else:
do_notify = True
# process logical global router to remove its port
self._conditionally_remove_auxiliary_gateway_vip_port(
context, ext_net_id, routertype_id)
self._l3_plugin.add_type_and_hosting_device_info(context,
global_router)
if do_notify is True:
for ni in self._l3_plugin.get_notifiers(context,
[global_router]):
if ni['notifier']:
ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_remove_auxiliary_gateway_port(
self, context, router_id, ext_net_id, routertype_id,
hosting_device_id, update_operation=False):
num_rtrs = self._get_gateway_routers_count(
context, ext_net_id, routertype_id, None, hosting_device_id)
if ((num_rtrs <= 1 and update_operation is False) or
(num_rtrs == 0 and update_operation is True)):
# there are no tenant routers *on ext_net_id* that are serviced by
# this global router so its aux gw port can be deleted
self._delete_auxiliary_gateway_ports(context, router_id,
ext_net_id)
return True
return False
def _conditionally_remove_auxiliary_gateway_vip_port(
self, context, ext_net_id, routertype_id):
filters = {routertype.TYPE_ATTR: [routertype_id],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
log_global_routers = self._l3_plugin.get_routers(context,
filters=filters)
if not log_global_routers:
return
self._update_ha_redundancy_level(context, log_global_routers[0], -1)
log_global_router_id = log_global_routers[0]['id']
num_global_rtrs = self._get_gateway_routers_count(
context, ext_net_id, routertype_id, ROUTER_ROLE_GLOBAL)
if num_global_rtrs == 0:
# there are no global routers *on ext_net_id* that are serviced by
# this logical global router so its aux gw VIP port can be deleted
self._delete_auxiliary_gateway_ports(context, log_global_router_id,
ext_net_id)
filters[routerrole.ROUTER_ROLE_ATTR] = [ROUTER_ROLE_GLOBAL]
total_num_global_rtrs = self._l3_plugin.get_routers_count(
context, filters=filters)
if total_num_global_rtrs == 0:
# there are no global routers left that are serviced by this
# logical global router so it can be deleted
self._delete_global_router(context, log_global_router_id, True)
return False
def _delete_auxiliary_gateway_ports(
self, context, router_id, net_id=None,
port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
filters = {
'device_id': [router_id],
'device_owner': [port_type]}
if net_id is not None:
filters['network_id'] = [net_id]
for port in self._core_plugin.get_ports(context, filters=filters):
try:
self._core_plugin.delete_port(context, port['id'],
l3_port_check=False)
except (exc.ObjectDeletedError, n_exc.PortNotFound) as e:
LOG.info('Unable to delete port for Global router '
'%(r_id)s. It has likely been concurrently '
'deleted. %(err)s', {'r_id': router_id,
'err': e})
def _delete_global_router(self, context, global_router_id, logical=False):
# ensure we clean up any stale auxiliary gateway ports
self._delete_auxiliary_gateway_ports(context, global_router_id)
try:
if logical is True:
# We use parent class method as no special operations beyond
# what the base implementation does are needed for the logical
# global router
super(L3RouterApplianceDBMixin, self._l3_plugin).delete_router(
context, global_router_id)
else:
self._l3_plugin.delete_router(
context, global_router_id, unschedule=False)
except (exc.ObjectDeletedError, l3.RouterNotFound) as e:
g_r_type = 'Logical Global' if logical is True else 'Global'
LOG.info('Unable to delete %(g_r_type)s router %(r_id)s. It '
'has likely been concurrently deleted. %(err)s',
{'g_r_type': g_r_type, 'r_id': global_router_id,
'err': e})
except Exception as e:
g_r_type = 'Logical Global' if logical is True else 'Global'
LOG.debug('Failed to delete %(g_r_type)s router %(r_id)s. It may '
'have been deleted concurrently. Error details: '
'%(err)s',
{'g_r_type': g_r_type, 'r_id': global_router_id,
'err': e})
def _get_gateway_routers_count(self, context, ext_net_id, routertype_id,
router_role, hosting_device_id=None):
# Determine number of routers (with routertype_id and router_role)
# that act as gateway to ext_net_id and that are hosted on
# hosting_device_id (if specified).
query = context.session.query(bc.Router)
if router_role in [None, ROUTER_ROLE_HA_REDUNDANCY]:
# tenant router roles
query = query.join(models_v2.Port,
models_v2.Port.id == bc.Router.gw_port_id)
role_filter = expr.or_(
l3_models.RouterHostingDeviceBinding.role == expr.null(),
l3_models.RouterHostingDeviceBinding.role ==
ROUTER_ROLE_HA_REDUNDANCY)
else:
# global and logical global routers
query = query.join(models_v2.Port,
models_v2.Port.device_owner == bc.Router.id)
role_filter = (
l3_models.RouterHostingDeviceBinding.role == router_role)
query = query.join(
l3_models.RouterHostingDeviceBinding,
l3_models.RouterHostingDeviceBinding.router_id == bc.Router.id)
query = query.filter(
role_filter,
models_v2.Port.network_id == ext_net_id,
l3_models.RouterHostingDeviceBinding.router_type_id ==
routertype_id)
if hosting_device_id is not None:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
hosting_device_id)
return query.count()
# ---------------- General support functions -----------------
def _update_ha_redundancy_level(self, context, logical_global_router,
delta):
with context.session.begin(subtransactions=True):
log_g_router_db = self._l3_plugin._get_router(
context, logical_global_router['id'])
log_g_router_db.ha_settings.redundancy_level += delta
context.session.add(log_g_router_db.ha_settings)
def _router_name(self, router_id):
return N_ROUTER_PREFIX + router_id
def _global_router_name(self, hosting_device_id, logical=False):
if logical is True:
return cisco_constants.LOGICAL_ROUTER_ROLE_NAME
else:
return '%s-%s' % (cisco_constants.ROUTER_ROLE_NAME_PREFIX,
hosting_device_id[-cisco_constants.ROLE_ID_LEN:])
@property
def _core_plugin(self):
return bc.get_plugin()
@property
def _l3_plugin(self):
return bc.get_plugin(bc.constants.L3)
|
Tehsmash/networking-cisco
|
networking_cisco/plugins/cisco/l3/drivers/asr1k/asr1k_routertype_driver.py
|
Python
|
apache-2.0
| 33,133
|
#!/usr/bin/env python -i
# -*- mode: Python; tab-width: 4; indent-tabs-mode: nil; -*-
# ex: set tabstop=4
# Please do not change the two lines above. See PEP 8, PEP 263.
'''Provides an interactive console with pytan available as handler'''
__author__ = 'Jim Olsen <jim.olsen@tanium.com>'
__version__ = '2.1.5'
import os
import sys
sys.dont_write_bytecode = True
my_file = os.path.abspath(sys.argv[0])
my_name = os.path.splitext(os.path.basename(my_file))[0]
my_dir = os.path.dirname(my_file)
parent_dir = os.path.dirname(my_dir)
lib_dir = os.path.join(parent_dir, 'lib')
path_adds = [lib_dir]
for aa in path_adds:
    if aa not in sys.path:
        sys.path.append(aa)
import pytan
import pytan.binsupport
if __name__ == "__main__":
pytan.binsupport.version_check(reqver=__version__)
setupmethod = getattr(pytan.binsupport, 'setup_{}_argparser'.format(my_name))
responsemethod = getattr(pytan.binsupport, 'process_{}_args'.format(my_name))
parser = setupmethod(doc=__doc__)
args = parser.parse_args()
handler = pytan.binsupport.process_handler_args(parser=parser, args=args)
response = responsemethod(parser=parser, handler=handler, args=args)
|
tanium/pytan
|
bin/pytan_shell.py
|
Python
|
mit
| 1,167
|
"""
Waveform pseudo-type functions.
[[[cog
import os, sys
if sys.hexversion < 0x03000000:
import __builtin__
else:
import builtins as __builtin__
sys.path.append(os.environ['TRACER_DIR'])
import trace_ex_eng_wave_functions
exobj_eng = trace_ex_eng_wave_functions.trace_module(no_print=True)
]]]
[[[end]]]
"""
# wave_functions.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,C0413,E1101,E1111,R0913,W0212
# Standard library imports
import copy
import math
import os
import warnings
# PyPI imports
if os.environ.get("READTHEDOCS", "") != "True":
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
import pexdoc.exh
import pexdoc.pcontracts
# Intra-package imports
from .functions import remove_extra_delims
from .constants import FP_ATOL, FP_RTOL
from .wave_core import _interp_dep_vector, Waveform
###
# Functions
###
def _barange(bmin, bmax, inc):
vector = np.arange(bmin, bmax + inc, inc)
vector = vector if np.isclose(bmax, vector[-1], FP_RTOL, FP_ATOL) else vector[:-1]
return vector
def _bound_waveform(wave, indep_min, indep_max):
"""Add independent variable vector bounds if they are not in vector."""
indep_min, indep_max = _validate_min_max(wave, indep_min, indep_max)
indep_vector = copy.copy(wave._indep_vector)
if (
isinstance(indep_min, float) or isinstance(indep_max, float)
) and indep_vector.dtype.name.startswith("int"):
indep_vector = indep_vector.astype(float)
min_pos = np.searchsorted(indep_vector, indep_min)
if not np.isclose(indep_min, indep_vector[min_pos], FP_RTOL, FP_ATOL):
indep_vector = np.insert(indep_vector, min_pos, indep_min)
max_pos = np.searchsorted(indep_vector, indep_max)
if not np.isclose(indep_max, indep_vector[max_pos], FP_RTOL, FP_ATOL):
indep_vector = np.insert(indep_vector, max_pos, indep_max)
dep_vector = _interp_dep_vector(wave, indep_vector)
wave._indep_vector = indep_vector[min_pos : max_pos + 1]
wave._dep_vector = dep_vector[min_pos : max_pos + 1]
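# Hypothetical sketch of _bound_waveform above: bounding a waveform with
# indep_vector [0, 1, 2] to indep_min=0.5 and indep_max=1.5 inserts both
# bounds (interpolating their dependent values) and trims the vectors, so the
# resulting independent vector is [0.5, 1, 1.5].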
def _build_units(indep_units, dep_units, op):
"""Build unit math operations."""
if (not dep_units) and (not indep_units):
return ""
if dep_units and (not indep_units):
return dep_units
if (not dep_units) and indep_units:
return (
remove_extra_delims("1{0}({1})".format(op, indep_units))
if op == "/"
else remove_extra_delims("({0})".format(indep_units))
)
return remove_extra_delims("({0}){1}({2})".format(dep_units, op, indep_units))
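# Hypothetical sketch of how _build_units above composes unit strings (the
# exact output also depends on remove_extra_delims, which may strip redundant
# delimiters): integrating a "V" waveform over a "sec" axis yields roughly
# "(V)*(sec)", differentiating it yields roughly "(V)/(sec)", and
# differentiating a unitless waveform yields roughly "1/(sec)".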
def _operation(wave, desc, units, fpointer):
"""Perform generic operation on a waveform object."""
ret = copy.copy(wave)
ret.dep_units = units
ret.dep_name = "{0}({1})".format(desc, ret.dep_name)
ret._dep_vector = fpointer(ret._dep_vector)
return ret
def _running_area(indep_vector, dep_vector):
"""Calculate running area under curve."""
rect_height = np.minimum(dep_vector[:-1], dep_vector[1:])
rect_base = np.diff(indep_vector)
rect_area = np.multiply(rect_height, rect_base)
triang_height = np.abs(np.diff(dep_vector))
triang_area = 0.5 * np.multiply(triang_height, rect_base)
return np.cumsum(np.concatenate((np.array([0.0]), triang_area + rect_area)))
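# Hypothetical worked example for _running_area above: with indep_vector
# [0, 1, 2] and dep_vector [0, 2, 2], each segment contributes the rectangle
# under its lower endpoint plus the triangle between its endpoints:
#   segment 0->1: 0*1 + 0.5*|2-0|*1 = 1.0
#   segment 1->2: 2*1 + 0.5*|2-2|*1 = 2.0
# so the cumulative sum returned is [0.0, 1.0, 3.0], i.e. the trapezoidal
# running integral.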
def _validate_min_max(wave, indep_min, indep_max):
"""Validate min and max bounds are within waveform's independent variable vector."""
imin, imax = False, False
if indep_min is None:
indep_min = wave._indep_vector[0]
imin = True
if indep_max is None:
indep_max = wave._indep_vector[-1]
imax = True
if imin and imax:
return indep_min, indep_max
exminmax = pexdoc.exh.addex(
RuntimeError, "Incongruent `indep_min` and `indep_max` arguments"
)
exmin = pexdoc.exh.addai("indep_min")
exmax = pexdoc.exh.addai("indep_max")
exminmax(bool(indep_min >= indep_max))
exmin(
bool(
(indep_min < wave._indep_vector[0])
and (not np.isclose(indep_min, wave._indep_vector[0], FP_RTOL, FP_ATOL))
)
)
exmax(
bool(
(indep_max > wave._indep_vector[-1])
and (not np.isclose(indep_max, wave._indep_vector[-1], FP_RTOL, FP_ATOL))
)
)
return indep_min, indep_max
@pexdoc.pcontracts.contract(wave=Waveform)
def acos(wave):
r"""
Return the arc cosine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.acos
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError,
"Math domain error",
bool((min(wave._dep_vector) < -1) or (max(wave._dep_vector) > 1)),
)
return _operation(wave, "acos", "rad", np.arccos)
@pexdoc.pcontracts.contract(wave=Waveform)
def acosh(wave):
r"""
Return the hyperbolic arc cosine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.acosh
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(ValueError, "Math domain error", bool(min(wave._dep_vector) < 1))
return _operation(wave, "acosh", "", np.arccosh)
@pexdoc.pcontracts.contract(wave=Waveform)
def asin(wave):
r"""
Return the arc sine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.asin
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError,
"Math domain error",
bool((min(wave._dep_vector) < -1) or (max(wave._dep_vector) > 1)),
)
return _operation(wave, "asin", "rad", np.arcsin)
@pexdoc.pcontracts.contract(wave=Waveform)
def asinh(wave):
r"""
Return the hyperbolic arc sine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.asinh
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "asinh", "", np.arcsinh)
@pexdoc.pcontracts.contract(wave=Waveform)
def atan(wave):
r"""
Return the arc tangent of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.atan
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "atan", "rad", np.arctan)
@pexdoc.pcontracts.contract(wave=Waveform)
def atanh(wave):
r"""
Return the hyperbolic arc tangent of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.atanh
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError,
"Math domain error",
bool((min(wave._dep_vector) < -1) or (max(wave._dep_vector) > 1)),
)
return _operation(wave, "atanh", "", np.arctanh)
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def average(wave, indep_min=None, indep_max=None):
r"""
Return the running average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.average
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
area = _running_area(ret._indep_vector, ret._dep_vector)
area[0] = ret._dep_vector[0]
deltas = ret._indep_vector - ret._indep_vector[0]
deltas[0] = 1.0
ret._dep_vector = np.divide(area, deltas)
ret.dep_name = "average({0})".format(ret._dep_name)
return ret
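# Hypothetical sketch of average above: the running average is the running
# trapezoidal area divided by the elapsed independent span, with the first
# point defined as the first dependent value (deltas[0] is set to 1 to avoid
# a 0/0). For indep_vector [0, 1, 2] and dep_vector [0, 2, 2] the running
# area is [0, 1, 3] (see _running_area), so the result is [0.0, 1.0, 1.5].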
@pexdoc.pcontracts.contract(wave=Waveform)
def ceil(wave):
r"""
Return the ceiling of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ceil
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "ceil", wave.dep_units, np.ceil)
@pexdoc.pcontracts.contract(wave=Waveform)
def cos(wave):
r"""
Return the cosine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.cos
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "cos", "", np.cos)
@pexdoc.pcontracts.contract(wave=Waveform)
def cosh(wave):
r"""
Return the hyperbolic cosine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.cosh
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "cosh", "", np.cosh)
@pexdoc.pcontracts.contract(wave=Waveform)
def db(wave):
r"""
Return a waveform's dependent variable vector expressed in decibels.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.db
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError, "Math domain error", bool((np.min(np.abs(wave._dep_vector)) <= 0))
)
ret = copy.copy(wave)
ret.dep_units = "dB"
ret.dep_name = "db({0})".format(ret.dep_name)
ret._dep_vector = 20.0 * np.log10(np.abs(ret._dep_vector))
return ret
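# Hypothetical sketch of db above: a dependent value of 10.0 maps to
# 20*log10(10) = 20 dB and 0.5 maps to about -6.02 dB; any zero-magnitude
# sample triggers the "Math domain error" exception before the conversion.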
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def derivative(wave, indep_min=None, indep_max=None):
r"""
Return the numerical derivative of a waveform's dependent variable vector.
The method used is the `backwards differences
<https://en.wikipedia.org/wiki/
Finite_difference#Forward.2C_backward.2C_and_central_differences>`_ method
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
    :rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.derivative
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
delta_indep = np.diff(ret._indep_vector)
delta_dep = np.diff(ret._dep_vector)
delta_indep = np.concatenate((np.array([delta_indep[0]]), delta_indep))
delta_dep = np.concatenate((np.array([delta_dep[0]]), delta_dep))
ret._dep_vector = np.divide(delta_dep, delta_indep)
ret.dep_name = "derivative({0})".format(ret._dep_name)
ret.dep_units = _build_units(ret.indep_units, ret.dep_units, "/")
return ret
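# Hypothetical sketch of derivative above: backwards differences repeat the
# first difference for the initial sample. With indep_vector [0, 1, 2, 3] and
# dep_vector [0, 1, 4, 9], the per-segment slopes are [1, 3, 5] and the
# returned dependent vector is [1.0, 1.0, 3.0, 5.0]; the units become
# dep_units/indep_units via _build_units.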
@pexdoc.pcontracts.contract(wave=Waveform)
def exp(wave):
r"""
Return the natural exponent of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.exp
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "exp", "", np.exp)
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def fft(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.fft
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
npoints = npoints or ret._indep_vector.size
fs = (npoints - 1) / float(ret._indep_vector[-1])
spoints = min(ret._indep_vector.size, npoints)
sdiff = np.diff(ret._indep_vector[:spoints])
cond = not np.all(
np.isclose(sdiff, sdiff[0] * np.ones(spoints - 1), FP_RTOL, FP_ATOL)
)
pexdoc.addex(RuntimeError, "Non-uniform sampling", cond)
finc = fs / float(npoints - 1)
indep_vector = _barange(-fs / 2.0, +fs / 2.0, finc)
dep_vector = np.fft.fft(ret._dep_vector, npoints)
return Waveform(
indep_vector=indep_vector,
dep_vector=dep_vector,
dep_name="fft({0})".format(ret.dep_name),
indep_scale="LINEAR",
dep_scale="LINEAR",
indep_units="Hz",
dep_units="",
)
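# Hypothetical sketch of fft above, with npoints left at its default (the
# size of the independent vector): the code takes fs = (npoints-1)/T, where T
# is the last time point, and builds a frequency grid from -fs/2 to +fs/2 in
# steps of fs/(npoints-1). For example, 4 points ending at T=3 s give
# fs = 1 Hz and frequency points [-1/2, -1/6, 1/6, 1/2] Hz.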
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def fftdb(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the Fast Fourier Transform of a waveform.
The dependent variable vector of the returned waveform is expressed in decibels
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftdb
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return db(fft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def ffti(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the imaginary part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ffti
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return imag(fft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def fftm(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the magnitude of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftm
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return abs(fft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
unwrap=bool,
rad=bool,
)
def fftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):
r"""
Return the phase of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return phase(fft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad)
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def fftr(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the real part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftr
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return real(fft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
dep_var="number",
der="None|(int,>=-1,<=+1)",
inst="int,>0",
indep_min="None|number",
indep_max="None|number",
)
def find(wave, dep_var, der=None, inst=1, indep_min=None, indep_max=None):
r"""
Return the independent variable point associated with a dependent variable point.
If the dependent variable point is not in the dependent variable vector the
independent variable vector point is obtained by linear interpolation
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param dep_var: Dependent vector value to search for
:type dep_var: integer, float or complex
:param der: Dependent vector derivative filter. If +1 only independent
vector points that have positive derivatives when crossing
the requested dependent vector point are returned; if -1 only
independent vector points that have negative derivatives when
crossing the requested dependent vector point are returned;
if 0 only independent vector points that have null derivatives
when crossing the requested dependent vector point are
returned; otherwise if None all independent vector points are
returned regardless of the dependent vector derivative. The
derivative of the first and last point of the waveform is
assumed to be null
    :type der: integer
:param inst: Instance number filter. If, for example, **inst** equals 3,
then the independent variable vector point at which the
dependent variable vector equals the requested value for the
third time is returned
:type inst: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: integer, float or None if the dependent variable point is not found
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.find
:raises:
* RuntimeError (Argument \`dep_var\` is not valid)
* RuntimeError (Argument \`der\` is not valid)
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`inst\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
# pylint: disable=C0325,R0914,W0613
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
close_min = np.isclose(min(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
close_max = np.isclose(max(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
if ((np.amin(ret._dep_vector) > dep_var) and (not close_min)) or (
(np.amax(ret._dep_vector) < dep_var) and (not close_max)
):
return None
cross_wave = ret._dep_vector - dep_var
sign_wave = np.sign(cross_wave)
exact_idx = np.where(np.isclose(ret._dep_vector, dep_var, FP_RTOL, FP_ATOL))[0]
    # Locations where dep_vector crosses dep_var or equals it
left_idx = np.where(np.diff(sign_wave))[0]
# Remove elements to the left of exact matches
left_idx = np.setdiff1d(left_idx, exact_idx)
left_idx = np.setdiff1d(left_idx, exact_idx - 1)
right_idx = left_idx + 1 if left_idx.size else np.array([])
indep_var = ret._indep_vector[exact_idx] if exact_idx.size else np.array([])
dvector = np.zeros(exact_idx.size).astype(int) if exact_idx.size else np.array([])
if left_idx.size and (ret.interp == "STAIRCASE"):
idvector = (
2.0 * (ret._dep_vector[right_idx] > ret._dep_vector[left_idx]).astype(int)
- 1
)
if indep_var.size:
indep_var = np.concatenate((indep_var, ret._indep_vector[right_idx]))
dvector = np.concatenate((dvector, idvector))
sidx = np.argsort(indep_var)
indep_var = indep_var[sidx]
dvector = dvector[sidx]
else:
indep_var = ret._indep_vector[right_idx]
dvector = idvector
elif left_idx.size:
y_left = ret._dep_vector[left_idx]
y_right = ret._dep_vector[right_idx]
x_left = ret._indep_vector[left_idx]
x_right = ret._indep_vector[right_idx]
slope = ((y_left - y_right) / (x_left - x_right)).astype(float)
# y = y0+slope*(x-x0) => x0+(y-y0)/slope
if indep_var.size:
indep_var = np.concatenate(
(indep_var, x_left + ((dep_var - y_left) / slope))
)
dvector = np.concatenate((dvector, np.where(slope > 0, 1, -1)))
sidx = np.argsort(indep_var)
indep_var = indep_var[sidx]
dvector = dvector[sidx]
else:
indep_var = x_left + ((dep_var - y_left) / slope)
dvector = np.where(slope > 0, +1, -1)
if der is not None:
indep_var = np.extract(dvector == der, indep_var)
return indep_var[inst - 1] if inst <= indep_var.size else None
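# Hypothetical sketch of find above, assuming the default CONTINUOUS
# interpolation: for indep_vector [0, 1, 2] and dep_vector [0, 2, 0],
# find(wave, 1.0) interpolates the first crossing and returns 0.5;
# find(wave, 1.0, der=-1) keeps only the falling crossing and returns 1.5;
# find(wave, 1.0, inst=2) returns the second crossing, also 1.5; and
# find(wave, 3.0) returns None because 3.0 is never reached.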
@pexdoc.pcontracts.contract(wave=Waveform)
def floor(wave):
r"""
Return the floor of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.floor
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "floor", wave.dep_units, np.floor)
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def ifft(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ifft
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
npoints = npoints or ret._indep_vector.size
spoints = min(ret._indep_vector.size, npoints)
sdiff = np.diff(ret._indep_vector[:spoints])
finc = sdiff[0]
cond = not np.all(np.isclose(sdiff, finc * np.ones(spoints - 1), FP_RTOL, FP_ATOL))
pexdoc.addex(RuntimeError, "Non-uniform frequency spacing", cond)
fs = (npoints - 1) * finc
tinc = 1 / float(fs)
tend = 1 / float(finc)
indep_vector = _barange(0, tend, tinc)
dep_vector = np.fft.ifft(ret._dep_vector, npoints)
return Waveform(
indep_vector=indep_vector,
dep_vector=dep_vector,
dep_name="ifft({0})".format(ret.dep_name),
indep_scale="LINEAR",
dep_scale="LINEAR",
indep_units="sec",
dep_units="",
)
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def ifftdb(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the inverse Fast Fourier Transform of a waveform.
The dependent variable vector of the returned waveform is expressed in decibels
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ifftdb
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return db(ifft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def iffti(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the imaginary part of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.iffti
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return imag(ifft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def ifftm(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the magnitude of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ifftm
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return abs(ifft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
unwrap=bool,
rad=bool,
)
def ifftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):
r"""
Return the phase of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ifftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return phase(ifft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad)
@pexdoc.pcontracts.contract(
wave=Waveform,
npoints="None|(int,>=1)",
indep_min="None|number",
indep_max="None|number",
)
def ifftr(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the real part of the inverse Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.ifftr
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform frequency spacing)
.. [[[end]]]
"""
return real(ifft(wave, npoints, indep_min, indep_max))
@pexdoc.pcontracts.contract(wave=Waveform)
def imag(wave):
r"""
Return the imaginary part of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.imag
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "imag", wave.dep_units, np.imag)
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def integral(wave, indep_min=None, indep_max=None):
r"""
Return the running integral of a waveform's dependent variable vector.
The method used is the `trapezoidal
<https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ method
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.integral
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
ret._dep_vector = _running_area(ret._indep_vector, ret._dep_vector)
ret.dep_name = "integral({0})".format(ret._dep_name)
ret.dep_units = _build_units(ret.indep_units, ret.dep_units, "*")
return ret
@pexdoc.pcontracts.contract(wave=Waveform)
def group_delay(wave):
r"""
Return the group delay of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.group_delay
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
ret = -derivative(phase(wave, unwrap=True) / (2 * math.pi))
ret.dep_name = "group_delay({0})".format(wave.dep_name)
ret.dep_units = "sec"
return ret
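# Hypothetical note on group_delay above: the unwrapped phase (in radians) is
# divided by 2*pi to express it in cycles, so its negative derivative with
# respect to a frequency axis in Hz directly yields a delay in seconds.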
@pexdoc.pcontracts.contract(wave=Waveform)
def log(wave):
r"""
Return the natural logarithm of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.log
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError, "Math domain error", bool((min(wave._dep_vector) <= 0))
)
return _operation(wave, "log", "", np.log)
@pexdoc.pcontracts.contract(wave=Waveform)
def log10(wave):
r"""
Return the base 10 logarithm of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.log10
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Math domain error)
.. [[[end]]]
"""
pexdoc.exh.addex(
ValueError, "Math domain error", bool((min(wave._dep_vector) <= 0))
)
return _operation(wave, "log10", "", np.log10)
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def naverage(wave, indep_min=None, indep_max=None):
r"""
Return the numerical average of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
    :rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.naverage
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
delta_x = ret._indep_vector[-1] - ret._indep_vector[0]
return np.trapz(ret._dep_vector, x=ret._indep_vector) / delta_x
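# Hypothetical sketch of naverage above: np.trapz over indep_vector [0, 1, 2]
# and dep_vector [0, 2, 2] gives an area of 3.0, so the numerical average
# over the 2-unit span is 1.5 (matching the last point of the running-average
# example given for average above).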
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def nintegral(wave, indep_min=None, indep_max=None):
r"""
Return the numerical integral of a waveform's dependent variable vector.
The method used is the `trapezoidal
<https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ method
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.nintegral
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
return np.trapz(ret._dep_vector, ret._indep_vector)
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def nmax(wave, indep_min=None, indep_max=None):
r"""
Return the maximum of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.nmax
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
return np.max(ret._dep_vector)
@pexdoc.pcontracts.contract(
wave=Waveform, indep_min="None|number", indep_max="None|number"
)
def nmin(wave, indep_min=None, indep_max=None):
r"""
Return the minimum of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.nmin
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
return np.min(ret._dep_vector)
@pexdoc.pcontracts.contract(wave=Waveform, unwrap=bool, rad=bool)
def phase(wave, unwrap=True, rad=True):
r"""
Return the phase of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.phase
:raises:
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
ret = copy.copy(wave)
ret.dep_units = "rad" if rad else "deg"
ret.dep_name = "phase({0})".format(ret.dep_name)
ret._dep_vector = (
np.unwrap(np.angle(ret._dep_vector)) if unwrap else np.angle(ret._dep_vector)
)
if not rad:
ret._dep_vector = np.rad2deg(ret._dep_vector)
return ret
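# Hypothetical sketch of phase above: if np.angle of the dependent vector
# yields [3.0, -3.0] rad, np.unwrap replaces the jump larger than pi with its
# 2*pi complement and reports approximately [3.0, 3.28] rad when unwrap=True;
# with rad=False the result is converted to degrees via np.rad2deg.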
@pexdoc.pcontracts.contract(wave=Waveform)
def real(wave):
r"""
Return the real part of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.real
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "real", wave.dep_units, np.real)
@pexdoc.pcontracts.contract(wave=Waveform, decimals="int,>=0")
def round(wave, decimals=0):
r"""
Round a waveform's dependent variable vector to a given number of decimal places.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param decimals: Number of decimals to round to
:type decimals: integer
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.round
:raises:
* RuntimeError (Argument \`decimals\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
# pylint: disable=W0622
pexdoc.exh.addex(
TypeError,
"Cannot convert complex to integer",
wave._dep_vector.dtype.name.startswith("complex"),
)
ret = copy.copy(wave)
ret.dep_name = "round({0}, {1})".format(ret.dep_name, decimals)
ret._dep_vector = np.round(wave._dep_vector, decimals)
return ret
@pexdoc.pcontracts.contract(wave=Waveform)
def sin(wave):
r"""
Return the sine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.sin
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "sin", "", np.sin)
@pexdoc.pcontracts.contract(wave=Waveform)
def sinh(wave):
r"""
Return the hyperbolic sine of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.sinh
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "sinh", "", np.sinh)
@pexdoc.pcontracts.contract(wave=Waveform)
def sqrt(wave):
r"""
Return the square root of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.sqrt
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
dep_units = "{0}**0.5".format(wave.dep_units)
return _operation(wave, "sqrt", dep_units, np.sqrt)
@pexdoc.pcontracts.contract(
wave=Waveform,
dep_name="str|None",
indep_min="None|number",
indep_max="None|number",
indep_step="None|number",
)
def subwave(wave, dep_name=None, indep_min=None, indep_max=None, indep_step=None):
r"""
Return a waveform that is a sub-set of a waveform, potentially re-sampled.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
    :param dep_name: Dependent variable name
:type dep_name: `NonNullString <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#nonnullstring>`_
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param indep_step: Independent vector step
:type indep_step: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.subwave
:raises:
* RuntimeError (Argument \`dep_name\` is not valid)
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`indep_step\` is greater than independent
vector range)
* RuntimeError (Argument \`indep_step\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
if dep_name is not None:
ret.dep_name = dep_name
_bound_waveform(ret, indep_min, indep_max)
pexdoc.addai("indep_step", bool((indep_step is not None) and (indep_step <= 0)))
exmsg = "Argument `indep_step` is greater than independent vector range"
cond = bool(
(indep_step is not None)
and (indep_step > ret._indep_vector[-1] - ret._indep_vector[0])
)
pexdoc.addex(RuntimeError, exmsg, cond)
if indep_step:
indep_vector = _barange(indep_min, indep_max, indep_step)
dep_vector = _interp_dep_vector(ret, indep_vector)
ret._set_indep_vector(indep_vector, check=False)
ret._set_dep_vector(dep_vector, check=False)
return ret
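# Hypothetical sketch of subwave above: subwave(wave, indep_min=1,
# indep_max=3, indep_step=0.5) first bounds the waveform to [1, 3]
# (interpolating the end points if needed) and then re-samples it on the
# uniform grid [1.0, 1.5, 2.0, 2.5, 3.0] via _interp_dep_vector.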
@pexdoc.pcontracts.contract(wave=Waveform)
def tan(wave):
r"""
Return the tangent of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for peng.wave_functions.tan
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "tan", "", np.tan)
@pexdoc.pcontracts.contract(wave=Waveform)
def tanh(wave):
r"""
Return the hyperbolic tangent of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.tanh
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
return _operation(wave, "tanh", "", np.tanh)
@pexdoc.pcontracts.contract(wave=Waveform)
def wcomplex(wave):
r"""
Convert a waveform's dependent variable vector to complex.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wcomplex
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
ret = copy.copy(wave)
ret._dep_vector = ret._dep_vector.astype(np.complex)
return ret
@pexdoc.pcontracts.contract(wave=Waveform)
def wfloat(wave):
r"""
Convert a waveform's dependent variable vector to float.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wfloat
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* TypeError (Cannot convert complex to float)
.. [[[end]]]
"""
pexdoc.exh.addex(
TypeError,
"Cannot convert complex to float",
wave._dep_vector.dtype.name.startswith("complex"),
)
ret = copy.copy(wave)
ret._dep_vector = ret._dep_vector.astype(np.float)
return ret
@pexdoc.pcontracts.contract(wave=Waveform)
def wint(wave):
r"""
Convert a waveform's dependent variable vector to integer.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wint
:raises:
* RuntimeError (Argument \`wave\` is not valid)
* TypeError (Cannot convert complex to integer)
.. [[[end]]]
"""
pexdoc.exh.addex(
TypeError,
"Cannot convert complex to integer",
wave._dep_vector.dtype.name.startswith("complex"),
)
ret = copy.copy(wave)
ret._dep_vector = ret._dep_vector.astype(np.int)
return ret
@pexdoc.pcontracts.contract(wave=Waveform, indep_var="number")
def wvalue(wave, indep_var):
r"""
Return the dependent variable value at a given independent variable point.
If the independent variable point is not in the independent variable vector
the dependent variable value is obtained by linear interpolation
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_var: Independent variable point for which the dependent
variable is to be obtained
:type indep_var: integer or float
    :rtype: integer, float or complex
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.wvalue
:raises:
* RuntimeError (Argument \`indep_var\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* ValueError (Argument \`indep_var\` is not in the independent
variable vector range)
.. [[[end]]]
"""
close_min = np.isclose(indep_var, wave._indep_vector[0], FP_RTOL, FP_ATOL)
close_max = np.isclose(indep_var, wave._indep_vector[-1], FP_RTOL, FP_ATOL)
pexdoc.exh.addex(
ValueError,
"Argument `indep_var` is not in the independent variable vector range",
bool(
((indep_var < wave._indep_vector[0]) and (not close_min))
or ((indep_var > wave._indep_vector[-1]) and (not close_max))
),
)
if close_min:
return wave._dep_vector[0]
if close_max:
return wave._dep_vector[-1]
idx = np.searchsorted(wave._indep_vector, indep_var)
xdelta = wave._indep_vector[idx] - wave._indep_vector[idx - 1]
ydelta = wave._dep_vector[idx] - wave._dep_vector[idx - 1]
slope = ydelta / float(xdelta)
return wave._dep_vector[idx - 1] + slope * (indep_var - wave._indep_vector[idx - 1])
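# Hypothetical sketch of wvalue above: with indep_vector [0, 1, 2] and
# dep_vector [0, 2, 6], wvalue(wave, 1.5) interpolates between the bracketing
# points and returns 2 + (6 - 2)/(2 - 1)*(1.5 - 1) = 4.0, while points outside
# [0, 2] raise the "not in the independent variable vector range" exception.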
|
pmacosta/peng
|
peng/wave_functions.py
|
Python
|
mit
| 62,878
|
import copy
import datetime
import re
import time
from unittest import TestCase, skipIf
import mongomock
from mongomock import ConfigurationError
from mongomock import Database
from mongomock import InvalidURI
from mongomock import OperationFailure
from .utils import DBRef
try:
from bson.objectid import ObjectId
import pymongo
from pymongo import MongoClient as PymongoClient
_HAVE_PYMONGO = True
except ImportError:
from mongomock.object_id import ObjectId
_HAVE_PYMONGO = False
try:
from bson.code import Code
from bson.son import SON
import execjs # noqa
_HAVE_MAP_REDUCE = True
except ImportError:
_HAVE_MAP_REDUCE = False
from nose.tools import assert_raises
from tests.multicollection import MultiCollection
class InterfaceTest(TestCase):
def test__can_create_db_without_path(self):
self.assertIsNotNone(mongomock.MongoClient())
def test__can_create_db_with_path(self):
self.assertIsNotNone(mongomock.MongoClient('mongodb://localhost'))
def test__repr(self):
self.assertEqual(repr(mongomock.MongoClient()),
"mongomock.MongoClient('localhost', 27017)")
def test__bad_uri_raises(self):
with assert_raises(InvalidURI):
mongomock.MongoClient("http://host1")
with assert_raises(InvalidURI):
mongomock.MongoClient("://host1")
def test__none_uri_host(self):
self.assertIsNotNone(mongomock.MongoClient('host1'))
self.assertIsNotNone(mongomock.MongoClient('//host2'))
self.assertIsNotNone(mongomock.MongoClient('mongodb:host2'))
class DatabaseGettingTest(TestCase):
def setUp(self):
super(DatabaseGettingTest, self).setUp()
self.client = mongomock.MongoClient()
def test__getting_database_via_getattr(self):
db1 = self.client.some_database_here
db2 = self.client.some_database_here
self.assertIs(db1, db2)
self.assertIs(db1, self.client['some_database_here'])
self.assertIsInstance(db1, Database)
self.assertIs(db1.client, self.client)
self.assertIs(db2.client, self.client)
def test__getting_database_via_getitem(self):
db1 = self.client['some_database_here']
db2 = self.client['some_database_here']
self.assertIs(db1, db2)
self.assertIs(db1, self.client.some_database_here)
self.assertIsInstance(db1, Database)
def test__drop_database(self):
db = self.client.a
collection = db.a
doc_id = collection.insert({"aa": "bb"})
result = collection.find({"_id": doc_id})
self.assertEqual(result.count(), 1)
self.client.drop_database("a")
result = collection.find({"_id": doc_id})
self.assertEqual(result.count(), 0)
db = self.client.a
collection = db.a
doc_id = collection.insert({"aa": "bb"})
result = collection.find({"_id": doc_id})
self.assertEqual(result.count(), 1)
self.client.drop_database(db)
result = collection.find({"_id": doc_id})
self.assertEqual(result.count(), 0)
def test__alive(self):
self.assertTrue(self.client.alive())
def test__dereference(self):
db = self.client.a
collection = db.a
to_insert = {"_id": "a", "aa": "bb"}
collection.insert(to_insert)
a = db.dereference(DBRef("a", "a", db.name))
self.assertEqual(to_insert, a)
def test__getting_default_database_valid(self):
def gddb(uri):
client = mongomock.MongoClient(uri)
return client, client.get_default_database()
c, db = gddb("mongodb://host1/foo")
self.assertIsNotNone(db)
self.assertIsInstance(db, Database)
self.assertIs(db.client, c)
self.assertIs(db, c['foo'])
c, db = gddb("mongodb://host1/bar")
self.assertIs(db, c['bar'])
c, db = gddb(r"mongodb://a%00lice:f%00oo@127.0.0.1/t%00est")
self.assertIs(db, c["t\x00est"])
c, db = gddb("mongodb://bob:bar@[::1]:27018/admin")
self.assertIs(db, c['admin'])
c, db = gddb("mongodb://%24am:f%3Azzb%40zz@127.0.0.1/"
"admin%3F?authMechanism=MONGODB-CR")
self.assertIs(db, c['admin?'])
def test__getting_default_database_invalid(self):
def client(uri):
return mongomock.MongoClient(uri)
c = client("mongodb://host1")
with assert_raises(ConfigurationError):
c.get_default_database()
c = client("host1")
with assert_raises(ConfigurationError):
c.get_default_database()
c = client("")
with assert_raises(ConfigurationError):
c.get_default_database()
c = client("mongodb://host1/")
with assert_raises(ConfigurationError):
c.get_default_database()
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
class _CollectionComparisonTest(TestCase):
"""Compares a fake collection with the real mongo collection implementation
This is done via cross-comparison of the results.
"""
def setUp(self):
super(_CollectionComparisonTest, self).setUp()
self.fake_conn = mongomock.MongoClient()
self.mongo_conn = self._connect_to_local_mongodb()
self.db_name = "mongomock___testing_db"
self.collection_name = "mongomock___testing_collection"
self.mongo_conn[self.db_name][self.collection_name].remove()
self.mongo_collection = self.mongo_conn[self.db_name][self.collection_name]
self.fake_collection = self.fake_conn[self.db_name][self.collection_name]
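        # Every operation is run against both collections and the results are compared.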
self.cmp = MultiCollection({
"fake": self.fake_collection,
"real": self.mongo_collection,
})
def _connect_to_local_mongodb(self, num_retries=60):
"""Performs retries on connection refused errors (for travis-ci builds)"""
for retry in range(num_retries):
if retry > 0:
time.sleep(0.5)
try:
return PymongoClient()
except pymongo.errors.ConnectionFailure as e:
if retry == num_retries - 1:
raise
if "connection refused" not in e.message.lower():
raise
class MongoClientCollectionTest(_CollectionComparisonTest):
def test__find_is_empty(self):
self.cmp.do.remove()
self.cmp.compare.find()
def test__inserting(self):
self.cmp.do.remove()
data = {"a": 1, "b": 2, "c": "data"}
self.cmp.do.insert(data)
self.cmp.compare.find() # single document, no need to ignore order
def test__bulk_insert(self):
objs = [{"a": 2, "b": {"c": 3}}, {"c": 5}, {"d": 7}]
results_dict = self.cmp.do.insert(objs)
for results in results_dict.values():
self.assertEqual(len(results), len(objs))
self.assertEqual(
len(set(results)), len(results),
"Returned object ids not unique!")
self.cmp.compare_ignore_order.find()
def test__insert_one(self):
self.cmp.do.insert_one({'a': 1})
self.cmp.compare.find()
def test__insert_many(self):
self.cmp.do.insert_many([{'a': 1}, {'a': 2}])
self.cmp.compare.find()
def test__save(self):
# add an item with a non ObjectId _id first.
self.cmp.do.insert({"_id": "b"})
self.cmp.do.save({"_id": ObjectId(), "someProp": 1})
self.cmp.compare_ignore_order.find()
def test__insert_object_id_as_dict(self):
self.cmp.do.remove()
doc_ids = [
# simple top-level dictionary
{'A': 1},
# dict with value as list
{'A': [1, 2, 3]},
# dict with value as dict
{'A': {'sub': {'subsub': 3}}}
]
for doc_id in doc_ids:
_id = self.cmp.do.insert({'_id': doc_id, 'a': 1})
self.assertEqual(_id['fake'], _id['real'])
self.assertEqual(_id['fake'], doc_id)
self.assertEqual(_id['real'], doc_id)
self.assertEqual(type(_id['fake']), type(_id['real']))
self.cmp.compare.find({'_id': doc_id})
docs = self.cmp.compare.find_one({'_id': doc_id})
self.assertEqual(docs['fake']['_id'], doc_id)
self.assertEqual(docs['real']['_id'], doc_id)
self.cmp.do.remove({'_id': doc_id})
def test__count(self):
self.cmp.compare.count()
self.cmp.do.insert({"a": 1})
self.cmp.compare.count()
self.cmp.do.insert({"a": 0})
self.cmp.compare.count()
self.cmp.compare.count({"a": 1})
def test__find_one(self):
self.cmp.do.insert({"_id": "id1", "name": "new"})
self.cmp.compare.find_one({"_id": "id1"})
self.cmp.do.insert({"_id": "id2", "name": "another new"})
self.cmp.compare.find_one({"_id": "id2"}, {"_id": 1})
self.cmp.compare.find_one("id2", {"_id": 1})
def test__find_one_no_args(self):
self.cmp.do.insert({"_id": "new_obj", "field": "value"})
self.cmp.compare.find_one()
def test__find_by_attributes(self):
id1 = ObjectId()
self.cmp.do.insert({"_id": id1, "name": "new"})
self.cmp.do.insert({"name": "another new"})
self.cmp.compare_ignore_order.find()
self.cmp.compare.find({"_id": id1})
def test__find_by_document(self):
self.cmp.do.insert({"name": "new", "doc": {"key": "val"}})
self.cmp.do.insert({"name": "another new"})
self.cmp.compare_ignore_order.find()
self.cmp.compare.find({"doc": {"key": "val"}})
self.cmp.compare.find({"doc": {"key": {'$eq': 'val'}}})
def test__find_by_attributes_return_fields(self):
id1 = ObjectId()
id2 = ObjectId()
self.cmp.do.insert({"_id": id1, "name": "new", "someOtherProp": 2})
self.cmp.do.insert({"_id": id2, "name": "another new"})
self.cmp.compare_ignore_order.find({}, {"_id": 0}) # test exclusion of _id
self.cmp.compare_ignore_order.find({}, {"_id": 1, "someOtherProp": 1}) # test inclusion
self.cmp.compare_ignore_order.find({}, {"_id": 0, "someOtherProp": 0}) # test exclusion
self.cmp.compare_ignore_order.find({}, {"_id": 0, "someOtherProp": 1}) # test mixed _id:0
self.cmp.compare_ignore_order.find({}, {"someOtherProp": 0}) # test no _id, otherProp:0
self.cmp.compare_ignore_order.find({}, {"someOtherProp": 1}) # test no _id, otherProp:1
self.cmp.compare.find({"_id": id1}, {"_id": 0}) # test exclusion of _id
self.cmp.compare.find({"_id": id1}, {"_id": 1, "someOtherProp": 1}) # test inclusion
self.cmp.compare.find({"_id": id1}, {"_id": 0, "someOtherProp": 0}) # test exclusion
# test mixed _id:0
self.cmp.compare.find({"_id": id1}, {"_id": 0, "someOtherProp": 1})
# test no _id, otherProp:0
self.cmp.compare.find({"_id": id1}, {"someOtherProp": 0})
# test no _id, otherProp:1
self.cmp.compare.find({"_id": id1}, {"someOtherProp": 1})
def test__find_by_attributes_return_fields_elemMatch(self):
id = ObjectId()
self.cmp.do.insert({
'_id': id,
'owns': [
{'type': 'hat', 'color': 'black'},
{'type': 'hat', 'color': 'green'},
{'type': 't-shirt', 'color': 'black', 'size': 'small'},
{'type': 't-shirt', 'color': 'black'},
{'type': 't-shirt', 'color': 'white'}
],
'hat': 'red'
})
elem = {'$elemMatch': {'type': 't-shirt', 'color': 'black'}}
# test filtering on array field only
self.cmp.compare.find({'_id': id}, {'owns': elem})
# test filtering on array field with inclusion
self.cmp.compare.find({'_id': id}, {'owns': elem, 'hat': 1})
# test filtering on array field with exclusion
self.cmp.compare.find({'_id': id}, {'owns': elem, 'hat': 0})
# test filtering on non array field
self.cmp.compare.find({'_id': id}, {'hat': elem})
# test no match
self.cmp.compare.find({'_id': id}, {'owns': {'$elemMatch': {'type': 'cap'}}})
def test__regex_match_non_string(self):
id = ObjectId()
self.cmp.do.insert({
'_id': id,
'test': 1
})
self.cmp.compare.find({'_id': id, 'test': {'$regex': '1'}})
def test__find_by_dotted_attributes(self):
"""Test seaching with dot notation."""
green_bowler = {
'name': 'bob',
'hat': {'color': 'green', 'type': 'bowler'}}
red_bowler = {
'name': 'sam',
'hat': {'color': 'red', 'type': 'bowler'}}
self.cmp.do.insert(green_bowler)
self.cmp.do.insert(red_bowler)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({"name": "sam"})
self.cmp.compare_ignore_order.find({'hat.color': 'green'})
self.cmp.compare_ignore_order.find({'hat.type': 'bowler'})
self.cmp.compare.find({
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({
'name': 'bob',
'hat.color': 'red',
'hat.type': 'bowler'
})
self.cmp.compare.find({'hat': 'a hat'})
self.cmp.compare.find({'hat.color.cat': 'red'})
def test__find_empty_array_field(self):
# See #90
self.cmp.do.insert({'array_field': []})
self.cmp.compare.find({'array_field': []})
def test__find_non_empty_array_field(self):
# See #90
self.cmp.do.insert({'array_field': [['abc']]})
self.cmp.do.insert({'array_field': ['def']})
self.cmp.compare.find({'array_field': ['abc']})
self.cmp.compare.find({'array_field': [['abc']]})
self.cmp.compare.find({'array_field': 'def'})
self.cmp.compare.find({'array_field': ['def']})
def test__find_by_objectid_in_list(self):
# See #79
self.cmp.do.insert(
{'_id': 'x', 'rel_id': [ObjectId('52d669dcad547f059424f783')]})
self.cmp.compare.find({'rel_id': ObjectId('52d669dcad547f059424f783')})
def test__find_subselect_in_list(self):
# See #78
self.cmp.do.insert({'_id': 'some_id', 'a': [{'b': 1, 'c': 2}]})
self.cmp.compare.find_one({'a.b': 1})
def test__find_by_regex_object(self):
"""Test searching with regular expression objects."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find()
regex = re.compile('bob|sam')
self.cmp.compare_ignore_order.find({"name": regex})
regex = re.compile('bob|notsam')
self.cmp.compare_ignore_order.find({"name": regex})
def test__find_by_regex_string(self):
"""Test searching with regular expression string."""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({"name": {'$regex': 'bob|sam'}})
self.cmp.compare_ignore_order.find({'name': {'$regex': 'bob|notsam'}})
def test__find_in_array_by_regex_object(self):
"""Test searching inside array with regular expression object."""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
regex = re.compile('^a')
self.cmp.compare_ignore_order.find({"text": regex})
regex = re.compile('e$')
self.cmp.compare_ignore_order.find({"text": regex})
regex = re.compile('bde|cde')
self.cmp.compare_ignore_order.find({"text": regex})
def test__find_in_array_by_regex_string(self):
"""Test searching inside array with regular expression string"""
bob = {'name': 'bob', 'text': ['abcd', 'cde']}
sam = {'name': 'sam', 'text': ['bde']}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find({"text": {'$regex': '^a'}})
self.cmp.compare_ignore_order.find({"text": {'$regex': 'e$'}})
self.cmp.compare_ignore_order.find({"text": {'$regex': 'bcd|cde'}})
def test__find_by_regex_string_on_absent_field_dont_break(self):
"""Test searching on absent field with regular expression string dont break"""
bob = {'name': 'bob'}
sam = {'name': 'sam'}
self.cmp.do.insert(bob)
self.cmp.do.insert(sam)
self.cmp.compare_ignore_order.find({"text": {'$regex': 'bob|sam'}})
def test__find_by_elemMatch(self):
self.cmp.do.insert({"field": [{"a": 1, "b": 2}, {"c": 3, "d": 4}]})
self.cmp.do.insert({"field": [{"a": 1, "b": 4}, {"c": 3, "d": 8}]})
self.cmp.do.insert({"field": "nonlist"})
self.cmp.do.insert({"field": 2})
self.cmp.compare.find({"field": {"$elemMatch": {"b": 1}}})
self.cmp.compare_ignore_order.find({"field": {"$elemMatch": {"a": 1}}})
self.cmp.compare.find({"field": {"$elemMatch": {"b": {"$gt": 3}}}})
def test__find_in_array(self):
self.cmp.do.insert({"field": [{"a": 1, "b": 2}, {"c": 3, "d": 4}]})
self.cmp.compare.find({"field.0.a": 1})
self.cmp.compare.find({"field.0.b": 2})
self.cmp.compare.find({"field.1.c": 3})
self.cmp.compare.find({"field.1.d": 4})
self.cmp.compare.find({"field.0": {"$exists": True}})
self.cmp.compare.find({"field.0": {"$exists": False}})
self.cmp.compare.find({"field.0.a": {"$exists": True}})
self.cmp.compare.find({"field.0.a": {"$exists": False}})
self.cmp.compare.find({"field.1.a": {"$exists": True}})
self.cmp.compare.find({"field.1.a": {"$exists": False}})
self.cmp.compare.find(
{"field.0.a": {"$exists": True}, "field.1.a": {"$exists": False}})
def test__find_notequal(self):
"""Test searching with operators other than equality."""
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
a_goat = {'_id': 3, 'goatness': 'very'}
self.cmp.do.insert([bob, sam, a_goat])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$ne': 'bob'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'very'}})
self.cmp.compare_ignore_order.find({'goatness': {'$ne': 'not very'}})
self.cmp.compare_ignore_order.find({'snakeness': {'$ne': 'very'}})
def test__find_notequal_by_value(self):
"""Test searching for None."""
bob = {'_id': 1, 'name': 'bob', 'sheepness': {'sometimes': True}}
sam = {'_id': 2, 'name': 'sam', 'sheepness': {'sometimes': True}}
a_goat = {'_id': 3, 'goatness': 'very', 'sheepness': {}}
self.cmp.do.insert([bob, sam, a_goat])
self.cmp.compare_ignore_order.find({'goatness': None})
self.cmp.compare_ignore_order.find({'sheepness.sometimes': None})
def test__find_not(self):
bob = {'_id': 1, 'name': 'bob'}
sam = {'_id': 2, 'name': 'sam'}
self.cmp.do.insert([bob, sam])
self.cmp.compare_ignore_order.find()
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'bob'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'sam'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$ne': 'dan'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'bob'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'sam'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': {'$eq': 'dan'}}})
self.cmp.compare_ignore_order.find({'name': {'$not': re.compile('dan')}})
def test__find_not_exceptions(self):
self.cmp.do.insert(dict(noise="longhorn"))
with assert_raises(OperationFailure):
self.mongo_collection.find({'name': {'$not': True}}).count()
with assert_raises(OperationFailure):
self.fake_collection.find({'name': {'$not': True}}).count()
with assert_raises(OperationFailure):
self.mongo_collection.find({'name': {'$not': {'$regex': ''}}}).count()
with assert_raises(OperationFailure):
self.fake_collection.find({'name': {'$not': {'$regex': ''}}}).count()
with assert_raises(OperationFailure):
self.mongo_collection.find({'name': {'$not': []}}).count()
with assert_raises(OperationFailure):
self.fake_collection.find({'name': {'$not': []}}).count()
with assert_raises(OperationFailure):
self.mongo_collection.find({'name': {'$not': ''}}).count()
with assert_raises(OperationFailure):
self.fake_collection.find({'name': {'$not': ''}}).count()
def test__find_compare(self):
self.cmp.do.insert(dict(noise="longhorn"))
for x in range(10):
self.cmp.do.insert(dict(num=x, sqrd=x * x))
self.cmp.compare_ignore_order.find({'sqrd': {'$lte': 4}})
self.cmp.compare_ignore_order.find({'sqrd': {'$lt': 4}})
self.cmp.compare_ignore_order.find({'sqrd': {'$gte': 64}})
self.cmp.compare_ignore_order.find({'sqrd': {'$gte': 25, '$lte': 36}})
def test__find_sets(self):
single = 4
even = [2, 4, 6, 8]
prime = [2, 3, 5, 7]
self.cmp.do.insert([
dict(x=single),
dict(x=even),
dict(x=prime)])
self.cmp.compare_ignore_order.find({'x': {'$in': [7, 8]}})
self.cmp.compare_ignore_order.find({'x': {'$in': [4, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$nin': [2, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$all': [2, 5]}})
self.cmp.compare_ignore_order.find({'x': {'$all': [7, 8]}})
self.cmp.compare_ignore_order.find({'x': 2})
self.cmp.compare_ignore_order.find({'x': 4})
self.cmp.compare_ignore_order.find({'$or': [{'x': 4}, {'x': 2}]})
self.cmp.compare_ignore_order.find({'$or': [{'x': 4}, {'x': 7}]})
self.cmp.compare_ignore_order.find({'$and': [{'x': 2}, {'x': 7}]})
self.cmp.compare_ignore_order.find({'$nor': [{'x': 3}]})
self.cmp.compare_ignore_order.find({'$nor': [{'x': 4}, {'x': 2}]})
def test__find_and_modify_remove(self):
self.cmp.do.insert([{"a": x} for x in range(10)])
self.cmp.do.find_and_modify({"a": 2}, remove=True)
self.cmp.compare_ignore_order.find()
def test__find_one_and_delete(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_delete({'a': 5}, {'_id': False})
self.cmp.compare.find()
def test__find_one_and_replace(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_replace(
{'a': 5}, {'a': 11}, projection={'_id': False})
self.cmp.compare.find()
def test__find_one_and_update(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find_one_and_update(
{'a': 5}, {'$set': {'a': 11}}, projection={'_id': False})
self.cmp.compare.find()
def test__find_sort_list(self):
self.cmp.do.remove()
for data in ({"a": 1, "b": 3, "c": "data1"},
{"a": 2, "b": 2, "c": "data3"},
{"a": 3, "b": 1, "c": "data2"}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort=[("a", 1), ("b", -1)])
self.cmp.compare.find(sort=[("b", 1), ("a", -1)])
self.cmp.compare.find(sort=[("b", 1), ("a", -1), ("c", 1)])
def test__find_sort_list_empty_order(self):
self.cmp.do.remove()
for data in ({"a": 1},
{"a": 2, "b": -2},
{"a": 3, "b": 4}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort=[("b", 1)])
self.cmp.compare.find(sort=[("b", -1)])
def test__find_sort_list_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a": 1, "b": 3, "c": "data1"}},
{"root": {"a": 2, "b": 2, "c": "data3"}},
{"root": {"a": 3, "b": 1, "c": "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort=[("root.a", 1), ("root.b", -1)])
self.cmp.compare.find(sort=[("root.b", 1), ("root.a", -1)])
self.cmp.compare.find(
sort=[
("root.b", 1), ("root.a", -1), ("root.c", 1)])
def test__find_sort_list_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a": 1, "b": 3, "c": "data1"}]},
{"root": [{"a": 2, "b": 2, "c": "data3"}]},
{"root": [{"a": 3, "b": 1, "c": "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare.find(sort=[("root.0.a", 1), ("root.0.b", -1)])
self.cmp.compare.find(sort=[("root.0.b", 1), ("root.0.a", -1)])
self.cmp.compare.find(
sort=[
("root.0.b", 1), ("root.0.a", -1), ("root.0.c", 1)])
def test__find_limit(self):
self.cmp.do.remove()
for data in ({"a": 1, "b": 3, "c": "data1"},
{"a": 2, "b": 2, "c": "data3"},
{"a": 3, "b": 1, "c": "data2"}):
self.cmp.do.insert(data)
self.cmp.compare.find(limit=2, sort=[("a", 1), ("b", -1)])
# pymongo limit defaults to 0, returning everything
self.cmp.compare.find(limit=0, sort=[("a", 1), ("b", -1)])
# def test__as_class(self):
# class MyDict(dict):
# pass
#
# self.cmp.do.remove()
# self.cmp.do.insert(
# {"a": 1, "b": {"ba": 3, "bb": 4, "bc": [{"bca": 5}]}})
# self.cmp.compare.find({}, as_class=MyDict)
# self.cmp.compare.find({"a": 1}, as_class=MyDict)
def test__return_only_selected_fields(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['type'])
def test__return_only_selected_fields_no_id(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 1, '_id': 0})
def test__return_only_selected_fields_nested_field_found(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['properties.type'])
def test__return_only_selected_fields_nested_field_not_found(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection=['properties.color'])
def test__return_only_selected_fields_nested_field_found_no_id(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 1, '_id': 0})
def test__return_only_selected_fields_nested_field_not_found_no_id(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.color': 1, '_id': 0})
def test__exclude_selected_fields(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 0})
def test__exclude_selected_fields_including_id(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'type': 0, '_id': 0})
def test__exclude_all_fields_including_id(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll'})
self.cmp.compare.find(
{'name': 'Chucky'}, projection={'type': 0, '_id': 0, 'name': 0})
def test__exclude_selected_nested_fields(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 0})
def test__exclude_all_selected_nested_fields(self):
self.cmp.do.insert(
{'name': 'Chucky', 'properties': {'type': 'doll', 'model': 'v6'}})
self.cmp.compare_ignore_order.find(
{'name': 'Chucky'}, projection={'properties.type': 0, 'properties.model': 0})
def test__default_fields_to_id_if_empty(self):
self.cmp.do.insert({'name': 'Chucky', 'type': 'doll', 'model': 'v6'})
self.cmp.compare_ignore_order.find({'name': 'Chucky'}, projection=[])
def test__remove(self):
"""Test the remove method."""
self.cmp.do.insert({"value": 1})
self.cmp.compare_ignore_order.find()
self.cmp.do.remove()
self.cmp.compare.find()
self.cmp.do.insert([
{'name': 'bob'},
{'name': 'sam'},
])
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'bob'})
self.cmp.compare_ignore_order.find()
self.cmp.do.remove({'name': 'notsam'})
self.cmp.compare.find()
self.cmp.do.remove({'name': 'sam'})
self.cmp.compare.find()
def test__delete_one(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find()
self.cmp.do.delete_one({'a': 5})
self.cmp.compare.find()
def test__delete_many(self):
self.cmp.do.insert_many([{'a': i} for i in range(10)])
self.cmp.compare.find()
self.cmp.do.delete_many({'a': {'$gt': 5}})
self.cmp.compare.find()
def test__update(self):
doc = {"a": 1}
self.cmp.do.insert(doc)
new_document = {"new_attr": 2}
self.cmp.do.update({"a": 1}, new_document)
self.cmp.compare_ignore_order.find()
def test__update_upsert_with_id(self):
self.cmp.do.update(
{'a': 1}, {'_id': ObjectId('52d669dcad547f059424f783'), 'a': 1}, upsert=True)
self.cmp.compare.find()
def test__update_upsert_with_dots(self):
self.cmp.do.update(
{'a.b': 1}, {'$set': {'c': 2}}, upsert=True)
self.cmp.compare.find()
def test__update_one(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.update_one({'a': 2}, {'$set': {'b': 1}})
self.cmp.compare.find()
self.cmp.do.update_one({'a': 3}, {'$set': {'a': 3, 'b': 0}})
self.cmp.compare.find()
self.cmp.do.update_one({'a': 3}, {'$set': {'a': 3, 'b': 0}},
upsert=True)
self.cmp.compare.find()
def test__update_many(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.update_many({'b': 1}, {'$set': {'b': 1}})
self.cmp.compare.find()
self.cmp.do.update_many({'b': 0}, {'$set': {'b': 1}})
self.cmp.compare.find()
def test__replace_one(self):
self.cmp.do.insert_many([{'a': 1, 'b': 0},
{'a': 2, 'b': 0}])
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 2}, {'a': 3, 'b': 0})
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 4}, {'a': 4, 'b': 0})
self.cmp.compare.find()
self.cmp.do.replace_one({'a': 4}, {'a': 4, 'b': 0}, upsert=True)
self.cmp.compare.find()
def test__set(self):
"""Tests calling update with $set members."""
self.cmp.do.update({'_id': 42},
{'$set': {'some': 'thing'}},
upsert=True)
self.cmp.compare.find({'_id': 42})
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$set': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$set': {'hat': 'red'}})
self.cmp.compare.find({'name': 'bob'})
def test__unset(self):
"""Tests calling update with $unset members."""
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': 0}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': 1}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': ""}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': True}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'a': 'aaa'}, upsert=True)
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$unset': {'a': False}})
self.cmp.compare.find({'name': 'bob'})
def test__unset_nested(self):
self.cmp.do.update({'_id': 1}, {'$set': {'a': {'b': 1, 'c': 2}}}, upsert=True)
self.cmp.do.update({'_id': 1}, {'$unset': {'a.b': True}})
self.cmp.compare.find()
self.cmp.do.update({'_id': 1}, {'$set': {'a': {'b': 1, 'c': 2}}}, upsert=True)
self.cmp.do.update({'_id': 1}, {'$unset': {'a.b': False}})
self.cmp.compare.find()
self.cmp.do.update({'_id': 1}, {'$set': {'a': {'b': 1}}}, upsert=True)
self.cmp.do.update({'_id': 1}, {'$unset': {'a.b': True}})
self.cmp.compare.find()
self.cmp.do.update({'_id': 1}, {'$set': {'a': {'b': 1}}}, upsert=True)
self.cmp.do.update({'_id': 1}, {'$unset': {'a.b': False}})
self.cmp.compare.find()
def test__set_upsert(self):
self.cmp.do.remove()
self.cmp.do.update({"name": "bob"}, {"$set": {"age": 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({"name": "alice"}, {"$set": {"age": 1}}, True)
self.cmp.compare_ignore_order.find()
def test__set_subdocuments(self):
"""Tests using $set for setting subdocument fields"""
self.skipTest(
"MongoClient does not allow setting subdocuments on existing non-documents")
self.cmp.do.insert(
{'name': 'bob', 'data1': 1, 'subdocument': {'a': {'b': {'c': 20}}}})
self.cmp.do.update({'name': 'bob'}, {'$set': {'data1.field1': 11}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$set': {'data2.field1': 21}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$set': {'subdocument.a.b': 21}})
self.cmp.compare.find()
def test__set_subdocuments_positional(self):
self.cmp.do.insert({'name': 'bob', 'subdocs': [
{'id': 1, 'name': 'foo'},
{'id': 2, 'name': 'bar'}
]})
self.cmp.do.update({'name': 'bob', 'subdocs.id': 2},
{'$set': {'subdocs.$': {'id': 3, 'name': 'baz'}}})
self.cmp.compare.find()
def test__inc(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$inc': {'count': 1}})
self.cmp.compare.find({'name': 'bob'})
def test__max(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$max': {'count': i}})
self.cmp.compare.find({'name': 'bob'})
def test__min(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$min': {'count': i}})
self.cmp.compare.find({'name': 'bob'})
def test__inc_upsert(self):
self.cmp.do.remove()
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$inc': {'count': 1}}, True)
self.cmp.compare.find({'name': 'bob'})
def test__inc_subdocument(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update({'name': 'bob'}, {'$inc': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$inc': {'data.age2': 1}})
self.cmp.compare.find()
def test__inc_subdocument_positional(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$inc': {'data.$.age': 1}})
self.cmp.compare.find()
def test__setOnInsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'age': 1}})
self.cmp.compare.find()
def test__setOnInsert_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}})
self.cmp.compare.find()
self.cmp.do.update({'name': 'ann'}, {'$setOnInsert': {'data.age': 1}})
self.cmp.compare.find()
def test__setOnInsert_subdocument_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': {'age': 0}})
self.cmp.do.update(
{'name': 'bob'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update(
{'name': 'bob'}, {'$setOnInsert': {'data.age1': 1}}, True)
self.cmp.compare.find()
self.cmp.do.update(
{'name': 'ann'}, {'$setOnInsert': {'data.age': 1}}, True)
self.cmp.compare.find()
def test__setOnInsert_subdocument_elemMatch(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}})
self.cmp.compare.find()
def test__inc_subdocument_positional_upsert(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'data': [{'age': 0}, {'age': 1}]})
self.cmp.do.update({'name': 'bob', 'data': {'$elemMatch': {'age': 0}}},
{'$setOnInsert': {'data.$.age': 1}}, True)
self.cmp.compare.find()
def test__addToSet(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$addToSet': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update({'name': 'bob'}, {'$addToSet': {'hat': 'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_nested(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update(
{'name': 'bob'}, {'$addToSet': {'hat.color': 'green'}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update(
{'name': 'bob'}, {'$addToSet': {'hat.color': 'tall'}})
self.cmp.compare.find({'name': 'bob'})
def test__addToSet_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
for i in range(3):
self.cmp.do.update(
{'name': 'bob'},
{'$addToSet': {'hat': {'$each': ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
for i in range(3):
self.cmp.do.update(
{'name': 'bob'},
{'$addToSet': {'shirt.color': {'$each': ['green', 'yellow']}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat': 'green'}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_query(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': [{'size': 5}, {'size': 10}]})
self.cmp.do.update(
{'name': 'bob'}, {'$pull': {'hat': {'size': {'$gt': 6}}}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'sizes': [{'size': 5}, {'size': 10}]}})
self.cmp.do.update(
{'name': 'bob'}, {'$pull': {'hat.sizes': {'size': {'$gt': 6}}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$pull': {'hat.$.sizes': {'size': 'L'}}})
self.cmp.compare.find({'name': 'bob'})
def test__pull_nested_list(self):
self.cmp.do.remove()
self.cmp.do.insert(
{'name': 'bob', 'hat':
[{'name': 'derby', 'sizes': ['L', 'XL']},
{'name': 'cap', 'sizes': ['S', 'L']}]})
self.cmp.do.update(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$pull': {'hat.$.sizes': 'XL'}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'nested': ['element1', 'element2', 'element1']}})
self.cmp.do.update({'name': 'bob'}, {'$pull': {'hat.nested': 'element1'}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update(
{'name': 'bob'}, {'$pullAll': {'hat': ['green', 'blue']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall', 'blue']})
self.cmp.do.update({'name': 'bob'}, {'$pullAll': {'hat': ['green']}})
self.cmp.compare.find({'name': 'bob'})
def test__pullAll_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert(
{'name': 'bob', 'hat': {'properties': {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update(
{'name': 'bob'}, {'$pullAll': {'hat.properties.sizes': ['M']}})
self.cmp.compare.find({'name': 'bob'})
self.cmp.do.remove()
self.cmp.do.insert(
{'name': 'bob', 'hat': {'properties': {'sizes': ['M', 'L', 'XL']}}})
self.cmp.do.update(
{'name': 'bob'},
{'$pullAll': {'hat.properties.sizes': ['M', 'L']}})
self.cmp.compare.find({'name': 'bob'})
def test__push(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_dict(self):
self.cmp.do.remove()
self.cmp.do.insert(
{'name': 'bob', 'hat': [{'name': 'derby', 'sizes': ['L', 'XL']}]})
self.cmp.do.update(
{'name': 'bob'},
{'$push': {'hat': {'name': 'cap', 'sizes': ['S', 'L']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': ['green', 'tall']})
self.cmp.do.update(
{'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict(self):
self.cmp.do.remove()
self.cmp.do.insert({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push': {'hat.$.sizes': {'size': 'M', 'quantity': 6}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_dict_each(self):
self.cmp.do.remove()
self.cmp.do.insert({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': [{'size': 'L', 'quantity': 3},
{'size': 'XL', 'quantity': 4}],
'colors': ['green', 'blue']},
{'name': 'cap',
'sizes': [{'size': 'S', 'quantity': 10},
{'size': 'L', 'quantity': 5}],
'colors': ['blue']}]})
self.cmp.do.update(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push':
{'hat.$.sizes':
{'$each':
[{'size': 'M', 'quantity': 6}, {'size': 'S', 'quantity': 1}]}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_list_each(self):
self.cmp.do.remove()
self.cmp.do.insert({
'name': 'bob',
'hat': [
{'name': 'derby',
'sizes': ['L', 'XL'],
'colors': ['green', 'blue']},
{'name': 'cap', 'sizes': ['S', 'L'],
'colors': ['blue']}
]
})
self.cmp.do.update(
{'hat': {'$elemMatch': {'name': 'derby'}}},
{'$push': {'hat.$.sizes': {'$each': ['M', 'S']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {'data': {'sizes': ['XL']}}})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_nested_attribute_each(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob', 'hat': {}})
self.cmp.do.update(
{'name': 'bob'}, {'$push': {'hat.first': {'$each': ['a', 'b']}}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_nested_attribute(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat.data.sizes': 'L'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_to_absent_field(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update({'name': 'bob'}, {'$push': {'hat': 'wide'}})
self.cmp.compare.find({'name': 'bob'})
def test__push_each_to_absent_field(self):
self.cmp.do.remove()
self.cmp.do.insert({'name': 'bob'})
self.cmp.do.update(
{'name': 'bob'}, {'$push': {'hat': {'$each': ['wide', 'blue']}}})
self.cmp.compare.find({'name': 'bob'})
def test__drop(self):
self.cmp.do.insert({"name": "another new"})
self.cmp.do.drop()
self.cmp.compare.find({})
def test__ensure_index(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do.ensure_index("name")
self.cmp.do.ensure_index("hat", cache_for=100)
self.cmp.do.ensure_index([("name", 1), ("hat", -1)])
def test__drop_index(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do.drop_index("name")
def test__index_information(self):
# Does nothing - just make sure it exists
self.cmp.do.index_information()
def test__empty_logical_operators(self):
for operator in mongomock.filtering.LOGICAL_OPERATOR_MAP:
try:
self.cmp.compare_ignore_order.find({operator: []})
except Exception as e:
assert isinstance(e, mongomock.OperationFailure)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class CollectionMapReduceTest(TestCase):
def setUp(self):
self.db = mongomock.MongoClient().map_reduce_test
self.data = [{"x": 1, "tags": ["dog", "cat"]},
{"x": 2, "tags": ["cat"]},
{"x": 3, "tags": ["mouse", "cat", "dog"]},
{"x": 4, "tags": []}]
for item in self.data:
self.db.things.insert(item)
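        # The map function emits (tag, 1) for every tag; the reduce function sums
        # the emitted values, yielding a per-tag count.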
self.map_func = Code("""
function() {
this.tags.forEach(function(z) {
emit(z, 1);
});
}""")
self.reduce_func = Code("""
function(key, values) {
var total = 0;
for(var i = 0; i<values.length; i++) {
total += values[i];
}
return total;
}""")
self.expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 3}]
def test__map_reduce(self):
self._check_map_reduce(self.db.things, self.expected_results)
def test__map_reduce_clean_res_colc(self):
# Checks that the result collection is cleaned between calls
self._check_map_reduce(self.db.things, self.expected_results)
more_data = [{"x": 1, "tags": []},
{"x": 2, "tags": []},
{"x": 3, "tags": []},
{"x": 4, "tags": []}]
for item in more_data:
self.db.more_things.insert(item)
expected_results = []
self._check_map_reduce(self.db.more_things, expected_results)
def _check_map_reduce(self, colc, expected_results):
result = colc.map_reduce(self.map_func, self.reduce_func, 'myresults')
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), len(expected_results))
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_son(self):
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
out=SON([('replace', 'results'), ('db', 'map_reduce_son_test')]))
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'results')
self.assertEqual(result._database.name, 'map_reduce_son_test')
self.assertEqual(result.count(), 3)
for doc in result.find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_full_response(self):
expected_full_response = {
'counts': {
'input': 4,
'reduce': 2,
'emit': 6,
'output': 3
},
'timeMillis': 5,
'ok': 1.0,
'result': 'myresults'
}
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
'myresults', full_response=True)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['counts'], expected_full_response['counts'])
self.assertEqual(result['result'], expected_full_response['result'])
for doc in getattr(self.db, result['result']).find():
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_query(self):
expected_results = [{'_id': 'mouse', 'value': 1},
{'_id': 'dog', 'value': 2},
{'_id': 'cat', 'value': 2}]
result = self.db.things.map_reduce(
self.map_func, self.reduce_func,
'myresults', query={'tags': 'dog'})
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 3)
for doc in result.find():
self.assertIn(doc, expected_results)
def test__map_reduce_with_limit(self):
result = self.db.things.map_reduce(
self.map_func, self.reduce_func, 'myresults', limit=2)
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 2)
def test__inline_map_reduce(self):
result = self.db.things.inline_map_reduce(
self.map_func, self.reduce_func)
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 3)
for doc in result:
self.assertIn(doc, self.expected_results)
def test__inline_map_reduce_full_response(self):
expected_full_response = {
'counts': {
'input': 4,
'reduce': 2,
'emit': 6,
'output': 3
},
'timeMillis': 5,
'ok': 1.0,
'result': [
{'_id': 'cat', 'value': 3},
{'_id': 'dog', 'value': 2},
{'_id': 'mouse', 'value': 1}]
}
result = self.db.things.inline_map_reduce(
self.map_func, self.reduce_func, full_response=True)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['counts'], expected_full_response['counts'])
for doc in result['result']:
self.assertIn(doc, self.expected_results)
def test__map_reduce_with_object_id(self):
obj1 = ObjectId()
obj2 = ObjectId()
data = [{"x": 1, "tags": [obj1, obj2]},
{"x": 2, "tags": [obj1]}]
for item in data:
self.db.things_with_obj.insert(item)
expected_results = [{'_id': obj1, 'value': 2},
{'_id': obj2, 'value': 1}]
result = self.db.things_with_obj.map_reduce(
self.map_func, self.reduce_func, 'myresults')
self.assertTrue(isinstance(result, mongomock.Collection))
self.assertEqual(result.name, 'myresults')
self.assertEqual(result.count(), 2)
for doc in result.find():
self.assertIn(doc, expected_results)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class _GroupTest(_CollectionComparisonTest):
def setUp(self):
_CollectionComparisonTest.setUp(self)
self._id1 = ObjectId()
self.data = [
{"a": 1, "count": 4},
{"a": 1, "count": 2},
{"a": 1, "count": 4},
{"a": 2, "count": 3},
{"a": 2, "count": 1},
{"a": 1, "count": 5},
{"a": 4, "count": 4},
{"b": 4, "foo": 4},
{"b": 2, "foo": 3, "name": "theone"},
{"b": 1, "foo": 2},
{"b": 1, "foo": self._id1},
]
for item in self.data:
self.cmp.do.insert(item)
def test__group1(self):
key = ["a"]
initial = {"count": 0}
condition = {"a": {"$lt": 3}}
reduce_func = Code("""
function(cur, result) { result.count += cur.count }
""")
self.cmp.compare.group(key, condition, initial, reduce_func)
def test__group2(self):
reduce_func = Code("""
function(cur, result) { result.count += 1 }
""")
self.cmp.compare.group(
key=["b"],
condition={"foo": {"$in": [3, 4]}, "name": "theone"},
initial={"count": 0},
reduce=reduce_func)
def test__group3(self):
reducer = Code("""
function(obj, result) {result.count+=1 }
""")
conditions = {'foo': {'$in': [self._id1]}}
self.cmp.compare.group(
key=['foo'],
condition=conditions,
initial={"count": 0},
reduce=reducer)
@skipIf(not _HAVE_PYMONGO, "pymongo not installed")
@skipIf(not _HAVE_MAP_REDUCE, "execjs not installed")
class MongoClientAggregateTest(_CollectionComparisonTest):
def setUp(self):
super(MongoClientAggregateTest, self).setUp()
self.data = [
{"_id": ObjectId(), "a": 1, "b": 1, "count": 4, "swallows": ['European swallow'],
"date": datetime.datetime(2015, 10, 1, 10, 0)},
{"_id": ObjectId(), "a": 1, "b": 1, "count": 2, "swallows": ['African swallow'],
"date": datetime.datetime(2015, 12, 1, 12, 0)},
{"_id": ObjectId(), "a": 1, "b": 2, "count": 4, "swallows": ['European swallow'],
"date": datetime.datetime(2014, 10, 2, 12, 0)},
{"_id": ObjectId(), "a": 2, "b": 2, "count": 3, "swallows": ['African swallow',
'European swallow'],
"date": datetime.datetime(2015, 1, 2, 10, 0)},
{"_id": ObjectId(), "a": 2, "b": 3, "count": 1, "swallows": [],
"date": datetime.datetime(2013, 1, 3, 12, 0)},
{"_id": ObjectId(), "a": 1, "b": 4, "count": 5, "swallows": ['African swallow',
'European swallow'],
"date": datetime.datetime(2015, 8, 4, 12, 0)},
{"_id": ObjectId(), "a": 4, "b": 4, "count": 4, "swallows": ['unladen swallow'],
"date": datetime.datetime(2014, 7, 4, 13, 0)}]
for item in self.data:
self.cmp.do.insert(item)
def test__aggregate1(self):
pipeline = [
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate2(self):
pipeline = [
{'$group': {'_id': '$a', 'count': {'$sum': '$count'}}},
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1, 'count': 1}},
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate3(self):
pipeline = [
{'$group': {'_id': 'a', 'count': {'$sum': '$count'}}},
{'$match': {'a': {'$lt': 3}}},
{'$sort': {'_id': -1, 'count': 1}},
{'$skip': 1},
{'$limit': 2}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate4(self):
pipeline = [
{'$unwind': '$swallows'},
{'$sort': {'count': -1, 'swallows': -1}}]
self.cmp.compare.aggregate(pipeline)
def test__aggregate5(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a'}, 'total': {'$sum': '$count'},
'avg': {'$avg': '$count'}}},
{'$sort': {'_id.a': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate6(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a', 'id_b': '$b'}, 'total': {'$sum': '$count'},
'avg': {'$avg': '$count'}}},
{'$sort': {'_id.id_a': 1, '_id.id_b': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
def test__aggregate7(self):
pipeline = [
{'$group': {'_id': {'id_a': '$a', 'id_b': {'$year': '$date'}},
'total': {'$sum': '$count'}, 'avg': {'$avg': '$count'}}},
{'$sort': {'_id.id_a': 1, '_id.id_b': 1, 'total': 1, 'avg': 1}}
]
self.cmp.compare.aggregate(pipeline)
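# Cursor-modifier helpers: each returns a callable that applies the corresponding
# cursor method, so tests can compose sort/skip/limit when comparing collections.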
def _LIMIT(*args):
return lambda cursor: cursor.limit(*args)
def _SORT(*args):
return lambda cursor: cursor.sort(*args)
def _SKIP(*args):
return lambda cursor: cursor.skip(*args)
class MongoClientSortSkipLimitTest(_CollectionComparisonTest):
def setUp(self):
super(MongoClientSortSkipLimitTest, self).setUp()
self.cmp.do.insert([{"_id": i, "index": i} for i in range(30)])
def test__skip(self):
self.cmp.compare(_SORT("index", 1), _SKIP(10)).find()
def test__limit(self):
self.cmp.compare(_SORT("index", 1), _LIMIT(10)).find()
def test__skip_and_limit(self):
self.cmp.compare(_SORT("index", 1), _SKIP(10), _LIMIT(10)).find()
def test__sort_name(self):
self.cmp.do.remove()
for data in ({"a": 1, "b": 3, "c": "data1"},
{"a": 2, "b": 2, "c": "data3"},
{"a": 3, "b": 1, "c": "data2"}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("a")).find()
self.cmp.compare(_SORT("b")).find()
def test__sort_name_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a": 1, "b": 3, "c": "data1"}},
{"root": {"a": 2, "b": 2, "c": "data3"}},
{"root": {"a": 3, "b": 1, "c": "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("root.a")).find()
self.cmp.compare(_SORT("root.b")).find()
def test__sort_name_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a": 1, "b": 3, "c": "data1"}]},
{"root": [{"a": 2, "b": 2, "c": "data3"}]},
{"root": [{"a": 3, "b": 1, "c": "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT("root.0.a")).find()
self.cmp.compare(_SORT("root.0.b")).find()
def test__sort_list(self):
self.cmp.do.remove()
for data in ({"a": 1, "b": 3, "c": "data1"},
{"a": 2, "b": 2, "c": "data3"},
{"a": 3, "b": 1, "c": "data2"}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("a", 1), ("b", -1)])).find()
self.cmp.compare(_SORT([("b", 1), ("a", -1)])).find()
self.cmp.compare(_SORT([("b", 1), ("a", -1), ("c", 1)])).find()
def test__sort_list_nested_doc(self):
self.cmp.do.remove()
for data in ({"root": {"a": 1, "b": 3, "c": "data1"}},
{"root": {"a": 2, "b": 2, "c": "data3"}},
{"root": {"a": 3, "b": 1, "c": "data2"}}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("root.a", 1), ("root.b", -1)])).find()
self.cmp.compare(_SORT([("root.b", 1), ("root.a", -1)])).find()
self.cmp.compare(
_SORT([("root.b", 1), ("root.a", -1), ("root.c", 1)])).find()
def test__sort_list_nested_list(self):
self.cmp.do.remove()
for data in ({"root": [{"a": 1, "b": 3, "c": "data1"}]},
{"root": [{"a": 2, "b": 2, "c": "data3"}]},
{"root": [{"a": 3, "b": 1, "c": "data2"}]}):
self.cmp.do.insert(data)
self.cmp.compare(_SORT([("root.0.a", 1), ("root.0.b", -1)])).find()
self.cmp.compare(_SORT([("root.0.b", 1), ("root.0.a", -1)])).find()
self.cmp.compare(
_SORT(
[("root.0.b", 1), ("root.0.a", -1),
("root.0.c", 1)])).find()
def test__close(self):
# Does nothing - just make sure it exists and takes the right args
self.cmp.do(lambda cursor: cursor.close()).find()
class InsertedDocumentTest(TestCase):
def setUp(self):
super(InsertedDocumentTest, self).setUp()
self.collection = mongomock.MongoClient().db.collection
self.data = {"a": 1, "b": [1, 2, 3], "c": {"d": 4}}
self.orig_data = copy.deepcopy(self.data)
self.object_id = self.collection.insert(self.data)
def test__object_is_consistent(self):
[object] = self.collection.find()
self.assertEqual(object["_id"], self.object_id)
def test__find_by_id(self):
[object] = self.collection.find({"_id": self.object_id})
self.assertEqual(object, self.data)
def test__remove_by_id(self):
self.collection.remove(self.object_id)
self.assertEqual(0, self.collection.count())
def test__inserting_changes_argument(self):
# Like pymongo, we should fill the _id in the inserted dict
# (odd behavior, but we need to stick to it)
self.assertEqual(self.data, dict(self.orig_data, _id=self.object_id))
def test__data_is_copied(self):
[object] = self.collection.find()
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
self.data.pop("a")
self.data["b"].append(5)
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
[object] = self.collection.find()
self.assertEqual(dict(self.orig_data, _id=self.object_id), object)
def test__find_returns_copied_object(self):
[object1] = self.collection.find()
[object2] = self.collection.find()
self.assertEqual(object1, object2)
self.assertIsNot(object1, object2)
object1["b"].append("bla")
self.assertNotEqual(object1, object2)
class ObjectIdTest(TestCase):
def test__equal_with_same_id(self):
obj1 = ObjectId()
obj2 = ObjectId(str(obj1))
self.assertEqual(obj1, obj2)
class DatabasesNamesTest(TestCase):
def setUp(self):
super(DatabasesNamesTest, self).setUp()
self.client = mongomock.MongoClient()
def test__database_names(self):
self.client.unit.tests.insert({'foo': 'bar'})
self.client.foo.bar.insert({'unit': 'test'})
names = self.client.database_names()
self.assertIsInstance(names, list)
self.assertEqual(sorted(['foo', 'unit']), sorted(names))
|
StarfishStorage/mongomock
|
tests/test__mongomock.py
|
Python
|
bsd-3-clause
| 67,868
|
#!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE
def exe_cmd(*cmds):
"""Executes commands in a new shell. Directing stderr to PIPE.
This is fastboot's own exe_cmd because of its peculiar way of writing
non-error info to stderr.
Args:
cmds: A sequence of commands and arguments.
Returns:
The output of the command run.
Raises:
Exception is raised if an error occurred during the command execution.
"""
cmd = ' '.join(cmds)
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
(out, err) = proc.communicate()
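    # fastboot writes non-error information to stderr, so fall back to stderr
    # whenever it is non-empty.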
if not err:
return out
return err
class FastbootProxy():
"""Proxy class for fastboot.
    For syntactic reasons, the '-' in fastboot commands needs to be replaced
with '_'. Can directly execute fastboot commands on an object:
>> fb = FastbootProxy(<serial>)
>> fb.devices() # will return the console output of "fastboot devices".
"""
def __init__(self, serial=""):
self.serial = serial
if serial:
self.fastboot_str = "fastboot -s {}".format(serial)
else:
self.fastboot_str = "fastboot"
def _exec_fastboot_cmd(self, name, arg_str):
return exe_cmd(' '.join((self.fastboot_str, name, arg_str)))
def args(self, *args):
return exe_cmd(' '.join((self.fastboot_str, ) + args))
def __getattr__(self, name):
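        # Treat any unknown attribute as a fastboot subcommand, converting the
        # underscores in the attribute name back to dashes before executing it.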
def fastboot_call(*args):
clean_name = name.replace('_', '-')
arg_str = ' '.join(str(elem) for elem in args)
return self._exec_fastboot_cmd(clean_name, arg_str)
return fastboot_call
|
yusufm/mobly
|
mobly/controllers/android_device_lib/fastboot.py
|
Python
|
apache-2.0
| 2,228
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add max tries column to task instance
Revision ID: cc1e65623dc7
Revises: 127d2bf2dfa7
Create Date: 2017-06-19 16:53:12.851141
"""
# revision identifiers, used by Alembic.
revision = 'cc1e65623dc7'
down_revision = '127d2bf2dfa7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from airflow import settings
from airflow.models import DagBag, TaskInstance
from sqlalchemy.engine.reflection import Inspector
BATCH_SIZE = 5000
def upgrade():
op.add_column('task_instance', sa.Column('max_tries', sa.Integer,
server_default="-1"))
# Check if the task_instance table exists before the data migration. This check is
# needed for databases that do not create the table until the migration finishes.
# Checking that the task_instance table exists prevents errors from querying a
# non-existent task_instance table.
connection = op.get_bind()
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names()
if 'task_instance' in tables:
# Get current session
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(
TaskInstance.max_tries == -1
)
# Run the db query in batches to prevent loading the entire table
# into memory and causing an out-of-memory error.
while query.scalar():
tis = session.query(TaskInstance).filter(
TaskInstance.max_tries == -1
).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
# task_instance table might not have the up-to-date
# information, i.e. the dag or task might have been modified or
# deleted in the dagbag but still be reflected in the task instance
# table. In this case we do not retry the task that can't
# be parsed.
ti.max_tries = ti.try_number
else:
task = dag.get_task(ti.task_id)
if task.retries:
ti.max_tries = task.retries
else:
ti.max_tries = ti.try_number
session.merge(ti)
session.commit()
# Commit the current session.
session.commit()
def downgrade():
engine = settings.engine
if engine.dialect.has_table(engine, 'task_instance'):
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
dagbag = DagBag(settings.DAGS_FOLDER)
query = session.query(sa.func.count(TaskInstance.max_tries)).filter(
TaskInstance.max_tries != -1
)
while query.scalar():
tis = session.query(TaskInstance).filter(
TaskInstance.max_tries != -1
).limit(BATCH_SIZE).all()
for ti in tis:
dag = dagbag.get_dag(ti.dag_id)
if not dag or not dag.has_task(ti.task_id):
ti.try_number = 0
else:
task = dag.get_task(ti.task_id)
# max_tries - try_number is the number of times a task instance
# has left to retry by itself. So the current try_number should be
# the max number of self-retries (task.retries) minus the number of
# times the task instance has left to try the task.
ti.try_number = max(0, task.retries - (ti.max_tries -
ti.try_number))
ti.max_tries = -1
session.merge(ti)
session.commit()
session.commit()
op.drop_column('task_instance', 'max_tries')
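# Illustrative worked example of the downgrade arithmetic above (numbers are made up,
# not from the original migration): ti.max_tries - ti.try_number is how many self-retries
# the task instance still has. With task.retries == 3, max_tries == 4 and try_number == 1
# there are 3 retries left, so the restored try_number is max(0, 3 - 3) = 0; with
# max_tries == 3 and try_number == 2 there is 1 retry left and it becomes max(0, 3 - 1) = 2.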
|
Tagar/incubator-airflow
|
airflow/migrations/versions/cc1e65623dc7_add_max_tries_column_to_task_instance.py
|
Python
|
apache-2.0
| 4,685
|
import collections
import importlib
import importlib.machinery
import io
import itertools
from itertools import chain
import pprint
import sys
from tokenize import * #tokenize, untokenize, ERRORTOKEN, NAME, OP
def _call_with_frames_removed(f, *args, **kwargs):
return f(*args, **kwargs)
def consume(iterator, n):
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(itertools.islice(iterator, n, n), None)
def groupwise_longest(iterable, n):
bits = itertools.tee(iterable, n)
for i, bit in enumerate(bits):
consume(bit, i)
return itertools.zip_longest(*bits)
def retokenize(tokens):
"""Coalesce None.
Replace:
name?.attr
with:
name.attr if name is not None else None
"""
# See http://bugs.python.org/issue16224#msg211469
# "Switching from 5-tuples to 2-tuples... is not currently a
# supported use case". In particular, if you do so in the middle
# of an indented block, the dedent doesn't match the indent. So,
# we have to force everything to 2-tuples.
groups = groupwise_longest(tokens, 4)
for w, x, y, z in groups:
if (w[0] == NAME and z is not None and
x[0] == ERRORTOKEN and x[1] == '?' and
y[0] == OP and y[1] == '.'):
yield w[0], w[1]
yield y[0], y[1]
yield z[0], z[1]
yield NAME, 'if'
yield w[0], w[1]
yield NAME, 'is'
yield NAME, 'not'
yield NAME, 'None'
yield NAME, 'else'
yield NAME, 'None'
consume(groups, 3)
else:
yield w[0], w[1]
class NoneCoaLoader(importlib.machinery.SourceFileLoader):
def source_to_code(self, data, path, *, _optimize=-1):
print(path)
source = importlib._bootstrap.decode_source(data)
tokens = tokenize(io.BytesIO(source.encode('utf-8')).readline)
tokens = retokenize(tokens)
source = untokenize(tokens).decode('utf-8')
return _call_with_frames_removed(compile, source, path, 'exec',
dont_inherit=True,
optimize=_optimize)
_real_pathfinder = sys.meta_path[-1]
class NoneCoaFinder(type(_real_pathfinder)):
@classmethod
def find_module(cls, fullname, path=None):
spec = _real_pathfinder.find_spec(fullname, path)
if not spec: return spec
loader = spec.loader
if type(loader).__name__ == 'SourceFileLoader':
loader.__class__ = NoneCoaLoader
return loader
sys.meta_path[-1] = NoneCoaFinder
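# Hypothetical demo (not part of the original module): shows the rewrite that retokenize()
# performs, assuming a CPython version where a bare '?' is emitted as an ERRORTOKEN,
# which this module relies on.
if __name__ == '__main__':
    demo_src = "x = obj?.attr\n"
    demo_tokens = retokenize(tokenize(io.BytesIO(demo_src.encode('utf-8')).readline))
    print(untokenize(demo_tokens).decode('utf-8'))
    # prints, up to whitespace: x = obj .attr if obj is not None else None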
|
abarnert/nonehack
|
nonecoa.py
|
Python
|
mit
| 2,928
|
__author__ = 'Blake'
from jinja2 import Template
from basetypes import TextContentAsset
class Page(TextContentAsset):
def __init__(self, obj, site = None):
super(Page, self).__init__(obj, site)
@property
def extension(self):
return 'html'
@property
def as_HTML(self):
template = self.site.theme.jinja_environment.get_template(self.filename)
template = template.render(**self.jinja_obj)
return template
|
blakev/synpost
|
synpost/objects/content/page.py
|
Python
|
mit
| 466
|
#!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""
This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather
than an application having to define all flags in or near main(), each
python module defines flags that are useful to it. When one python
module imports another, it gains access to the other's flags. (This
is implemented by having all modules share a common, global registry
object containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with
a 'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments
and pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as members of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the
command line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All
flags take a name, default value, help-string, and optional 'short'
name (one-letter name). Some flags have other arguments, which are
described with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and
upper_bound; if the number specified on the command line
is out of range, it will raise a FlagError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints),
even if the flag is only on the command line once.
SPECIAL FLAGS: There are a few flags that have special meaning:
--help (or -?) prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all the flags in the 'main' .py file only
--flagfile=foo read flags from foo.
-- as in getopt(), terminates flag-processing
Note on --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag
per line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag
'--flagfile=somefile'. You CAN recursively nest flagfile= tokens OR
use multiple files on the command line. Lines beginning with a single
hash '#' or a double slash '//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested
files in a different dir than they are executing out of. Relative
path names are always from CWD, not from the directory of the parent
include flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
import gflags
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string("name", "Mr. President" "NAME: your name")
gflags.DEFINE_integer("age", None, "AGE: your age in years", lower_bound=0)
gflags.DEFINE_boolean("debug", 0, "produces debugging output")
gflags.DEFINE_enum("gender", "male", ["male", "female"],
"GENDER: your gender")
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print "You are a %s, who is %d years old" % (FLAGS.gender, FLAGS.age)
if __name__ == '__main__': main(sys.argv)
"""
import getopt
import os
import sys
# Are we running at least python 2.2?
try:
if tuple(sys.version_info[:3]) < (2,2,0):
raise NotImplementedError("requires python 2.2.0 or later")
except AttributeError: # a very old python, that lacks sys.version_info
raise NotImplementedError("requires python 2.2.0 or later")
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
# module exceptions:
class FlagsError(Exception): "The base class for all flags errors"
class DuplicateFlag(FlagsError): "Thrown if there is a flag naming conflict"
class IllegalFlagValue(FlagsError): "The flag command line argument is illegal"
# Global variable used by expvar
_exported_flags = {}
def __GetModuleName(globals_dict):
"""Given a globals dict, find the module in which it's defined."""
for name, module in sys.modules.iteritems():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
return sys.argv[0]
return name
raise AssertionError, "No module was found"
def __GetCallingModule():
"""Get the name of the module that's calling into this module; e.g.,
the module calling a DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
return __GetModuleName(sys._getframe(depth).f_globals)
raise AssertionError, "No module was found"
def _GetMainModule():
"""Get the module name from which execution started."""
for depth in range(1, sys.getrecursionlimit()):
try:
globals_of_main = sys._getframe(depth).f_globals
except ValueError:
return __GetModuleName(globals_of_main)
raise AssertionError, "No module was found"
class FlagValues:
"""
Used as a registry for 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value member of the registered 'Flag' objects can be accessed as
members of this 'FlagValues' object, through __getattr__. Both the
long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded,
# the only way of defining and using fields is to access __dict__
# directly.
self.__dict__['__flags'] = {}
self.__dict__['__flags_by_module'] = {} # A dict module -> list of flag
def FlagDict(self):
return self.__dict__['__flags']
def _RegisterFlagByModule(self, module_name, flag):
"""We keep track of which flag is defined by which module so that
we can later sort the flags by module.
"""
flags_by_module = self.__dict__['__flags_by_module']
flags_by_module.setdefault(module_name, []).append(flag)
def __setitem__(self, name, flag):
"""
Register a new flag variable.
"""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue, flag
if not isinstance(name, type("")):
raise FlagsError, "Flag name must be a string"
if len(name) == 0:
raise FlagsError, "Flag name cannot be empty"
# If running under pychecker, duplicate keys are likely to be defined.
# Disable check for duplicate keys when pycheck'ing.
if (fl.has_key(name) and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlag, name
short_name = flag.short_name
if short_name is not None:
if (fl.has_key(short_name) and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlag, short_name
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""
Retrieve the flag object.
"""
return self.FlagDict()[name]
def __getattr__(self, name):
"""
Retrieve the .value member of a flag object.
"""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError, name
return fl[name].value
def __setattr__(self, name, value):
"""
Set the .value member of a flag object.
"""
fl = self.FlagDict()
fl[name].value = value
return value
def __delattr__(self, name):
"""
Delete a previously-defined flag from a flag object.
"""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError, name
del fl[name]
def SetDefault(self, name, value):
"""
Change the default value of the named flag object.
"""
fl = self.FlagDict()
if not fl.has_key(name):
raise AttributeError, name
fl[name].SetDefault(value)
def __contains__(self, name):
"""
Return True if name is a value (flag) in the dict.
"""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return self.FlagDict().iterkeys()
def __call__(self, argv):
"""
Searches argv for flag arguments, parses them and then sets the flag
values as attributes of this FlagValues object. All unparsed
arguments are returned. Flags are parsed using the GNU Program
Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
# This pre parses the argv list for --flagfile=<> options.
argv = self.ReadFlagsFromFiles(argv)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
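# For example (illustrative, assuming boolean flags named 'debug' and 'cache' are
# registered): ['prog', '--debug', '--nocache'] would be rewritten to
# ['prog', '--debug=true', '--cache=false'] before being handed to getopt.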
original_argv = list(argv)
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
# Replace all occurrences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options and
# long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon if it
# takes an argument. Long options are stored in an array of strings.
# Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
try:
optlist, unparsed_args = getopt.getopt(argv[1:], shortopts, longopts)
except getopt.GetoptError, e:
raise FlagsError, e
for name, arg in optlist:
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if fl.has_key(name):
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
if unparsed_args:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
return argv[:1] + original_argv[-len(unparsed_args):]
else:
return argv[:1]
def Reset(self):
"""
Reset the values to the point before FLAGS(argv) was called.
"""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""
Return a list of all registered flags.
"""
return self.FlagDict().keys()
def FlagValuesDict(self):
"""
Return a dictionary with flag names as keys and flag values as values.
"""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""
Generate a help string for all known flags.
"""
helplist = []
flags_by_module = self.__dict__['__flags_by_module']
if flags_by_module:
modules = flags_by_module.keys()
modules.sort()
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [ main_module ] + modules
for module in modules:
self.__RenderModuleFlags(module, helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(self.FlagDict().values(), helplist)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, output_lines):
"""
Generate a help string for a given module.
"""
flags_by_module = self.__dict__['__flags_by_module']
if module in flags_by_module:
output_lines.append('\n%s:' % module)
self.__RenderFlagList(flags_by_module[module], output_lines)
def MainModuleHelp(self):
"""
Generate a help string for all known flags of the main module.
"""
helplist = []
self.__RenderModuleFlags(_GetMainModule(), helplist)
return '\n'.join(helplist)
def __RenderFlagList(self, flaglist, output_lines):
fl = self.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag: # a different flag is using this name now
continue
# only print help once
if flagset.has_key(flag): continue
flagset[flag] = 1
flaghelp = " "
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
if flag.default_as_str:
flaghelp += "\n (default: %s)" % flag.default_as_str
if flag.parser.syntactic_help:
flaghelp += "\n (%s)" % flag.parser.syntactic_help
output_lines.append(flaghelp)
def get(self, name, default):
"""
Retrieve the .value member of a flag object, or default if .value is None
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""
Returns a dictionary mapping flag names to their shortest unique prefix.
"""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique prefix
# by comparing itself to the next name and to the previous name (the latter
# check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
def __IsFlagFileDirective(self, flag_string):
""" Detects the --flagfile= token.
Takes a string which might contain a '--flagfile=<foo>' directive.
Returns a Boolean.
"""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Function to remove the --flagfile= (or variant) and return just the
filename part. We can get strings that look like:
--flagfile=foo, -flagfile=foo.
The case of --flagfile foo and -flagfile foo shouldn't be hitting this
function, as they are dealt with in the level above this function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
return ''
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Function to open a flag file, return its useful (!=comments,etc) lines.
Takes:
A filename to open and read
A list of files we have already read THAT WILL BE CHANGED
Returns:
List of strings. See the note below.
NOTE(springer): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
line_list = [] # All line from flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
print e_msg
print 'ERROR:: Unable to open flagfile: %s' % (filename)
return flag_line_list
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif (line.startswith('#') or line.startswith('//')):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
# We do a little safety check for reparsing a file we've already done.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename, parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
% sub_filename)
else:
# Any line that's not a comment or a nested flagfile should
# get copied into 2nd position; this leaves earlier arguments
# further back in the list, which makes them have higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv):
"""Process command line args, but also allow args to be read from file
Usage:
Takes: a list of strings, usually sys.argv, which may contain one or more
flagfile directives of the form --flagfile="./filename"
References: Global gflags.FLAG class instance
Returns: a new list which has the original list combined with what we
read from any flagfile(s).
This function should be called before the normal FLAGS(argv) call.
This function simply scans the input list for a flag that looks like:
--flagfile=<somefile>
Then it opens <somefile>, reads all valid key and value pairs and inserts
them into the input list between the first item of the list and any
subsequent items in the list.
Note that your application's flags are still defined the usual way using
gflags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> Any flags on the command line we were passed in _should_ always take
precedence!!!
--> a further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be processed after the parent flag file is done.
--> For duplicate flags, first one we hit should "win".
--> In a flagfile, a line beginning with # or // is a comment
--> Entirely blank lines _should_ be ignored
"""
parsed_file_list = []
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self.__IsFlagFileDirective(current_arg):
# This handles the case of -(-)flagfile foo. In this case the next arg
# really is part of this one.
if current_arg == '--flagfile' or current_arg =='-flagfile':
if not rest_of_args:
raise IllegalFlagValue, '--flagfile with no argument'
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self.ExtractFilename(current_arg)
new_argv = (new_argv[:1] +
self.__GetFlagFileLines(flag_filename, parsed_file_list) +
new_argv[1:])
else:
new_argv.append(current_arg)
return new_argv
def FlagsIntoString(self):
"""
Retrieve a string version of all the flags with assignments stored
in this FlagValues object. Should mirror the behavior of the c++
version of FlagsIntoString. Each flag assignment is separated by
a newline.
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""
Appends all flags found in this FlagInfo object to the file
specified. Output will be in the format of a flagfile. This
should mirror the behavior of the c++ version of
AppendFlagsIntoFile.
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
#end of the FLAGS registry class
# The global FlagValues instance
FLAGS = FlagValues()
class Flag:
"""
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present member is updated. If this
flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value member. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present member is cleared after __init__
parsing. If the default value is set to None, then the __init__
parsing step is skipped and the .value member is initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
self.default = default
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
if default is None and allow_override:
raise DuplicateFlag, name
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(str(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue, ("flag --%s: " % self.name) + str(e)
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError, "Serializer not present for flag %s" % self.name
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""
Change the default value, and current value, of this flag object
"""
if value is not None: # See __init__ for logic details
self.Parse(value)
self.present -= 1 # reset .present after parsing new default value
else:
self.value = None
self.default = value
self.default_as_str = self.__GetParsedValueAsString(value)
class ArgumentParser:
"""
This is a base class used to parse and convert arguments.
The Parse() method checks to make sure that the string argument is a
legal value and convert it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
"""
syntactic_help = ""
def Parse(self, argument):
"""
The default implementation of Parse() accepts any value of argument,
simply returning it unmodified.
"""
return argument
class ArgumentSerializer:
"""
This is the base class for generating string representations of a
flag value
"""
def Serialize(self, value):
return str(value)
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def Serialize(self, value):
return self.list_sep.join([str(x) for x in value])
# The DEFINE functions are explained in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
**args):
"""
This creates a generic 'Flag' object that parses its arguments with a
'Parser' and registers it with a 'FlagValues' object.
Developers who need to create their own 'Parser' classes should call
this module function to register their flags. For example:
DEFINE(DatabaseSpec(), "dbspec", "mysql:db0:readonly:hr",
"The primary database")
"""
DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
"""
This registers a 'Flag' object with a 'FlagValues' object. By
default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function to
register their flags.
"""
# copying the reference to flag_values prevents pychecker warnings
fv = flag_values
fv[flag.name] = flag
if flag_values == FLAGS:
# We are using the global flags dictionary, so we'll want to sort the
# usage output by calling module in FlagValues.__str__ (FLAGS is an
# instance of FlagValues). This requires us to keep track
# of which module is creating the flags.
# Tell FLAGS who's defining flag.
FLAGS._RegisterFlagByModule(__GetCallingModule(), flag)
###############################
################# STRING FLAGS
###############################
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
"""
This registers a flag whose value can be any string.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
###############################
################ BOOLEAN FLAGS
###############################
#### and the special HELP flag
###############################
class BooleanParser(ArgumentParser):
"""
A boolean value
"""
def Convert(self, argument):
"""
convert the argument to a boolean (integer); raise ValueError on errors
"""
if type(argument) == str:
if argument.lower() in ['true', 't', '1']:
return 1
elif argument.lower() in ['false', 'f', '0']:
return 0
return int(argument)
def Parse(self, argument):
val = self.Convert(argument)
return val
class BooleanFlag(Flag):
"""
A basic boolean flag. Boolean flags do not take any arguments, and
their value is either 0 (false) or 1 (true). The false value is
specified on the command line by prepending the word 'no' to either
the long or short flag name.
For example, if a Boolean flag was created whose long name was 'update'
and whose short name was 'x', then this flag could be explicitly unset
through either --noupdate or --nox.
"""
def __init__(self, name, default, help, short_name=None, **args):
p = BooleanParser()
Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
"""
This registers a boolean flag - one that does not take an argument.
If a user wants to specify a false value explicitly, the long option
beginning with 'no' must be used: i.e. --noflag
This flag will have a value of None, 0 or 1. None is possible if
default=None and the user does not specify the flag on the command
line.
"""
DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
class HelpFlag(BooleanFlag):
"""
HelpFlag is a special boolean flag that prints usage information and
raises a SystemExit exception if it is ever found in the command
line arguments. Note this is called with allow_override=1, so other
apps can define their own --help flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "help", 0, "show this help",
short_name="?", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = str(FLAGS)
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
class HelpshortFlag(BooleanFlag):
"""
HelpshortFlag is a special boolean flag that prints usage
information for the "main" module, and rasies a SystemExit exception
if it is ever found in the command line arguments. Note this is
called with allow_override=1, so other apps can define their own
--helpshort flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "helpshort", 0,
"show usage only for this module", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = FLAGS.MainModuleHelp()
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
###############################
################## FLOAT FLAGS
###############################
class FloatParser(ArgumentParser):
"""
A floating point value; optionally bounded to a given upper and lower
bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound != None and upper_bound != None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = "a positive %s" % self.number_name
elif upper_bound == -1:
sh = "a negative %s" % self.number_name
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound != None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound != None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""
convert the argument to a float; raise ValueError on errors
"""
return float(argument)
def Parse(self, argument):
val = self.Convert(argument)
if ((self.lower_bound != None and val < self.lower_bound) or
(self.upper_bound != None and val > self.upper_bound)):
raise ValueError, "%s is not %s" % (val, self.syntactic_help)
return val
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values = FLAGS, **args):
"""
This registers a flag whose value must be a float. If lower_bound,
or upper_bound are set, then this flag must be within the given range.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
###############################
################ INTEGER FLAGS
###############################
class IntegerParser(FloatParser):
"""
An integer value; optionally bounded to a given upper or lower bound.
"""
number_article = "an"
number_name = "integer"
syntactic_help = " ".join((number_article, number_name))
def Convert(self, argument):
__pychecker__ = 'no-returnvalues'
if type(argument) == str:
base = 10
if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
base=16
try:
return int(argument, base)
# ValueError is thrown when argument is a string, and overflows an int.
except ValueError:
return long(argument, base)
else:
try:
return int(argument)
# OverflowError is thrown when argument is numeric, and overflows an int.
except OverflowError:
return long(argument)
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
flag_values = FLAGS, **args):
"""
This registers a flag whose value must be an integer. If lower_bound,
or upper_bound are set, then this flag must be within the given range.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
###############################
################### ENUM FLAGS
###############################
class EnumParser(ArgumentParser):
"""
A string enum value
"""
def __init__(self, enum_values=None):
self.enum_values = enum_values
def Parse(self, argument):
"""
If enum_values is not specified, any string is allowed
"""
if self.enum_values and argument not in self.enum_values:
raise ValueError, ("value should be one of <%s>"
% "|".join(self.enum_values))
return argument
class EnumFlag(Flag):
"""
A basic enum flag. The flag's value can be any string from the list
of enum_values.
"""
def __init__(self, name, default, help, enum_values=[],
short_name=None, **args):
p = EnumParser(enum_values)
g = ArgumentSerializer()
Flag.__init__(self, p, g, name, default, help, short_name, **args)
if not self.help: self.help = "an enum string"
self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
**args):
"""
This registers a flag whose value can be a string from a set of
specified values.
"""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values)
###############################
################### LIST FLAGS
###############################
class BaseListParser(ArgumentParser):
"""
A base class for a string list parser.
To extend, inherit from this class, and call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and
name is a description of the separator
"""
def __init__(self, token=None, name=None):
assert name
self._token = token
self._name = name
self.syntactic_help = "a %s separated list" % self._name
def Parse(self, argument):
if argument == '':
return []
else:
return [s.strip() for s in argument.split(self._token)]
class ListParser(BaseListParser):
"""
A string list parser (comma-separated)
"""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
class WhitespaceSeparatedListParser(BaseListParser):
"""
A string list parser (whitespace-separated)
"""
def __init__(self):
BaseListParser.__init__(self, None, 'whitespace')
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
"""
This registers a flag whose value is a list of strings, separated by commas
"""
parser = ListParser()
serializer = ListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
"""
This registers a flag whose value is a list of strings, separated by any
whitespace
"""
parser = WhitespaceSeparatedListParser()
serializer = ListSerializer(' ')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
###############################
################## MULTI FLAGS
###############################
class MultiFlag(Flag):
"""
MultiFlag is a specialized subclass of Flag that accumulates
multiple values in a list when a command-line option appears
multiple times.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* the default value may be a single value -OR- a list of values
* the value of the flag is always a list, even if the option was only
supplied once, and even if the default value is a single value
"""
def __init__(self, *args, **kwargs):
Flag.__init__(self, *args, **kwargs)
self.help = (self.help +
';\n repeat this option to specify a list of values')
def Parse(self, arguments):
"""Parse one or more arguments with the installed parser.
Arguments:
arguments: a single argument or a list of arguments (typically a list
of default values); single arguments will be converted internally into
a list containing one item
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments will not
# be, so convert them into a single-item list to make processing simpler
# below.
arguments = [ arguments ]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in member variable
self.value = values
def Serialize(self):
if not self.serializer:
raise FlagsError, "Serializer not present for flag %s" % self.name
if self.value is None:
return ''
s = ''
multi_value = self.value
for self.value in multi_value:
if s: s += ' '
s += Flag.Serialize(self)
self.value = multi_value
return s
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
**args):
"""
This creates a generic 'MultiFlag' object that parses its arguments with a
'Parser' and registers it with a 'FlagValues' object.
Developers who need to create their own 'Parser' classes for options which
can appear multiple times can call this module function to register their
flags.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
"""
This registers a flag whose value can be a list of any strings. Use the flag
on the command line multiple times to place multiple string values into the
list. The 'default' may be a single string (which will be converted into a
single-element list) or a list of strings.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""
This registers a flag whose value can be a list of any integers. Use the
flag on the command line multiple times to place multiple integer values
into the list. The 'default' may be a single integer (which will be
converted into a single-element list) or a list of integers.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
|
managementboy/mntv
|
gflags.py
|
Python
|
lgpl-2.1
| 48,851
|
#!/usr/bin/env python3
"""
A program to extract raw text from Telegram chat log
"""
import argparse
from json import loads
def main():
parser = argparse.ArgumentParser(
description="Extract all raw text from a specific Telegram chat")
parser.add_argument('filepath', help='the json chatlog file to analyse')
parser.add_argument('-u','--usernames', help='Show usernames before messages. '
'If someone doesn\'t have a username, the line will start with "@: ". '
'Useful when output will be read back as a chatlog.',
action='store_true')
parser.add_argument('-n','--no-newlines', help='Remove all newlines from messages. Useful when '
'output will be piped into analysis expecting newline separated messages. ',
action='store_true')
args=parser.parse_args()
filepath = args.filepath
with open(filepath, 'r') as jsonfile:
events = (loads(line) for line in jsonfile)
for event in events:
#check the event is the sort we're looking for
if "from" in event and "text" in event:
if args.usernames:
if 'username' in event['from']:
print('@' + event['from']['username'],end=': ')
else:
print('@',end=': ')
if args.no_newlines:
print(event['text'].replace('\n',''))
else:
print(event["text"])
if __name__ == "__main__":
main()
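# Hypothetical example (not from the original script): given a chat log whose lines are
# JSON objects such as
#   {"from": {"username": "alice"}, "text": "hello\nworld"}
# running `getalltext.py chat.jsonl -u -n` would print:
#   @alice: helloworld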
|
expectocode/telegram-analysis
|
getalltext.py
|
Python
|
mit
| 1,599
|
from default_imports import *
from modules.game.AnalysedMove import Analysis, AnalysisBSONHandler
from chess import polyglot, Board
from pymongo.collection import Collection
import pymongo
import logging
AnalysedPositionID = NewType('AnalysedPositionID', str)
class AnalysedPosition(NamedTuple('AnalysedPosition', [
('id', AnalysedPositionID),
('analyses', List[Analysis])
])):
"""
Like an analysed move, but only with SF analysis. Does not contain any other move data.
This is used for accelerating stockfish analysis.
"""
@staticmethod
def fromBoardAndAnalyses(board: Board, analyses: List[Analysis]):
return AnalysedPosition(
id=AnalysedPosition.idFromBoard(board),
analyses=analyses)
@staticmethod
def idFromBoard(board: Board) -> AnalysedPositionID:
return str(polyglot.zobrist_hash(board))
class AnalysedPositionBSONHandler:
@staticmethod
def reads(bson: Dict) -> AnalysedPosition:
return AnalysedPosition(
id=bson['_id'],
analyses=[AnalysisBSONHandler.reads(b) for b in bson['analyses']])
def writes(analysedPosition: AnalysedPosition) -> Dict:
return {
'_id': analysedPosition.id,
'analyses': [AnalysisBSONHandler.writes(a) for a in analysedPosition.analyses]
}
class AnalysedPositionDB(NamedTuple('AnalysedPositionDB', [
('analysedPositionColl', Collection)
])):
def write(self, analysedPosition: AnalysedPosition):
try:
self.analysedPositionColl.update_one(
{'_id': analysedPosition.id},
{'$set': AnalysedPositionBSONHandler.writes(analysedPosition)},
upsert=True)
except pymongo.errors.DuplicateKeyError:
logging.warning("DuplicateKeyError when attempting to write position: " + str(analysedPosition.id))
def writeMany(self, analysedPositions: List[AnalysedPosition]):
[self.write(analysedPosition) for analysedPosition in analysedPositions]
def byBoard(self, board: Board) -> Opt[AnalysedPosition]:
analysedPositionBSON = self.analysedPositionColl.find_one({'_id': AnalysedPosition.idFromBoard(board)})
return None if analysedPositionBSON is None else AnalysedPositionBSONHandler.reads(analysedPositionBSON)
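# Hypothetical sketch (not part of the original module): the id is simply the polyglot
# Zobrist hash of the position, so boards that reach the same position share one cache
# key regardless of the move order or history that produced them.
if __name__ == '__main__':
    b1, b2 = Board(), Board()
    for san in ['Nf3', 'Nf6', 'Ng1', 'Ng8']:
        b1.push_san(san)
    # b1 is back at the starting position, so it hashes to the same id as the fresh b2
    assert AnalysedPosition.idFromBoard(b1) == AnalysedPosition.idFromBoard(b2)
    print(AnalysedPosition.idFromBoard(b2))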
|
clarkerubber/irwin
|
modules/game/AnalysedPosition.py
|
Python
|
agpl-3.0
| 2,340
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.key'
db.add_column(u'walkthrough_project', 'key',
self.gf('randomslugfield.fields.RandomSlugField')(exclude_lower=False, length=6, max_length=6, exclude_digits=False, blank=False, default=None, exclude_vowels=False, exclude_upper=True, null=True),
keep_default=False)
# Changing field 'Project.slug'
db.alter_column(u'walkthrough_project', 'slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', populate_from=['name', 'key'], overwrite=False))
def backwards(self, orm):
# Deleting field 'Project.key'
db.delete_column(u'walkthrough_project', 'key')
# Changing field 'Project.slug'
db.alter_column(u'walkthrough_project', 'slug', self.gf('django_extensions.db.fields.AutoSlugField')(populate_from='name', allow_duplicates=False, max_length=50, separator=u'-', overwrite=False))
models = {
u'walkthrough.mvp': {
'Meta': {'object_name': 'Mvp'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_statement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['walkthrough.Project']", 'unique': 'True'})
},
u'walkthrough.mvpredaction': {
'Meta': {'object_name': 'MvpRedaction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.IntegerField', [], {}),
'mvp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['walkthrough.Mvp']"}),
'statement_end': ('django.db.models.fields.IntegerField', [], {}),
'statement_start': ('django.db.models.fields.IntegerField', [], {})
},
u'walkthrough.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'key': ('randomslugfield.fields.RandomSlugField', [], {'exclude_lower': 'False', 'length': '6', 'max_length': '6', 'exclude_digits': 'False', 'blank': 'True', 'exclude_vowels': 'False', 'exclude_upper': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['name', 'key']", 'overwrite': 'False'}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tools': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'validate_customer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'validate_offering': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'validate_value_prop': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'walkthrough.ticket': {
'Meta': {'object_name': 'Ticket'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mvp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['walkthrough.Mvp']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'workstream': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['walkthrough.Workstream']"})
},
u'walkthrough.workstream': {
'Meta': {'unique_together': "(('name', 'mvp'),)", 'object_name': 'Workstream'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.IntegerField', [], {}),
'mvp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['walkthrough.Mvp']"}),
'name': ('django.db.models.fields.TextField', [], {}),
'owner': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'statement_end': ('django.db.models.fields.IntegerField', [], {}),
'statement_start': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['walkthrough']
|
pinch-tn/pinch-app
|
pinch/walkthrough/migrations/0002_auto__add_field_project_key__chg_field_project_slug.py
|
Python
|
mit
| 4,966
|
import os
import numpy as np
import matplotlib.pyplot as plt
from cs231n.data_utils import load_CIFAR10
from cs231n.classifiers import KNearestNeighbor
cifar10_dir = os.getcwd()+'/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
classes = ['plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
classes_num = len(classes)
print("训练数据维度:", X_train.shape)
print("训练标签维度:", y_train.shape)
print("测试数据维度:", X_test.shape)
print("测试标签维度:", y_test.shape)
# 可视化几个例子
samples_show_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_show_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * classes_num + y + 1
plt.subplot(samples_show_per_class, classes_num, plt_idx)
        # imshow can take an M*N*3 uint8 array, interpreted as RGB
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Take a subsample (one tenth) of the full data to speed things up
sub_train_num = 5000
mask = range(sub_train_num)
X_train = X_train[mask]
y_train = y_train[mask]
sub_test_num = 1000
mask = range(sub_test_num)
X_test = X_test[mask]
y_test = y_test[mask]
# Flatten each image into a row vector
X_train = X_train.reshape(sub_train_num, 32*32*3)
X_test = X_test.reshape(sub_test_num, 32*32*3)
print('After taking %d training examples, the flattened shape is:' % (sub_train_num), X_train.shape)
print('After taking %d test examples, the flattened shape is:' % (sub_test_num), X_test.shape)
# Create a KNN classifier object and start the classification
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
#dists = classifier.compute_distances_no_loops(X_test)
#dists = classifier.compute_distances_one_loops(X_test)
#dists = classifier.compute_distances_two_loops(X_test)
# print(dists.shape)
# Visualize the distance matrix
#plt.imshow(dists, interpolation='none')
# plt.show()
# Evaluate the accuracy
#y_test_pred = classifier.predict_labels(dists, k=1)
''' The following is temporarily commented out, to make the later verification easier.
for k in range(1, 10):
    for nl in range(1, 2):  # nl is fixed at 1 because the one-loop version is much faster than the two-loop version
for dt in range(1, 7):
y_test_pred = classifier.predict(
X_test, k=k, num_loops=nl, distance_type=dt)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / sub_test_num
            print('With parameters k=%d, num_loops=%d, distance_type=%d' % (k, nl, dt))
            print('Final accuracy is %d/%d=%f' % (num_correct, sub_test_num, accuracy))
'''
# To make sure the vectorized row-vector implementation in compute_distances_one_loop is correct,
# we need to verify it. There are several ways to check whether two matrices are similar:
# 1. The simplest is the Frobenius norm. The Frobenius norm of the difference of two matrices is the
#    square root of the sum of the squares of all its elements, analogous to the Euclidean norm on
#    K^n; it equals the square root of the trace of the matrix multiplied by its conjugate transpose.
# 2. Flatten the matrices into vectors and compute the Euclidean distance between them.
dists_two = classifier.compute_distances_two_loops(X_test)
dists_one = classifier.compute_distances_one_loops(X_test)
# np.linalg is the core linear algebra toolkit; norm computes the norm of a vector or a matrix.
# 'fro' stands for the Frobenius norm, which can only be used for matrices, not for vectors.
'''
difference = np.linalg.norm(dists_two - dists_one, ord='fro')
print('The difference between the distances computed with one loop and with two loops is: %f' % (difference,))
if difference < 0.001:
    print('Perfect')
else:
    print('Difference is too large!')
'''
# Next the tutorial evaluates the result of the fully vectorized (no-loop) function. It works the
# same way as above, so it is not repeated here.
# After that the tutorial compares the running times of the different loop structures: the no-loop
# version is the fastest, while the version with two nested loops is the slowest.
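# The comments above mention the fully vectorized (no-loop) distance computation without showing it.
# Below is a minimal sketch of the standard broadcasting trick
# ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2, run on small made-up arrays so it does not interfere with
# the classifier code above; every _demo_* name is illustrative only and not part of the assignment.
_demo_test = np.arange(6, dtype=np.float64).reshape(2, 3)    # two fake "test" rows
_demo_train = np.arange(12, dtype=np.float64).reshape(4, 3)  # four fake "train" rows
_demo_sq = (np.sum(_demo_test ** 2, axis=1)[:, np.newaxis]   # ||x||^2 as a column
            - 2.0 * _demo_test.dot(_demo_train.T)            # cross term x.y
            + np.sum(_demo_train ** 2, axis=1))              # ||y||^2 as a row
_demo_dists = np.sqrt(_demo_sq)                              # shape (2, 4), no explicit Python loop
# sanity check against an explicit two-loop computation
assert np.allclose(_demo_dists,
                   np.array([[np.linalg.norm(t - r) for r in _demo_train] for t in _demo_test]))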
'''Cross-validation'''
num_folds = 5  # split the data into num_folds folds
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
x_train_folds = np.array(np.split(X_train, num_folds))  # wrap the result with np.array,
y_train_folds = np.array(np.split(y_train, num_folds))  # otherwise indexing raises a "list indices must be integers, not tuple" TypeError
print('Splitting done')
# Then set up a dictionary that stores, for each k, the accuracies obtained during cross-validation.
# After cross-validation, k_to_accuracies[k] should be a list of length num_folds recording how the
# accuracy changes when k is used as the number of voting neighbors.
k_to_accuracies = {}
# Next, use cross-validation over the candidate k values to determine the best k. For each possible
# value of k, run the knn algorithm num_folds times; on each run, one fold is used as the training
# set and all the other folds are used as the validation set.
for k in k_choices:
for index in range(num_folds):
others = list(range(num_folds))
others.pop(index)
x_fold_train = x_train_folds[index, :]
y_fold_train = y_train_folds[index, :]
x_cross_valid = x_train_folds[others,:].reshape(X_train.shape[0]-x_fold_train.shape[0]
, X_train.shape[1])
y_cross_valid = y_train_folds[others,:].reshape(y_train.shape[0]-y_fold_train.shape[0])
classifier.train(x_fold_train, y_fold_train)
y_cross_valid_pred = classifier.predict(x_cross_valid,k=k, num_loops=1)
num_correct = np.sum(y_cross_valid_pred == y_cross_valid)
accuracy = float(num_correct) / y_cross_valid.shape[0]
if k_to_accuracies.get(k) is None:
k_to_accuracies[k] = []
k_to_accuracies[k].append(accuracy)
        print('Accuracy is: %f' % accuracy)
# Print the computed values
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
        print('k=%d, accuracy=%f' % (k, accuracy))
# Scatter plot of the results
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k]*len(accuracies), accuracies)
# Plot the mean accuracy with error bars
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
#plt.show()
plt.savefig('result.png', format='png')
# Based on the cross-validation results above, choose the best k, retrain on the full training data,
# and then evaluate on the test set. You should get an accuracy of roughly 28%.
best_k = 1
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k, num_loops=1)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / sub_test_num
print('Got %d / %d correct ==> accuracy %f' % (num_correct, sub_test_num, accuracy))
|
BoyuanYan/CIFAR-10
|
knn.py
|
Python
|
apache-2.0
| 7,145
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all line item creative associations for a given line item.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, line_item_id):
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v202111')
# Create a statement to select line item creative associations.
statement = (ad_manager.StatementBuilder(version='v202111')
.Where('lineItemId = :lineItemId')
.WithBindVariable('lineItemId', line_item_id))
# Retrieve a small amount of line item creative associations at a time, paging
# through until all line item creative associations have been retrieved.
while True:
response = lica_service.getLineItemCreativeAssociationsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
for lica in response['results']:
# Print out some information for each line item creative association.
if 'creativeSetId' in lica:
print('LICA with line item ID "%s", creative set ID "%s", and '
'status "%s" was found.' %
(lica['lineItemId'], lica['creativeSetId'], lica['status']))
else:
print('Line item creative association with line item ID "%d" and '
'creative ID "%d" was found.\n' %
(lica['lineItemId'], lica['creativeId']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, LINE_ITEM_ID)
|
googleads/googleads-python-lib
|
examples/ad_manager/v202111/line_item_creative_association_service/get_licas_for_line_item.py
|
Python
|
apache-2.0
| 2,431
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import timeutils
import webob
from manila.api.v1 import share_types as types
from manila.api.views import types as views_types
from manila.common import constants
from manila import exception
from manila import policy
from manila.share import share_types
from manila import test
from manila.tests.api import fakes
def stub_share_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5",
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true",
}
return dict(
id=id,
name='share_type_%s' % str(id),
extra_specs=specs,
required_extra_specs={
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true",
}
)
def return_share_types_get_all_types(context, search_opts=None):
return dict(
share_type_1=stub_share_type(1),
share_type_2=stub_share_type(2),
share_type_3=stub_share_type(3)
)
def return_empty_share_types_get_all_types(context, search_opts=None):
return {}
def return_share_types_get_share_type(context, id=1):
if id == "777":
raise exception.ShareTypeNotFound(share_type_id=id)
return stub_share_type(int(id))
def return_share_types_get_by_name(context, name):
if name == "777":
raise exception.ShareTypeNotFoundByName(share_type_name=name)
return stub_share_type(int(name.split("_")[2]))
@ddt.ddt
class ShareTypesApiTest(test.TestCase):
def setUp(self):
super(ShareTypesApiTest, self).setUp()
self.controller = types.ShareTypesController()
self.mock_object(policy, 'check_policy',
mock.Mock(return_value=True))
@ddt.data(True, False)
def test_share_types_index(self, admin):
self.mock_object(share_types, 'get_all_types',
return_share_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types',
use_admin_context=admin)
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['share_types']))
expected_names = ['share_type_1', 'share_type_2', 'share_type_3']
actual_names = map(lambda e: e['name'], res_dict['share_types'])
self.assertEqual(set(actual_names), set(expected_names))
for entry in res_dict['share_types']:
if admin:
self.assertEqual('value1', entry['extra_specs'].get('key1'))
else:
self.assertIsNone(entry['extra_specs'].get('key1'))
self.assertTrue('required_extra_specs' in entry)
required_extra_spec = entry['required_extra_specs'].get(
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS, '')
self.assertEqual('true', required_extra_spec)
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'index')
def test_share_types_index_no_data(self):
self.mock_object(share_types, 'get_all_types',
return_empty_share_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/fake/types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['share_types']))
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'index')
def test_share_types_show(self):
self.mock_object(share_types, 'get_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(2, len(res_dict))
self.assertEqual('1', res_dict['share_type']['id'])
self.assertEqual('share_type_1', res_dict['share_type']['name'])
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'show')
def test_share_types_show_not_found(self):
self.mock_object(share_types, 'get_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'show')
def test_share_types_default(self):
self.mock_object(share_types, 'get_default_share_type',
return_share_types_get_share_type)
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
res_dict = self.controller.default(req)
self.assertEqual(2, len(res_dict))
self.assertEqual('1', res_dict['share_type']['id'])
self.assertEqual('share_type_1', res_dict['share_type']['name'])
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'default')
def test_share_types_default_not_found(self):
self.mock_object(share_types, 'get_default_share_type',
mock.Mock(side_effect=exception.ShareTypeNotFound(
share_type_id="fake")))
req = fakes.HTTPRequest.blank('/v2/fake/types/default')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.default, req)
policy.check_policy.assert_called_once_with(
req.environ['manila.context'], types.RESOURCE_NAME, 'default')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_share_type = dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
required_extra_specs={},
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_share_type)
self.assertIn('share_type', output)
expected_share_type = dict(
name='new_type',
extra_specs={},
required_extra_specs={},
id=42,
)
self.assertDictMatch(output['share_type'], expected_share_type)
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.isotime()
raw_share_types = []
for i in range(0, 10):
raw_share_types.append(
dict(
name='new_type',
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
required_extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_share_types)
self.assertIn('share_types', output)
for i in range(0, 10):
expected_share_type = dict(
name='new_type',
extra_specs={},
required_extra_specs={},
id=42 + i
)
self.assertDictMatch(output['share_types'][i],
expected_share_type)
@ddt.data(None, True, 'true', 'false', 'all')
def test_parse_is_public_valid(self, value):
result = self.controller._parse_is_public(value)
self.assertTrue(result in (True, False, None))
def test_parse_is_public_invalid(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._parse_is_public,
'fakefakefake')
|
redhat-openstack/manila
|
manila/tests/api/v1/test_share_types.py
|
Python
|
apache-2.0
| 8,424
|
# --------------------------------------------------------
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
# QuickMultiAttributeEdit_menu - QGIS plugins menu class
#
# begin : May 9, 2011
# copyright : (c) 2011 by Marco Braida
# email : marcobra.ubuntu at gmail.com
#
# QuickMultiAttributeEdit is free software and is offered
# without guarantee or warranty. You can redistribute it
# and/or modify it under the terms of version 2 of the
# GNU General Public License (GPL v2) as published by the
# Free Software Foundation (www.gnu.org).
# --------------------------------------------------------
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from quickmultiattributeedit_dialogs import *
# ---------------------------------------------
class quickmultiattributeedit_menu:
def __init__(self, iface):
self.iface = iface
def initGui(self):
icon = QIcon(os.path.dirname(__file__) + "/icons/quickmultiattributeedit_update_selected.png")
self.update_selected_action = QAction(icon, "Update field of selected features", self.iface.mainWindow())
QObject.connect(self.update_selected_action, SIGNAL("triggered()"), self.update_selected)
self.iface.registerMainWindowAction(self.update_selected_action, "F12") # self.update_selected_action is triggered by the F12
self.iface.addToolBarIcon(self.update_selected_action)
self.iface.addPluginToMenu("&QuickMultiAttributeEdit", self.update_selected_action)
#self.iface.layerMenu().findChild(QMenu, 'menuNew').addAction(self.action)
def unload(self):
self.iface.unregisterMainWindowAction(self.update_selected_action)
self.iface.removePluginMenu("&quickmultiattributeedit", self.update_selected_action)
def update_selected(self):
dialog = quickmultiattributeedit_update_selected_dialog(self.iface)
dialog.exec_()
|
adamcandy/qgis-plugins-meshing-initial
|
release/QuickMultiAttributeEdit/quickmultiattributeedit_menu.py
|
Python
|
lgpl-2.1
| 3,086
|
'''
Functions related to the creation of the OP_RETURN bytes in accordance
to CRED meta-protocol for issuing/revoking certificates
on the blockchain.
'''
import time
from blockchain_certificates import utils
# Allowed operators -- 2 bytes available
operators = {
'op_issue' : b'\x00\x04',
'op_issue_abs_expiry' : b'\x00\x05',
#'op_issue_rel_expiry' : b'\x00\x07',
'op_revoke_batch' : b'\x00\x08',
'op_revoke_creds' : b'\x00\x0c',
'op_revoke_address' : b'\xff\x00'
}
'''
Creates CRED protocol's issue certificates command
'''
def issue_cmd(issuer_identifier, merkle_root):
bstring = (_create_header() + operators['op_issue'] +
_str_to_8_chars(issuer_identifier).encode('utf-8') +
utils.hex_to_bytes(merkle_root))
return bstring
'''
Creates CRED protocol's issue certificates command with absolute expiry time
expressed in UTC / Unix epoch); it uses 5 bytes for expiry
'''
def issue_abs_expiry_cmd(issuer_identifier, merkle_root, expiry):
expiry = int(expiry)
# if expiry is in the past
if expiry < time.time():
raise TypeError("Absolute expiry is in the past")
if expiry > 0xffffffffff:
raise TypeError("Absolute expiry is greater than allowed")
# uses 5 bytes so convert to hex and right justify (pad) accordingly
expiry_hex = format(expiry, 'x')
expiry_hex_padded = expiry_hex.rjust(10, '0')
bstring = (_create_header() + operators['op_issue_abs_expiry'] +
_str_to_8_chars(issuer_identifier).encode('utf-8') +
utils.hex_to_bytes(merkle_root) +
utils.hex_to_bytes(expiry_hex_padded))
return bstring
'''
Creates CRED protocol's revoke certificates batch command
'''
def revoke_batch_cmd(txid):
bstring = (_create_header() + operators['op_revoke_batch'] +
utils.hex_to_bytes(txid))
return bstring
'''
Creates CRED protocol's revoke certificates command
'''
def revoke_creds_cmd(txid, cred_hash1, cred_hash2=None):
bstring = (_create_header() + operators['op_revoke_creds'] +
utils.hex_to_bytes(txid) +
utils.ripemd160(cred_hash1))
if cred_hash2:
bstring += utils.ripemd160(cred_hash2)
return bstring
'''
Creates CRED protocol's revoke address command
'''
def revoke_address_cmd(pkh):
bstring = (_create_header() + operators['op_revoke_address'] +
utils.hex_to_bytes(pkh))
return bstring
'''
Creates the header for the CRED protocol. Currently consists of
'CRED' and a fixed version in hex.
Versioning: first byte major, second byte minor:
0001=v0.1 - 0101=v1.1 - 000a=v0.10
'''
def _create_header():
major_version = 0 # max 255
minor_version = 1 # max 255
return b'CRED' + bytes([major_version, minor_version])
'''
Returns 8 bytes version of a (utf-8) string. If larger it removes the extra
characters. If shorter it pads with space.
'''
def _str_to_8_chars(string):
length = len(string)
if length < 8:
return string.ljust(8)
elif length > 8:
return string[:8]
else:
return string
'''
Parses op_return (hex) to create a python dictionary for easy access.
Dictionary contains:
version:
cmd: op_issue | op_issue_abs_expiry | op_revoke_batch | op_revoke_creds | op_revoke_address
data:
for op_issue it has -> issuer_identifier, merkle_root
for op_issue_abs_expiry it has -> issuer_identifier, merkle_root, expiry
for op_revoke_batch it has -> txid
for op_revoke_creds it has -> txid, [hashes]
for op_revoke_address it has -> pkh
'''
def parse_op_return_hex(hex_data):
data_dict = {}
# if op_return starts with CRED it is using the meta-protocol
if hex_data.startswith(utils.text_to_hex('CRED')):
        # Structure in bytes/hex (op_issue): 4 + 2 + 2 + 8 + 32 bytes = 8 + 4 + 4 + 16 + 64 in string hex
# TODO in the future could check version_hex and act depending on version
data_dict['version'] = hex_data[8:12]
data_dict['cmd'] = hex_data[12:16]
data_dict['data'] = {}
if data_dict['cmd'] == hex_op('op_issue'):
data_dict['data']['issuer_identifier'] = hex_data[16:32]
data_dict['data']['merkle_root'] = hex_data[32:96]
elif data_dict['cmd'] == hex_op('op_issue_abs_expiry'):
data_dict['data']['issuer_identifier'] = hex_data[16:32]
data_dict['data']['merkle_root'] = hex_data[32:96]
data_dict['data']['expiry'] = hex_data[96:116]
elif data_dict['cmd'] == hex_op('op_revoke_batch'):
data_dict['data']['txid'] = hex_data[16:80]
elif data_dict['cmd'] == hex_op('op_revoke_creds'):
data_dict['data']['txid'] = hex_data[16:80]
data_dict['data']['hashes'] = []
data_dict['data']['hashes'].append(hex_data[80:120])
if len(hex_data) > 120:
data_dict['data']['hashes'].append(hex_data[120:160])
elif data_dict['cmd'] == hex_op('op_revoke_address'):
data_dict['data']['pkh'] = hex_data[16:56]
else:
return None
else:
return None
return data_dict
'''
Get ASCII hex of operators
'''
def hex_op(op):
return utils.bytes_to_hex(operators[op])
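# A minimal usage sketch (not part of the original module): build an 'issue' command and parse it
# back from its hex form. It assumes utils.bytes_to_hex returns ASCII hex in the same casing that
# parse_op_return_hex / utils.text_to_hex expect; the issuer name and merkle root are made-up values.
if __name__ == '__main__':
    demo_merkle_root = 'ab' * 32                       # 32 bytes written as 64 hex characters
    demo_cmd = issue_cmd('UNIVNIC', demo_merkle_root)  # raw bytes ready for an OP_RETURN output
    demo_parsed = parse_op_return_hex(utils.bytes_to_hex(demo_cmd))
    print(demo_parsed)                                 # expect cmd == hex_op('op_issue')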
|
UniversityOfNicosia/blockchain-certificates
|
blockchain_certificates/cred_protocol.py
|
Python
|
mit
| 5,280
|
#decorator
def now():
print "2015-11-18"
f=now
f()
print now.__name__
print f.__name__
def log(func):
def wrapper(*args,**kw):
print 'begin call %s():' %func.__name__
func(*args,**kw)
print 'end call %s():' %func.__name__
return wrapper
@log
def now1():
print now1.__name__
now1()
now1=log(now1)
now1()
def log1(text):
def decorator(func):
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log1('execute')
def now2():
print now2.__name__
now2()
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'call %s():' %func.__name__
return func(*args,**kw)
return wrapper
@log2
def now3():
print now3.__name__
now3()
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log3('execute')
def now4():
print now4.__name__
now4()
def log4(text):
if callable(text):
@functools.wraps(text)
def wrapper(*args,**kw):
print 'begin call %s:' %text.__name__
text(*args,**kw)
print 'end call '+text.__name__
return wrapper
else :
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'begin call %s %s():' %(text,func.__name__)
func(*args,**kw)
print 'end call %s %s():' %(text,func.__name__)
return wrapper
return decorator
@log4
def now5():
print 'doing'+now5.__name__
now5()
@log4('execute')
def now6():
print 'doing'+now6.__name__
now6()
|
zengboming/python
|
decorator.py
|
Python
|
apache-2.0
| 1,581
|
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Sources.FrontendStatus import FrontendStatus
from Components.Sources.StaticText import StaticText
from Components.config import config, configfile, getConfigListEntry
from Components.NimManager import nimmanager, InitNimManager
from Components.TuneTest import Tuner
from enigma import eDVBFrontendParametersSatellite, eDVBResourceManager, eTimer
class AutoDiseqc(Screen, ConfigListScreen):
diseqc_ports = [
"A", "B", "C", "D"
]
sat_frequencies = [
# astra 192 zdf
( 11953, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.FEC_3_4, \
eDVBFrontendParametersSatellite.Inversion_Off, 192, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
1079, 1, "Astra 1 19.2e"),
# astra 235 astra ses
( 12168, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.FEC_3_4, \
eDVBFrontendParametersSatellite.Inversion_Off, 235, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
3224, 3, "Astra 3 23.5e"),
# astra 282 bbc
( 10773, 22000, \
eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.FEC_5_6, \
eDVBFrontendParametersSatellite.Inversion_Off, 282, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
2045, 2, "Astra 2 28.2e"),
# hotbird 130 rai
( 10992, 27500, \
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.FEC_2_3, \
eDVBFrontendParametersSatellite.Inversion_Off, 130, \
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_Auto, \
eDVBFrontendParametersSatellite.RollOff_auto, eDVBFrontendParametersSatellite.Pilot_Unknown, \
12400, 318, "Hotbird 13.0e"),
]
SAT_TABLE_FREQUENCY = 0
SAT_TABLE_SYMBOLRATE = 1
SAT_TABLE_POLARISATION = 2
SAT_TABLE_FEC = 3
SAT_TABLE_INVERSION = 4
SAT_TABLE_ORBPOS = 5
SAT_TABLE_SYSTEM = 6
SAT_TABLE_MODULATION = 7
SAT_TABLE_ROLLOFF = 8
SAT_TABLE_PILOT = 9
SAT_TABLE_TSID = 10
SAT_TABLE_ONID = 11
SAT_TABLE_NAME = 12
def __init__(self, session, feid, nr_of_ports, simple_tone, simple_sat_change):
Screen.__init__(self, session)
self["statusbar"] = StaticText(" ")
self["tunerstatusbar"] = StaticText(" ")
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
self["config"].list = self.list
self["config"].l.setList(self.list)
self["key_red"] = StaticText(_("Abort"))
self.session.pipshown = False
self.index = 0
self.port_index = 0
self.feid = feid
self.nr_of_ports = nr_of_ports
self.simple_tone = simple_tone
self.simple_sat_change = simple_sat_change
self.found_sats = []
if not self.openFrontend():
self.oldref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService()
if not self.openFrontend():
if self.session.pipshown:
if hasattr(self.session, 'infobar'):
if self.session.infobar.servicelist and self.session.infobar.servicelist.dopipzap:
self.session.infobar.servicelist.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
if not self.openFrontend():
self.frontend = None
self.raw_channel = None
if self.raw_channel:
self.raw_channel.receivedTsidOnid.get().append(self.gotTsidOnid)
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.keyCancel,
}, -2)
self.count = 0
self.state = 0
self.abort = False
self.statusTimer = eTimer()
self.statusTimer.callback.append(self.statusCallback)
self.tunerStatusTimer = eTimer()
self.tunerStatusTimer.callback.append(self.tunerStatusCallback)
self.startStatusTimer()
self.onClose.append(self.__onClose)
def __onClose(self):
if self.raw_channel:
self.raw_channel.receivedTsidOnid.get().remove(self.gotTsidOnid)
def keyCancel(self):
self.abort = True
def keyOK(self):
return
def keyLeft(self):
return
def keyRight(self):
return
def openFrontend(self):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
self.raw_channel = res_mgr.allocateRawChannel(self.feid)
if self.raw_channel:
self.frontend = self.raw_channel.getFrontend()
if self.frontend:
return True
return False
def statusCallback(self):
if self.state == 0:
if self.port_index == 0:
self.clearNimEntries()
config.Nims[self.feid].diseqcA.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 1:
self.clearNimEntries()
config.Nims[self.feid].diseqcB.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 2:
self.clearNimEntries()
config.Nims[self.feid].diseqcC.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
elif self.port_index == 3:
self.clearNimEntries()
config.Nims[self.feid].diseqcD.value = "%d" % (self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS])
if self.nr_of_ports == 4:
config.Nims[self.feid].diseqcMode.value = "diseqc_a_b_c_d"
elif self.nr_of_ports == 2:
config.Nims[self.feid].diseqcMode.value = "diseqc_a_b"
else:
config.Nims[self.feid].diseqcMode.value = "single"
config.Nims[self.feid].configMode.value = "simple"
config.Nims[self.feid].simpleDiSEqCSetVoltageTone = self.simple_tone
config.Nims[self.feid].simpleDiSEqCOnlyOnSatChange = self.simple_sat_change
self.saveAndReloadNimConfig()
self.state += 1
elif self.state == 1:
InitNimManager(nimmanager)
self.tuner = Tuner(self.frontend)
if self.raw_channel:
self.raw_channel.requestTsidOnid()
self.tuner.tune(self.sat_frequencies[self.index])
self["statusbar"].setText(_("Checking tuner %d\nDiSEqC port %s for %s") % (self.feid, self.diseqc_ports[self.port_index], self.sat_frequencies[self.index][self.SAT_TABLE_NAME]))
self["tunerstatusbar"].setText(" ")
self.count = 0
self.state = 0
self.startTunerStatusTimer()
return
self.startStatusTimer()
def startStatusTimer(self):
self.statusTimer.start(100, True)
def setupSave(self):
self.clearNimEntries()
for x in self.found_sats:
if x[0] == "A":
config.Nims[self.feid].diseqcA.value = "%d" % (x[1])
elif x[0] == "B":
config.Nims[self.feid].diseqcB.value = "%d" % (x[1])
elif x[0] == "C":
config.Nims[self.feid].diseqcC.value = "%d" % (x[1])
elif x[0] == "D":
config.Nims[self.feid].diseqcD.value = "%d" % (x[1])
self.saveAndReloadNimConfig()
def setupClear(self):
self.clearNimEntries()
self.saveAndReloadNimConfig()
def clearNimEntries(self):
config.Nims[self.feid].diseqcA.value = "3601"
config.Nims[self.feid].diseqcB.value = "3601"
config.Nims[self.feid].diseqcC.value = "3601"
config.Nims[self.feid].diseqcD.value = "3601"
def saveAndReloadNimConfig(self):
config.Nims[self.feid].save()
configfile.save()
configfile.load()
nimmanager.sec.update()
def tunerStatusCallback(self):
dict = {}
if self.frontend:
self.frontend.getFrontendStatus(dict)
else:
self.tunerStopScan(False)
return
self["tunerstatusbar"].setText(_("Tuner status %s") % (dict["tuner_state"]))
if dict["tuner_state"] == "LOSTLOCK" or dict["tuner_state"] == "FAILED":
self.tunerStopScan(False)
return
self.count += 1
if self.count > 10:
self.tunerStopScan(False)
else:
self.startTunerStatusTimer()
def startTunerStatusTimer(self):
self.tunerStatusTimer.start(1000, True)
def gotTsidOnid(self, tsid, onid):
self.tunerStatusTimer.stop()
if tsid == self.sat_frequencies[self.index][self.SAT_TABLE_TSID] and onid == self.sat_frequencies[self.index][self.SAT_TABLE_ONID]:
self.tunerStopScan(True)
else:
self.tunerStopScan(False)
def tunerStopScan(self, result):
if self.abort:
self.setupClear()
self.close(False)
return
if result:
self.found_sats.append((self.diseqc_ports[self.port_index], self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS], self.sat_frequencies[self.index][self.SAT_TABLE_NAME]))
self.index = 0
self.port_index += 1
else:
self.index += 1
if len(self.sat_frequencies) == self.index:
self.index = 0
self.port_index += 1
if len(self.found_sats) > 0:
self.list = []
for x in self.found_sats:
self.list.append(getConfigListEntry((_("DiSEqC port %s: %s") % (x[0], x[2]))))
self["config"].l.setList(self.list)
if self.nr_of_ports == self.port_index:
self.state = 99
self.setupSave()
self.close(len(self.found_sats) > 0)
return
for x in self.found_sats:
if x[1] == self.sat_frequencies[self.index][self.SAT_TABLE_ORBPOS]:
self.tunerStopScan(False)
return
self.startStatusTimer()
|
OpenLD/enigma2-wetek
|
lib/python/Screens/AutoDiseqc.py
|
Python
|
gpl-2.0
| 9,231
|
#!/usr/bin/env python3
import os
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input',
help='the input file')
options = parser.parse_args(sys.argv[1:])
with open(options.input) as f:
content = f.read().strip()
print(content)
|
trhd/meson
|
test cases/common/98 gen extra/srcgen3.py
|
Python
|
apache-2.0
| 291
|
class B33rn4rySetupEventError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class B33rn4ryKegError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
SvenRoederer/b33rn4rycounter
|
B33rn4ryExceptions.py
|
Python
|
gpl-2.0
| 283
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import json
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('sonarr_list')
class SonarrSet(MutableSet):
supported_ids = ['tvdb_id', 'tvrage_id', 'tvmaze_id', 'imdb_id', 'slug', 'sonarr_id']
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'port': {'type': 'number', 'default': 80},
'api_key': {'type': 'string'},
'include_ended': {'type': 'boolean', 'default': True},
'only_monitored': {'type': 'boolean', 'default': True},
'include_data': {'type': 'boolean', 'default': False}
},
'required': ['api_key', 'base_url'],
'additionalProperties': False
}
def series_request_builder(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series list request')
url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def lookup_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series lookup request')
url = '%s://%s:%s%s/api/series/lookup?term=' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def profile_list_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received profile list request')
url = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def rootfolder_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received rootfolder list request')
url = '%s://%s:%s%s/api/Rootfolder' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def get_json(self, url, headers):
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def post_json(self, url, headers, data):
try:
response = requests.post(url, headers=headers, data=data)
if response.status_code == 201:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def request_builder(self, base_url, request_type, port, api_key):
if request_type == 'series':
return self.series_request_builder(base_url, port, api_key)
elif request_type == 'profile':
return self.profile_list_request(base_url, port, api_key)
elif request_type == 'lookup':
return self.lookup_request(base_url, port, api_key)
elif request_type == 'rootfolder':
return self.rootfolder_request(base_url, port, api_key)
else:
raise plugin.PluginError('Received unknown API request, aborting.')
def translate_quality(self, quality_name):
"""
        Translate Sonarr's qualities to ones recognized by Flexget
"""
if quality_name == 'Raw-HD': # No better match yet in Flexget
return 'remux'
elif quality_name == 'DVD': # No better match yet in Flexget
return 'dvdrip'
else:
return quality_name.replace('-', ' ').lower()
def quality_requirement_builder(self, quality_profile):
allowed_qualities = [self.translate_quality(quality['quality']['name']) for quality in quality_profile['items']
if quality['allowed']]
cutoff = self.translate_quality(quality_profile['cutoff']['name'])
return allowed_qualities, cutoff
def list_entries(self):
series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
json = self.get_json(series_url, series_headers)
# Retrieves Sonarr's profile list if include_data is set to true
if self.config.get('include_data'):
profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
self.config.get('port'),
self.config['api_key'])
profiles_json = self.get_json(profile_url, profile_headers)
entries = []
for show in json:
fg_qualities = '' # Initializes the quality parameter
fg_cutoff = ''
path = None
if not show['monitored'] and self.config.get(
'only_monitored'): # Checks if to retrieve just monitored shows
continue
if show['status'] == 'ended' and not self.config.get('include_ended'): # Checks if to retrieve ended shows
continue
if self.config.get('include_data') and profiles_json: # Check if to retrieve quality & path
path = show.get('path')
for profile in profiles_json:
if profile['id'] == show['profileId']: # Get show's profile data from all possible profiles
fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
entry = Entry(title=show['title'],
url='',
series_name=show['title'],
tvdb_id=show.get('tvdbId'),
tvrage_id=show.get('tvRageId'),
tvmaze_id=show.get('tvMazeId'),
imdb_id=show.get('imdbid'),
slug=show.get('titleSlug'),
sonarr_id=show.get('id'),
configure_series_target=fg_cutoff)
if len(fg_qualities) > 1:
entry['configure_series_qualities'] = fg_qualities
elif len(fg_qualities) == 1:
entry['configure_series_quality'] = fg_qualities[0]
else:
entry['configure_series_quality'] = fg_qualities
if path:
entry['configure_series_path'] = path
if entry.isvalid():
log.debug('returning entry %s', entry)
entries.append(entry)
else:
log.error('Invalid entry created? %s' % entry)
continue
return entries
def add_show(self, entry):
log.debug('searching for show match for %s using Sonarr', entry)
lookup_series_url, lookup_series_headers = self.request_builder(self.config.get('base_url'), 'lookup',
self.config.get('port'), self.config['api_key'])
if entry.get('tvdb_id'):
lookup_series_url += 'tvdb:%s' % entry.get('tvdb_id')
else:
lookup_series_url += entry.get('title')
lookup_results = self.get_json(lookup_series_url, headers=lookup_series_headers)
if not lookup_results:
log.debug('could not find series match to %s', entry)
return
else:
if len(lookup_results) > 1:
log.debug('got multiple results for Sonarr, using first one')
show = lookup_results[0]
log.debug('using show %s', show)
# Getting rootfolder
rootfolder_series_url, rootfolder_series_headers = self.request_builder(self.config.get('base_url'),
'rootfolder', self.config.get('port'),
self.config['api_key'])
rootfolder = self.get_json(rootfolder_series_url, headers=rootfolder_series_headers)
# Setting defaults for Sonarr
show['profileId'] = 1
        show['qualityProfileId'] = 1
show['rootFolderPath'] = rootfolder[0]['path']
series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
log.debug('adding show %s to sonarr', show)
returned_show = self.post_json(series_url, headers=series_headers, data=json.dumps(show))
return returned_show
def remove_show(self, show):
delete_series_url, delete_series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
delete_series_url += '/%s' % show.get('sonarr_id')
requests.delete(delete_series_url, headers=delete_series_headers)
@property
def shows(self):
if self._shows is None:
self._shows = self.list_entries()
return self._shows
def _find_entry(self, entry):
for sb_entry in self.shows:
if any(entry.get(id) is not None and entry[id] == sb_entry[id] for id in self.supported_ids):
return sb_entry
if entry.get('title').lower() == sb_entry.get('title').lower():
return sb_entry
def _from_iterable(self, it):
# TODO: is this the right answer? the returned object won't have our custom __contains__ logic
return set(it)
def __init__(self, config):
self.config = config
self._shows = None
def __iter__(self):
return (entry for entry in self.shows)
def __len__(self):
return len(self.shows)
def __contains__(self, entry):
return self._find_entry(entry) is not None
def add(self, entry):
if not self._find_entry(entry):
show = self.add_show(entry)
self._shows = None
log.verbose('Successfully added show %s to Sonarr', show['title'])
else:
log.debug('entry %s already exists in Sonarr list', entry)
def discard(self, entry):
show = self._find_entry(entry)
if not show:
log.debug('Did not find matching show in Sonarr for %s, skipping', entry)
return
self.remove_show(show)
log.verbose('removed show %s from Sonarr', show['title'])
@property
def immutable(self):
return False
@property
def online(self):
""" Set the online status of the plugin, online plugin should be treated differently in certain situations,
like test mode"""
return True
def get(self, entry):
return self._find_entry(entry)
class SonarrList(object):
schema = SonarrSet.schema
@staticmethod
def get_list(config):
return SonarrSet(config)
def on_task_input(self, task, config):
return list(SonarrSet(config))
@event('plugin.register')
def register_plugin():
plugin.register(SonarrList, 'sonarr_list', api_ver=2, groups=['list'])
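# A minimal illustrative sketch (not part of the original plugin): how translate_quality and
# quality_requirement_builder turn a Sonarr quality profile into Flexget quality requirements.
# The profile dict below is fabricated to look like Sonarr's /api/profile response; it is not real
# API output, and the config values are placeholders.
if __name__ == '__main__':
    _demo_set = SonarrSet({'api_key': 'dummy', 'base_url': 'http://localhost'})
    _demo_profile = {
        'cutoff': {'name': 'HDTV-720p'},
        'items': [
            {'quality': {'name': 'HDTV-720p'}, 'allowed': True},
            {'quality': {'name': 'DVD'}, 'allowed': True},
            {'quality': {'name': 'Raw-HD'}, 'allowed': False},
        ],
    }
    print(_demo_set.quality_requirement_builder(_demo_profile))
    # expected output: (['hdtv 720p', 'dvdrip'], 'hdtv 720p')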
|
oxc/Flexget
|
flexget/plugins/list/sonarr_list.py
|
Python
|
mit
| 11,933
|
from mutant_django.generator import DjangoBase
def register(app):
app.extend_generator('django', django_json_field)
def django_json_field(gen):
gen.field_generators['JSON'] = JSONField
class JSONField(DjangoBase):
DJANGO_FIELD = 'JSONField'
def render_imports(self):
return ['from jsonfield import JSONField']
|
peterdemin/mutant
|
src/mutant_django_json/__init__.py
|
Python
|
isc
| 341
|
from unittest import main, TestCase
from moderna.ModernaStructure import ModernaStructure
from moderna.analyze.BaseRecognizer import BaseRecognizer
from moderna.sequence.ModernaAlphabet import Alphabet
from moderna.util.Errors import ModernaResidueError
from moderna.modifications import add_modification, remove_modification, exchange_base
from moderna import load_model
from test_data import *
class AddModificationTests(TestCase):
"""
Makes sure modifications can be added to
the four standard bases A,G,C,U in ModernaResidues.
"""
def setUp(self):
"""Loads the A residue to start with."""
self.struc = ModernaStructure('file',A_RESIDUE)
self.adenosine = self.struc['1']
def test_add_to_a(self):
"""Add modification to A."""
add_modification(self.adenosine, 'm1A')
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),'m1A')
def test_add_to_g(self):
"""Add modification to G."""
exchange_base(self.adenosine, 'G')
add_modification(self.adenosine, 'm1G')
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),'m1G')
def test_add_to_u(self):
"""Add modification to U."""
exchange_base(self.adenosine,'U')
add_modification(self.adenosine, 'Y')
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),'Y')
def test_add_to_wrong_base(self):
"""Add modification to A that belongs to G should not work."""
add_modification(self.adenosine, 'm1G')
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),'m1G')
atoms_m1G=["C1'",'C2',"C2'","C3'",'C4',"C4'",'C5',"C5'",'C6','C8','CM1','N1','N2','N3','N7','N9', "O2'","O3'","O4'","O5'",'O6','OP1','OP2','P']
atoms=[at.name.strip() for at in self.adenosine.child_list]
atoms.sort()
self.assertEqual(atoms_m1G,atoms)
def test_add_to_unk(self):
"""Should be possible to add modification to unknown residue when ribose is present"""
m=load_model(PDB_UNK)
for resi in m:
add_modification(resi, 'm1G')
self.assertEqual(resi.long_abbrev, 'm1G')
def test_all(self):
"""Adding should work for all modifications."""
a = Alphabet()
br = BaseRecognizer()
not_working = []
errors = []
EXCLUDED = ['A','G','C','U',
'?A','?G','?C','?U',# exclude unknown
'X','?X','Xm', 'x',
'preQ0base','Qbase','preQ1base',
'galQtRNA', # indistinguishable from ManQtRNA
'-', '_',
'yW-58','yW-72','yW-86','m8A','fa7d7G', # new in Modomics 2009, not yet in ModeRNA.
'm7Gpp_cap', # not implemented yet
]
SYNONYMS = {'m42C':'m44C','m42Cm':'m44Cm','m62A':'m66A','m62Am':'m66Am'}
for k in a:
if k not in EXCLUDED and a[k].category not in ['unknown', 'standard', 'ligand', 'synthetic', 'stereoisomer', 'insertion', 'missing', ' ']:
struc = ModernaStructure('file',A_RESIDUE)
r = struc['1']
try:
add_modification(r, k)
right = SYNONYMS.get(k,k)
if br.identify_resi(r) != right:
not_working.append(k+','+br.identify_resi(r))
# write file for checking
struc.write_pdb_file('dummies/'+k+'.pdb')
except ModernaResidueError:
raise
errors.append(k)
if not_working or errors:
print '\nTest failed for modifications.'
print 'Different base was recognized:'
print ', '.join(not_working)
print 'ERROR occured:'
print ', '.join(errors)
self.assertEqual(len(not_working) + len(errors),0)
class ExchangeModificationTests(TestCase):
def setUp(self):
"""Loads the A residue to start with."""
self.struc = ModernaStructure('file',A_RESIDUE)
self.adenosine = self.struc['1']
def test_mods_sanity(self):
"""Adding and removing many times should work as well."""
#for mod in ['m1A','m66A','Am','t6A']:
for mod in ['m1A','m6Am','Am','t6A']:
# there is no modification named m66A. There is m6Am
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),'A')
add_modification(self.adenosine, mod)
self.assertEqual(BaseRecognizer().identify_resi(self.adenosine),mod)
remove_modification(self.adenosine)
def test_dna_exchange(self):
"""All combinations of DNA->DNA exchanges should work."""
bases = ['dT','dA','dG','dC']
br = BaseRecognizer()
r = self.adenosine
for b1 in bases:
add_modification(r, b1)
self.assertEqual(br.identify_resi(r),b1)
for b2 in bases:
remove_modification(r)
add_modification(r, b2)
self.assertEqual(br.identify_resi(r),b2)
if __name__ == '__main__':
main()
|
lenarother/moderna
|
tests/test_modifications/test_add_modification.py
|
Python
|
gpl-3.0
| 5,179
|
import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
log = logging.getLogger(__name__)
class TVToya(Plugin):
_url_re = re.compile(r"https?://tvtoya.pl/live")
_playlist_re = re.compile(r'data-stream="([^"]+)"')
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
self.session.set_option('hls-live-edge', 10)
res = self.session.http.get(self.url)
playlist_m = self._playlist_re.search(res.text)
if playlist_m:
return HLSStream.parse_variant_playlist(
self.session,
update_scheme(self.url, playlist_m.group(1)),
headers={'Referer': self.url, 'User-Agent': useragents.ANDROID}
)
else:
log.debug("Could not find stream data")
__plugin__ = TVToya
|
beardypig/streamlink
|
src/streamlink/plugins/tvtoya.py
|
Python
|
bsd-2-clause
| 996
|
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import pytest
from azure.mgmt.storage.models import StorageAccountUpdateParameters
from c7n_azure.constants import BLOB_TYPE, FILE_TYPE, QUEUE_TYPE, TABLE_TYPE
from c7n_azure.resources.storage import StorageSettingsUtilities, StorageFirewallRulesFilter, \
StorageFirewallBypassFilter
from c7n_azure.session import Session
from c7n_azure.storage_utils import StorageUtilities
from mock import patch, MagicMock, Mock
from netaddr import IPSet
from parameterized import parameterized
from c7n.utils import get_annotation_prefix
from c7n.utils import local_session
from ..azure_common import BaseTest, arm_template, cassette_name
class StorageTest(BaseTest):
def setUp(self):
super(StorageTest, self).setUp()
StorageUtilities.get_storage_primary_key.cache_clear()
def test_storage_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-storage',
'resource': 'azure.storage'
}, validate=True)
self.assertTrue(p)
@arm_template('storage.json')
def test_value_filter(self):
p = self.load_policy({
'name': 'test-azure-storage-enum',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_include(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'include': ['1.2.2.129']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_any(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'any': ['1.2.2.128/25', '8.8.8.8', '10.10.10.10']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_not_any(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'any': ['8.8.8.8', '10.10.10.10']}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_not_only(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'only': ['1.2.2.128/25', '10.10.10.10']}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_only(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'only': ['1.2.2.128/25', '3.1.1.1', '10.10.10.10']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_not_include_all_ranges(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'include': ['3.1.1.1', '3.1.1.2-3.1.1.2']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'include': ['1.2.2.128/25']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_not_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'include': ['2.2.2.128/25']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_equal(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'equal': ['3.1.1.1-3.1.1.1', '1.2.2.128/25']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_rules_not_equal(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-rules',
'equal': ['3.1.1.1-3.1.1.2', '3.1.1.1-3.1.1.1', '1.2.2.128/25']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('storage.json')
@cassette_name('firewall')
def test_firewall_bypass(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'ccipstorage*'},
{'type': 'firewall-bypass',
'mode': 'equal',
'list': ['AzureServices']}],
})
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('storage.json')
def test_diagnostic_settings_blob_storage_type(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'},
{'type': 'storage-diagnostic-settings',
'storage-type': 'blob',
'key': 'logging.delete',
'value': False}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
self.assertTrue(get_annotation_prefix('blob') in resources[0])
@arm_template('storage.json')
def test_diagnostic_settings_file_storage_type(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'},
{'type': 'storage-diagnostic-settings',
'storage-type': 'file',
'key': 'hour_metrics.enabled',
'value': True}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
self.assertTrue(get_annotation_prefix('file') in resources[0])
@arm_template('storage.json')
def test_diagnostic_settings_queue_storage_type(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'},
{'type': 'storage-diagnostic-settings',
'storage-type': 'queue',
'key': 'logging.delete',
'value': False}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
self.assertTrue(get_annotation_prefix('queue') in resources[0])
@arm_template('storage.json')
def test_diagnostic_settings_table_storage_type(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'},
{'type': 'storage-diagnostic-settings',
'storage-type': 'table',
'key': 'logging.delete',
'value': False}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
self.assertTrue(get_annotation_prefix('table') in resources[0])
@arm_template('storage.json')
def test_enable_log_settings(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cclgstorage*'}],
'actions': [
{
'type': 'set-log-settings',
'storage-types': ['blob', 'queue', 'table'],
'retention': 5,
'log': ['read', 'write', 'delete']
}
]
}, validate=True)
resources = p.run()
self.sleep_in_live_mode(30)
session = local_session(p.session_factory)
token = StorageUtilities.get_storage_token(session)
blob_settings = StorageSettingsUtilities.get_settings(
BLOB_TYPE, resources[0], token=token)
queue_settings = StorageSettingsUtilities.get_settings(
QUEUE_TYPE, resources[0], token=token)
table_settings = StorageSettingsUtilities.get_settings(
TABLE_TYPE, resources[0], session=session)
# assert all logging settings are enabled
self.assertTrue(blob_settings.logging.delete and
blob_settings.logging.read and blob_settings.logging.write)
self.assertTrue(queue_settings.logging.delete and
queue_settings.logging.read and queue_settings.logging.write)
self.assertTrue(table_settings.logging.delete and
table_settings.logging.read and table_settings.logging.write)
# assert retention policy is enabled
self.assertTrue(blob_settings.logging.retention_policy.enabled)
self.assertTrue(queue_settings.logging.retention_policy.enabled)
self.assertTrue(table_settings.logging.retention_policy.enabled)
# assert retention days is set to 5
self.assertEqual(blob_settings.logging.retention_policy.days, 5)
self.assertEqual(table_settings.logging.retention_policy.days, 5)
self.assertEqual(queue_settings.logging.retention_policy.days, 5)
@arm_template('storage.json')
def test_disable_log_settings(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cclgstorage*'}],
'actions': [
{
'type': 'set-log-settings',
'storage-types': ['blob', 'queue', 'table'],
'retention': 5,
'log': ['delete']
}
]
}, validate=True)
resources = p.run()
self.sleep_in_live_mode(30)
session = local_session(p.session_factory)
token = StorageUtilities.get_storage_token(session)
blob_settings = StorageSettingsUtilities.get_settings(
BLOB_TYPE, resources[0], token=token)
queue_settings = StorageSettingsUtilities.get_settings(
QUEUE_TYPE, resources[0], token=token)
table_settings = StorageSettingsUtilities.get_settings(
TABLE_TYPE, resources[0], session=session)
# assert read and write logging settings are disabled
self.assertFalse(blob_settings.logging.read and blob_settings.logging.write)
self.assertFalse(queue_settings.logging.read and queue_settings.logging.write)
self.assertFalse(table_settings.logging.read and table_settings.logging.write)
# assert delete logging settings are enabled
self.assertTrue(blob_settings.logging.delete)
self.assertTrue(queue_settings.logging.delete)
self.assertTrue(table_settings.logging.delete)
@arm_template('storage.json')
@pytest.mark.skiplive
def test_disable_retention_log_settings(self):
p = self.load_policy({
'name': 'test-azure-storage',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cclgstorage*'}],
'actions': [
{
'type': 'set-log-settings',
'storage-types': ['blob', 'queue', 'table'],
'retention': 0,
'log': ['read', 'write', 'delete']
}
]
}, validate=True)
resources = p.run()
session = local_session(p.session_factory)
token = StorageUtilities.get_storage_token(session)
blob_settings = StorageSettingsUtilities.get_settings(
BLOB_TYPE, resources[0], token=token)
queue_settings = StorageSettingsUtilities.get_settings(
QUEUE_TYPE, resources[0], token=token)
table_settings = StorageSettingsUtilities.get_settings(
TABLE_TYPE, resources[0], session=session)
# assert retention policy is disabled
self.assertFalse(blob_settings.logging.retention_policy.enabled)
self.assertFalse(queue_settings.logging.retention_policy.enabled)
self.assertFalse(table_settings.logging.retention_policy.enabled)
@patch('azure.storage.blob.blockblobservice.BlockBlobService.get_blob_service_properties')
def test_storage_settings_get_blob_settings(self, mock_blob_properties_call):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_token = 'mock_token'
StorageSettingsUtilities.get_settings(BLOB_TYPE, mock_storage_account, token=mock_token)
mock_blob_properties_call.assert_called_once()
@patch('azure.storage.file.fileservice.FileService.get_file_service_properties')
@patch('c7n_azure.storage_utils.StorageUtilities.get_storage_primary_key',
return_value='mock_primary_key')
def test_storage_settings_get_file_settings(self, mock_get_storage_key,
mock_file_properties_call):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_session = MagicMock()
StorageSettingsUtilities.get_settings(FILE_TYPE, mock_storage_account, session=mock_session)
mock_get_storage_key.assert_called_with(
'mock_resource_group', 'mock_storage_account', mock_session)
mock_file_properties_call.assert_called_once()
@patch('azure.cosmosdb.table.tableservice.TableService.get_table_service_properties')
@patch('c7n_azure.storage_utils.StorageUtilities.get_storage_primary_key',
return_value='mock_primary_key')
def test_storage_settings_get_table_settings(self, mock_get_storage_key,
mock_get_table_properties):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_session = MagicMock()
StorageSettingsUtilities.get_settings(
TABLE_TYPE, mock_storage_account, session=mock_session)
mock_get_storage_key.assert_called_with(
'mock_resource_group', 'mock_storage_account', mock_session)
mock_get_table_properties.assert_called_once()
@patch('azure.storage.queue.queueservice.QueueService.get_queue_service_properties')
def test_storage_settings_get_queue_settings(self, mock_get_queue_properties):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_token = 'mock_token'
StorageSettingsUtilities.get_settings(
QUEUE_TYPE, mock_storage_account, token=mock_token)
mock_get_queue_properties.assert_called_once()
@patch('azure.storage.queue.queueservice.QueueService.set_queue_service_properties')
def test_storage_settings_update_logging_queue(self, mock_set_queue_properties):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_token = 'mock_token'
log_settings = MagicMock()
StorageSettingsUtilities.update_logging(
QUEUE_TYPE, mock_storage_account, log_settings, token=mock_token)
mock_set_queue_properties.assert_called_once()
@patch('azure.cosmosdb.table.tableservice.TableService.set_table_service_properties')
@patch('c7n_azure.storage_utils.StorageUtilities.get_storage_primary_key',
return_value='mock_primary_key')
def test_storage_settings_update_logging_table(self, mock_get_storage_key,
mock_set_table_properties):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_session = MagicMock()
log_settings = MagicMock()
StorageSettingsUtilities.update_logging(
TABLE_TYPE, mock_storage_account, log_settings, session=mock_session)
mock_get_storage_key.assert_called_with(
'mock_resource_group', 'mock_storage_account', mock_session)
mock_set_table_properties.assert_called_once()
@patch('azure.storage.blob.blockblobservice.BlockBlobService.set_blob_service_properties')
def test_storage_settings_update_logging_blob(self, mock_set_blob_properties):
mock_storage_account = {
"resourceGroup": "mock_resource_group",
"name": "mock_storage_account"
}
mock_token = 'mock_token'
log_settings = MagicMock()
StorageSettingsUtilities.update_logging(
BLOB_TYPE, mock_storage_account, log_settings, token=mock_token)
mock_set_blob_properties.assert_called_once()
def test_storage_settings_require_secure_transfer(self):
with patch('azure.mgmt.storage.v%s.operations.'
'_storage_accounts_operations.StorageAccountsOperations.update'
% self._get_storage_management_client_api_string()) as update_storage_mock:
p = self.load_policy({
'name': 'my-first-policy',
'resource': 'azure.storage',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cctstorage*'}
],
'actions': [
{'type': 'require-secure-transfer',
'value': True}
]
})
p.run()
args = update_storage_mock.call_args_list[0][0]
self.assertEqual(args[0], 'test_storage')
self.assertTrue(args[1].startswith('cctstorage'))
self.assertEqual(args[2],
StorageAccountUpdateParameters(enable_https_traffic_only=True))
def _get_storage_management_client_api_string(self):
return local_session(Session)\
.client('azure.mgmt.storage.StorageManagementClient')\
.DEFAULT_API_VERSION.replace("-", "_")
class StorageFirewallFilterTest(BaseTest):
def test_query_default_allow(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_default_deny(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Deny',
'ipRules': [{'value': '10.0.0.0/16'},
{'value': '8.8.8.8'}]}}}
expected = IPSet(['8.8.8.8', '10.0.0.0/16'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def _get_filter(self, mode='equal'):
data = {mode: ['10.0.0.0/8', '127.0.0.1']}
return StorageFirewallRulesFilter(data, Mock())
class StorageFirewallBypassFilterTest(BaseTest):
scenarios = [
['Allow', '', ['AzureServices', 'Metrics', 'Logging']],
['Deny', '', []],
['Deny', 'AzureServices', ['AzureServices']],
['Deny', 'AzureServices, Metrics, Logging', ['AzureServices', 'Metrics', 'Logging']]
]
@parameterized.expand(scenarios)
def test_run(self, default_action, bypass, expected):
resource = {'properties': {'networkAcls': {'defaultAction': default_action,
'bypass': bypass}}}
f = StorageFirewallBypassFilter({'mode': 'equal', 'list': []})
self.assertEqual(expected, f._query_bypass(resource))
|
capitalone/cloud-custodian
|
tools/c7n_azure/tests_azure/tests_resources/test_storage.py
|
Python
|
apache-2.0
| 24,667
|
#
# Copyright (C) 2013 - 2015 Red Hat, Inc.
# Author: Satoru SATOH <ssato redhat.com>
# License: GPLv3+
#
"""/etc/sysconfig/i18n formats:
LANG="en_US.UTF-8"
SYSFONT="latarcyrheb-sun16"
"""
import sos_analyzer.scanner.base
class Scanner(sos_analyzer.scanner.base.SinglePatternScanner):
name = input_name = "etc/sysconfig/i18n"
pattern = r'^(?P<option>[^=]+)="?(?P<value>\S+)"?.*$'
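# Illustrative self-check added for clarity (not part of the original scanner):
# the `pattern` attribute above splits OPTION=value lines such as the examples
# in the module docstring. The sample line below is an assumption chosen for
# demonstration; the quoted form from the docstring also matches, though the
# greedy \S+ may then keep the trailing quote in the value group.
if __name__ == "__main__":
    import re
    _m = re.match(Scanner.pattern, 'SYSFONT=latarcyrheb-sun16')
    assert _m is not None
    assert _m.group("option") == "SYSFONT"
    assert _m.group("value") == "latarcyrheb-sun16"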
# vim:sw=4:ts=4:et:
|
ssato/sos-analyzer
|
sos_analyzer/scanner/etc_sysconfig_i18n.py
|
Python
|
gpl-3.0
| 413
|
import wpilib
import math
class SharpIR2Y0A02:
'''
Sharp IR sensor GP2Y0A02YK0F
Long distance sensor: 20cm to 150cm
Output is in centimeters
Distance can be calculated using 62.28*x ^ -1.092
'''
    def __init__(self, num):
self.distance = wpilib.AnalogInput(num)
def getDistance(self):
'''Returns distance in centimeters'''
# Don't allow zero/negative values
v = max(self.distance.getVoltage(), 0.00001)
d = 62.28*math.pow(v, -1.092)
# Constrain output
return max(min(d, 145.0), 22.5)
def getVoltage(self):
return self.distance.getVoltage()
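# Illustrative helper added for clarity (not part of the original robot code):
# the docstring above gives distance ~= 62.28 * v ** -1.092 for the long-range
# sensor. This standalone sketch repeats that conversion without needing any
# wpilib hardware; the clamp limits mirror getDistance() above.
def _example_long_range_cm(voltage):
    '''Convert an analog voltage reading to centimeters (illustration only).'''
    v = max(voltage, 0.00001)          # avoid blowing up on zero/negative input
    d = 62.28*math.pow(v, -1.092)      # datasheet-style power-law fit
    return max(min(d, 145.0), 22.5)    # constrain to the sensor's useful range
# e.g. _example_long_range_cm(1.0) is roughly 62 cm,
#      _example_long_range_cm(2.5) is roughly 23 cm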
class SharpIRGP2Y0A41SK0F:
'''
Sharp IR sensor GP2Y0A41SK0F
Short distance sensor: 4cm to 40cm
    Output is in centimeters
    Distance can be calculated using 12.84*x ^ -0.9824
'''
    # Short-distance sensor
    def __init__(self, num):
self.distance = wpilib.AnalogInput(num)
def getDistance(self):
'''Returns distance in centimeters'''
# Don't allow zero/negative values
v = max(self.distance.getVoltage(), 0.00001)
d = 12.84*math.pow(v, -0.9824)
# Constrain output
return max(min(d, 25), 4.0)
def getVoltage(self):
return self.distance.getVoltage()
class CombinedSensor:
def __init__(self, longDist, longOff, shortDist, shortOff):
self.longDistance = longDist
self.shortDistance = shortDist
self.longOff = longOff
self.shortOff = shortOff
def getDistance(self):
long = self.longDistance.getDistance()
short = self.shortDistance.getDistance()
#if short < 25:
# return short - self.shortOff
#else:
return max(long - self.longOff, 0)
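# Hypothetical usage sketch added for illustration (the analog channel numbers
# and offsets below are assumptions, not values taken from this robot):
#
#   long_ir = SharpIR2Y0A02(0)            # long-range sensor on analog 0
#   short_ir = SharpIRGP2Y0A41SK0F(1)     # short-range sensor on analog 1
#   wall = CombinedSensor(long_ir, 5.0, short_ir, 2.0)
#   distance_cm = wall.getDistance()      # offset-corrected long-range reading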
|
frc1418/2015-robot
|
robot/common/distance_sensors.py
|
Python
|
apache-2.0
| 1,861
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-09-11 19:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("plan", "0016_plan_test_dashboard"),
("plan", "0016_auto_20180904_1457"),
]
operations = []
|
SalesforceFoundation/mrbelvedereci
|
metaci/plan/migrations/0017_merge_20180911_1915.py
|
Python
|
bsd-3-clause
| 329
|
import logging
from urllib import urlencode
import datetime
import os
import mimetypes
import cgi
from pylons import config
from genshi.template import MarkupTemplate
from genshi.template.text import NewTextTemplate
from paste.deploy.converters import asbool
import paste.fileapp
import ckan.logic as logic
import ckan.lib.base as base
import ckan.lib.maintain as maintain
import ckan.lib.package_saver as package_saver
import ckan.lib.i18n as i18n
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.accept as accept
import ckan.lib.helpers as h
import ckan.model as model
import ckan.lib.datapreview as datapreview
import ckan.lib.plugins
import ckan.lib.uploader as uploader
import ckan.plugins as p
import ckan.lib.render
from ckan.common import OrderedDict, _, json, request, c, g, response
from home import CACHE_PARAMETERS
log = logging.getLogger(__name__)
render = base.render
abort = base.abort
redirect = base.redirect
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
flatten_to_string_key = logic.flatten_to_string_key
lookup_package_plugin = ckan.lib.plugins.lookup_package_plugin
def _encode_params(params):
return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v))
for k, v in params]
def url_with_params(url, params):
params = _encode_params(params)
return url + u'?' + urlencode(params)
def search_url(params, package_type=None):
if not package_type or package_type == 'dataset':
url = h.url_for(controller='package', action='search')
else:
url = h.url_for('{0}_search'.format(package_type))
return url_with_params(url, params)
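# Illustration added for clarity (the '/dataset' path is an assumption based on
# CKAN's default routing, not something defined in this file):
#   search_url([('tags', 'economy'), ('page', 2)])
# would typically produce a URL like '/dataset?tags=economy&page=2'.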
class PackageController(base.BaseController):
def _package_form(self, package_type=None):
return lookup_package_plugin(package_type).package_form()
def _setup_template_variables(self, context, data_dict, package_type=None):
return lookup_package_plugin(package_type).\
setup_template_variables(context, data_dict)
def _new_template(self, package_type):
return lookup_package_plugin(package_type).new_template()
def _edit_template(self, package_type):
return lookup_package_plugin(package_type).edit_template()
def _search_template(self, package_type):
return lookup_package_plugin(package_type).search_template()
def _read_template(self, package_type):
return lookup_package_plugin(package_type).read_template()
def _history_template(self, package_type):
return lookup_package_plugin(package_type).history_template()
def _guess_package_type(self, expecting_name=False):
"""
Guess the type of package from the URL handling the case
where there is a prefix on the URL (such as /data/package)
"""
        # Special case: if the root URL '/' has been redirected to the package
# controller (e.g. by an IRoutes extension) then there's nothing to do
# here.
if request.path == '/':
return 'dataset'
parts = [x for x in request.path.split('/') if x]
idx = -1
if expecting_name:
idx = -2
pt = parts[idx]
if pt == 'package':
pt = 'dataset'
return pt
def search(self):
from ckan.lib.search import SearchError
package_type = self._guess_package_type()
try:
context = {'model': model, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
check_access('site_read', context)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', u'')
c.query_error = False
try:
page = int(request.params.get('page', 1))
except ValueError, e:
abort(400, ('"page" parameter must be an integer'))
limit = g.datasets_per_page
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(alternative_url=alternative_url,
controller='package', action='search',
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='package', action='search')
c.remove_field = remove_field
sort_by = request.params.get('sort', None)
params_nosort = [(k, v) for k, v in params_nopage if k != 'sort']
def _sort_by(fields):
"""
Sort by the given list of fields.
Each entry in the list is a 2-tuple: (fieldname, sort_order)
eg - [('metadata_modified', 'desc'), ('name', 'asc')]
If fields is empty, then the default ordering is used.
"""
params = params_nosort[:]
if fields:
sort_string = ', '.join('%s %s' % f for f in fields)
params.append(('sort', sort_string))
return search_url(params, package_type)
c.sort_by = _sort_by
if sort_by is None:
c.sort_by_fields = []
else:
c.sort_by_fields = [field.split()[0]
for field in sort_by.split(',')]
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params, package_type)
c.search_url_params = urlencode(_encode_params(params_nopage))
try:
c.fields = []
# c.fields_grouped will contain a dict of params containing
# a list of values eg {'tags':['tag1', 'tag2']}
c.fields_grouped = {}
search_extras = {}
fq = ''
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
fq += ' %s:"%s"' % (param, value)
if param not in c.fields_grouped:
c.fields_grouped[param] = [value]
else:
c.fields_grouped[param].append(value)
else:
search_extras[param] = value
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
if package_type and package_type != 'dataset':
# Only show datasets of this particular type
fq += ' +dataset_type:{type}'.format(type=package_type)
else:
# Unless changed via config options, don't show non standard
# dataset types on the default search page
if not asbool(config.get('ckan.search.show_all_types', 'False')):
fq += ' +dataset_type:dataset'
facets = OrderedDict()
default_facet_titles = {
'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license_id': _('Licenses'),
}
for facet in g.facets:
if facet in default_facet_titles:
facets[facet] = default_facet_titles[facet]
else:
facets[facet] = facet
# Facet titles
for plugin in p.PluginImplementations(p.IFacets):
facets = plugin.dataset_facets(facets, package_type)
c.facet_titles = facets
data_dict = {
'q': q,
'fq': fq.strip(),
'facet.field': facets.keys(),
'rows': limit,
'start': (page - 1) * limit,
'sort': sort_by,
'extras': search_extras
}
query = get_action('package_search')(context, data_dict)
c.sort_by_selected = query['sort']
c.page = h.Page(
collection=query['results'],
page=page,
url=pager_url,
item_count=query['count'],
items_per_page=limit
)
c.facets = query['facets']
c.search_facets = query['search_facets']
c.page.items = query['results']
except SearchError, se:
log.error('Dataset search error: %r', se.args)
c.query_error = True
c.facets = {}
c.search_facets = {}
c.page = h.Page(collection=[])
c.search_facets_limits = {}
for facet in c.search_facets.keys():
try:
limit = int(request.params.get('_%s_limit' % facet,
g.facets_default_number))
except ValueError:
abort(400, _('Parameter "{parameter_name}" is not '
'an integer').format(
parameter_name='_%s_limit' % facet
))
c.search_facets_limits[facet] = limit
maintain.deprecate_context_item(
'facets',
'Use `c.search_facets` instead.')
self._setup_template_variables(context, {},
package_type=package_type)
return render(self._search_template(package_type))
def _content_type_from_extension(self, ext):
ct, mu, ext = accept.parse_extension(ext)
if not ct:
return None, None, None,
return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
def _content_type_from_accept(self):
"""
Given a requested format this method determines the content-type
to set and the genshi template loader to use in order to render
        it accurately. TextTemplate must be used for non-XML templates,
        whilst templates that produce some form of XML should use MarkupTemplate.
"""
ct, mu, ext = accept.parse_header(request.headers.get('Accept', ''))
return ct, ext, (NewTextTemplate, MarkupTemplate)[mu]
def resources(self, id):
package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
check_access('package_update', context, data_dict)
except NotAuthorized, e:
abort(401, _('User %r not authorized to edit %s') % (c.user, id))
# check if package exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
self._setup_template_variables(context, {'id': id},
package_type=package_type)
return render('package/resources.html')
def read(self, id, format='html'):
if not format == 'html':
ctype, extension, loader = \
self._content_type_from_extension(format)
if not ctype:
# An unknown format, we'll carry on in case it is a
# revision specifier and re-constitute the original id
id = "%s.%s" % (id, format)
ctype, format, loader = "text/html; charset=utf-8", "html", \
MarkupTemplate
else:
ctype, format, loader = self._content_type_from_accept()
response.headers['Content-Type'] = ctype
package_type = self._get_package_type(id.split('@')[0])
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
# interpret @<revision_id> or @<date> suffix
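        # e.g. 'some-dataset@<revision_id>' pins a specific revision, while
        # 'some-dataset@2012-11-30' asks for the dataset as of that date
        # (the dataset name here is only an illustration)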
split = id.split('@')
if len(split) == 2:
data_dict['id'], revision_ref = split
if model.is_id(revision_ref):
context['revision_id'] = revision_ref
else:
try:
date = h.date_str_to_datetime(revision_ref)
context['revision_date'] = date
except TypeError, e:
abort(400, _('Invalid revision format: %r') % e.args)
except ValueError, e:
abort(400, _('Invalid revision format: %r') % e.args)
elif len(split) > 2:
abort(400, _('Invalid revision format: %r') %
'Too many "@" symbols')
# check if package exists
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
# used by disqus plugin
c.current_package_id = c.pkg.id
c.related_count = c.pkg.related_count
# can the resources be previewed?
for resource in c.pkg_dict['resources']:
resource['can_be_previewed'] = self._resource_preview(
{'resource': resource, 'package': c.pkg_dict})
self._setup_template_variables(context, {'id': id},
package_type=package_type)
package_saver.PackageSaver().render_package(c.pkg_dict, context)
template = self._read_template(package_type)
template = template[:template.index('.') + 1] + format
try:
return render(template, loader_class=loader)
except ckan.lib.render.TemplateNotFound:
msg = _("Viewing {package_type} datasets in {format} format is "
"not supported (template file {file} not found).".format(
package_type=package_type, format=format, file=template))
abort(404, msg)
assert False, "We should never get here"
def history(self, id):
package_type = self._get_package_type(id.split('@')[0])
if 'diff' in request.params or 'selected1' in request.params:
try:
params = {'id': request.params.getone('pkg_name'),
'diff': request.params.getone('selected1'),
'oldid': request.params.getone('selected2'),
}
except KeyError, e:
if 'pkg_name' in dict(request.params):
id = request.params.getone('pkg_name')
c.error = \
_('Select two revisions before doing the comparison.')
else:
params['diff_entity'] = 'package'
h.redirect_to(controller='revision', action='diff', **params)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg_revisions = get_action('package_revision_list')(context,
data_dict)
# TODO: remove
# Still necessary for the authz check in group/layout.html
c.pkg = context['package']
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
format = request.params.get('format', '')
if format == 'atom':
# Generate and return Atom 1.0 document.
from webhelpers.feedgenerator import Atom1Feed
feed = Atom1Feed(
title=_(u'CKAN Dataset Revision History'),
link=h.url_for(controller='revision', action='read',
id=c.pkg_dict['name']),
description=_(u'Recent changes to CKAN Dataset: ') +
(c.pkg_dict['title'] or ''),
language=unicode(i18n.get_lang()),
)
for revision_dict in c.pkg_revisions:
revision_date = h.date_str_to_datetime(
revision_dict['timestamp'])
try:
dayHorizon = int(request.params.get('days'))
except:
dayHorizon = 30
dayAge = (datetime.datetime.now() - revision_date).days
if dayAge >= dayHorizon:
break
if revision_dict['message']:
item_title = u'%s' % revision_dict['message'].\
split('\n')[0]
else:
item_title = u'%s' % revision_dict['id']
item_link = h.url_for(controller='revision', action='read',
id=revision_dict['id'])
item_description = _('Log message: ')
item_description += '%s' % (revision_dict['message'] or '')
item_author_name = revision_dict['author']
item_pubdate = revision_date
feed.add_item(
title=item_title,
link=item_link,
description=item_description,
author_name=item_author_name,
pubdate=item_pubdate,
)
feed.content_type = 'application/atom+xml'
return feed.writeString('utf-8')
c.related_count = c.pkg.related_count
return render(self._history_template(c.pkg_dict.get('type',
package_type)))
def new(self, data=None, errors=None, error_summary=None):
package_type = self._guess_package_type(True)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params}
# Package needs to have a organization group in the call to
# check_access and also to save it
try:
check_access('package_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a package'))
if context['save'] and not data:
return self._save_new(context, package_type=package_type)
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.params, ignore_keys=CACHE_PARAMETERS))))
c.resources_json = h.json.dumps(data.get('resources', []))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(
h.dict_list_reduce(data.get('tags', {}), 'name'))
errors = errors or {}
error_summary = error_summary or {}
# in the phased add dataset we need to know that
# we have already completed stage 1
stage = ['active']
if data.get('state') == 'draft':
stage = ['active', 'complete']
elif data.get('state') == 'draft-complete':
stage = ['active', 'complete', 'complete']
# if we are creating from a group then this allows the group to be
# set automatically
data['group_id'] = request.params.get('group') or \
request.params.get('groups__0__id')
vars = {'data': data, 'errors': errors,
'error_summary': error_summary,
'action': 'new', 'stage': stage}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {},
package_type=package_type)
# TODO: This check is to maintain backwards compatibility with the
# old way of creating custom forms. This behaviour is now deprecated.
if hasattr(self, 'package_form'):
c.form = render(self.package_form, extra_vars=vars)
else:
c.form = render(self._package_form(package_type=package_type),
extra_vars=vars)
return render(self._new_template(package_type),
extra_vars={'stage': stage})
def resource_edit(self, id, resource_id, data=None, errors=None,
error_summary=None):
if request.method == 'POST' and not data:
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
context = {'model': model, 'session': model.Session,
'api_version': 3, 'for_edit': True,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data['package_id'] = id
try:
if resource_id:
data['id'] = resource_id
get_action('resource_update')(context, data)
else:
get_action('resource_create')(context, data)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.resource_edit(id, resource_id, data,
errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to edit this resource'))
redirect(h.url_for(controller='package', action='resource_read',
id=id, resource_id=resource_id))
context = {'model': model, 'session': model.Session,
'api_version': 3, 'for_edit': True,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
pkg_dict = get_action('package_show')(context, {'id': id})
if pkg_dict['state'].startswith('draft'):
# dataset has not yet been fully created
resource_dict = get_action('resource_show')(context, {'id': resource_id})
fields = ['url', 'resource_type', 'format', 'name', 'description', 'id']
data = {}
for field in fields:
data[field] = resource_dict[field]
return self.new_resource(id, data=data)
# resource is fully created
try:
resource_dict = get_action('resource_show')(context, {'id': resource_id})
except NotFound:
abort(404, _('Resource not found'))
c.pkg_dict = pkg_dict
c.resource = resource_dict
# set the form action
c.form_action = h.url_for(controller='package',
action='resource_edit',
resource_id=resource_id,
id=id)
if not data:
data = resource_dict
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new'}
return render('package/resource_edit.html', extra_vars=vars)
def new_resource(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
resource_id = data['id']
del data['id']
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
# see if we have any data that we are trying to save
data_provided = False
for key, value in data.iteritems():
if ((value or isinstance(value, cgi.FieldStorage))
and key != 'resource_type'):
data_provided = True
break
if not data_provided and save_action != "go-dataset-complete":
if save_action == 'go-dataset':
                    # go back to the dataset form (first stage of add dataset)
redirect(h.url_for(controller='package',
action='edit', id=id))
# see if we have added any resources
try:
data_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to update dataset'))
except NotFound:
abort(404,
_('The dataset {id} could not be found.').format(id=id))
if not len(data_dict['resources']):
# no data so keep on page
msg = _('You must add at least one data resource')
# On new templates do not use flash message
if g.legacy_templates:
h.flash_error(msg)
redirect(h.url_for(controller='package',
action='new_resource', id=id))
else:
errors = {}
error_summary = {_('Error'): msg}
return self.new_resource(id, data, errors, error_summary)
# we have a resource so let them add metadata
redirect(h.url_for(controller='package',
action='new_metadata', id=id))
data['package_id'] = id
try:
if resource_id:
data['id'] = resource_id
get_action('resource_update')(context, data)
else:
get_action('resource_create')(context, data)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new_resource(id, data, errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to create a resource'))
except NotFound:
abort(404,
_('The dataset {id} could not be found.').format(id=id))
if save_action == 'go-metadata':
# go to final stage of add dataset
redirect(h.url_for(controller='package',
action='new_metadata', id=id))
elif save_action == 'go-dataset':
# go to first stage of add dataset
redirect(h.url_for(controller='package',
action='edit', id=id))
elif save_action == 'go-dataset-complete':
                # dataset is complete - go to the dataset read page
redirect(h.url_for(controller='package',
action='read', id=id))
else:
# add more resources
redirect(h.url_for(controller='package',
action='new_resource', id=id))
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'new'}
vars['pkg_name'] = id
# get resources for sidebar
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
pkg_dict = get_action('package_show')(context, {'id': id})
except NotFound:
abort(404, _('The dataset {id} could not be found.').format(id=id))
# required for nav menu
vars['pkg_dict'] = pkg_dict
template = 'package/new_resource_not_draft.html'
if pkg_dict['state'] == 'draft':
vars['stage'] = ['complete', 'active']
template = 'package/new_resource.html'
elif pkg_dict['state'] == 'draft-complete':
vars['stage'] = ['complete', 'active', 'complete']
template = 'package/new_resource.html'
return render(template, extra_vars=vars)
def new_metadata(self, id, data=None, errors=None, error_summary=None):
''' FIXME: This is a temporary action to allow styling of the
forms. '''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
if request.method == 'POST' and not data:
save_action = request.params.get('save')
data = data or clean_dict(dict_fns.unflatten(tuplize_dict(parse_params(
request.POST))))
# we don't want to include save as it is part of the form
del data['save']
data_dict = get_action('package_show')(context, {'id': id})
data_dict['id'] = id
# update the state
if save_action == 'finish':
# we want this to go live when saved
data_dict['state'] = 'active'
elif save_action in ['go-resources', 'go-dataset']:
data_dict['state'] = 'draft-complete'
# allow the state to be changed
context['allow_state_change'] = True
data_dict.update(data)
try:
get_action('package_update')(context, data_dict)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new_metadata(id, data, errors, error_summary)
except NotAuthorized:
abort(401, _('Unauthorized to update dataset'))
if save_action == 'go-resources':
# we want to go back to the add resources form stage
redirect(h.url_for(controller='package',
action='new_resource', id=id))
elif save_action == 'go-dataset':
# we want to go back to the add dataset stage
redirect(h.url_for(controller='package',
action='edit', id=id))
redirect(h.url_for(controller='package', action='read', id=id))
if not data:
data = get_action('package_show')(context, {'id': id})
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
vars['pkg_name'] = id
package_type = self._get_package_type(id)
self._setup_template_variables(context, {},
package_type=package_type)
return render('package/new_package_metadata.html', extra_vars=vars)
def edit(self, id, data=None, errors=None, error_summary=None):
package_type = self._get_package_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'save': 'save' in request.params,
'moderated': config.get('moderated'),
'pending': True}
if context['save'] and not data:
return self._save_edit(id, context, package_type=package_type)
try:
c.pkg_dict = get_action('package_show')(context, {'id': id})
context['for_edit'] = True
old_data = get_action('package_show')(context, {'id': id})
# old data is from the database and data is passed from the
# user if there is a validation error. Use users data if there.
if data:
old_data.update(data)
data = old_data
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
# are we doing a multiphase add?
if data.get('state', '').startswith('draft'):
c.form_action = h.url_for(controller='package', action='new')
c.form_style = 'new'
return self.new(data=data, errors=errors,
error_summary=error_summary)
c.pkg = context.get("package")
c.resources_json = h.json.dumps(data.get('resources', []))
try:
check_access('package_update', context)
except NotAuthorized, e:
abort(401, _('User %r not authorized to edit %s') % (c.user, id))
# convert tags if not supplied in data
if data and not data.get('tag_string'):
data['tag_string'] = ', '.join(h.dict_list_reduce(
c.pkg_dict.get('tags', {}), 'name'))
errors = errors or {}
vars = {'data': data, 'errors': errors,
'error_summary': error_summary, 'action': 'edit'}
c.errors_json = h.json.dumps(errors)
self._setup_template_variables(context, {'id': id},
package_type=package_type)
c.related_count = c.pkg.related_count
# we have already completed stage 1
vars['stage'] = ['active']
if data.get('state') == 'draft':
vars['stage'] = ['active', 'complete']
elif data.get('state') == 'draft-complete':
vars['stage'] = ['active', 'complete', 'complete']
# TODO: This check is to maintain backwards compatibility with the
# old way of creating custom forms. This behaviour is now deprecated.
if hasattr(self, 'package_form'):
c.form = render(self.package_form, extra_vars=vars)
else:
c.form = render(self._package_form(package_type=package_type),
extra_vars=vars)
return render(self._edit_template(package_type),
extra_vars={'stage': vars['stage']})
def read_ajax(self, id, revision=None):
package_type = self._get_package_type(id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'revision_id': revision}
try:
data = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
data.pop('tags')
data = flatten_to_string_key(data)
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(data)
def history_ajax(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
pkg_revisions = get_action('package_revision_list')(
context, data_dict)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
data = []
approved = False
for num, revision in enumerate(pkg_revisions):
if not approved and revision['approved_timestamp']:
current_approved, approved = True, True
else:
current_approved = False
data.append({'revision_id': revision['id'],
'message': revision['message'],
'timestamp': revision['timestamp'],
'author': revision['author'],
'approved': bool(revision['approved_timestamp']),
'current_approved': current_approved})
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(data)
def _get_package_type(self, id):
"""
Given the id of a package it determines the plugin to load
based on the package's type name (type). The plugin found
will be returned, or None if there is no plugin associated with
the type.
"""
pkg = model.Package.get(id)
if pkg:
return pkg.type or 'dataset'
return None
def _tag_string_to_list(self, tag_string):
        ''' This is used to change tags from a string to a list of dicts '''
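        # e.g. 'economy, health , ' becomes
        #   [{'name': 'economy', 'state': 'active'},
        #    {'name': 'health', 'state': 'active'}]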
out = []
for tag in tag_string.split(','):
tag = tag.strip()
if tag:
out.append({'name': tag,
'state': 'active'})
return out
def _save_new(self, context, package_type=None):
        # The staged add-dataset flow reuses this 'new' action while the
        # dataset is only partially created, so we need to know whether we are
        # actually updating an existing draft or creating a genuinely new one.
is_an_update = False
ckan_phase = request.params.get('_ckan_phase')
from ckan.lib.search import SearchIndexError
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
if ckan_phase:
# prevent clearing of groups etc
context['allow_partial_update'] = True
# sort the tags
data_dict['tags'] = self._tag_string_to_list(
data_dict['tag_string'])
if data_dict.get('pkg_name'):
is_an_update = True
# This is actually an update not a save
data_dict['id'] = data_dict['pkg_name']
del data_dict['pkg_name']
# this is actually an edit not a save
pkg_dict = get_action('package_update')(context, data_dict)
if request.params['save'] == 'go-metadata':
# redirect to add metadata
url = h.url_for(controller='package',
action='new_metadata',
id=pkg_dict['name'])
else:
# redirect to add dataset resources
url = h.url_for(controller='package',
action='new_resource',
id=pkg_dict['name'])
redirect(url)
# Make sure we don't index this dataset
if request.params['save'] not in ['go-resource', 'go-metadata']:
data_dict['state'] = 'draft'
# allow the state to be changed
context['allow_state_change'] = True
data_dict['type'] = package_type
context['message'] = data_dict.get('log_message', '')
pkg_dict = get_action('package_create')(context, data_dict)
if ckan_phase:
# redirect to add dataset resources
url = h.url_for(controller='package',
action='new_resource',
id=pkg_dict['name'])
redirect(url)
self._form_save_redirect(pkg_dict['name'], 'new', package_type=package_type)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % '')
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to add package to search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
if is_an_update:
# we need to get the state of the dataset to show the stage we
# are on.
pkg_dict = get_action('package_show')(context, data_dict)
data_dict['state'] = pkg_dict['state']
return self.edit(data_dict['id'], data_dict,
errors, error_summary)
data_dict['state'] = 'none'
return self.new(data_dict, errors, error_summary)
def _save_edit(self, name_or_id, context, package_type=None):
from ckan.lib.search import SearchIndexError
log.debug('Package save request name: %s POST: %r',
name_or_id, request.POST)
try:
data_dict = clean_dict(dict_fns.unflatten(
tuplize_dict(parse_params(request.POST))))
if '_ckan_phase' in data_dict:
# we allow partial updates to not destroy existing resources
context['allow_partial_update'] = True
data_dict['tags'] = self._tag_string_to_list(
data_dict['tag_string'])
del data_dict['_ckan_phase']
del data_dict['save']
context['message'] = data_dict.get('log_message', '')
if not context['moderated']:
context['pending'] = False
data_dict['id'] = name_or_id
pkg = get_action('package_update')(context, data_dict)
if request.params.get('save', '') == 'Approve':
get_action('make_latest_pending_package_active')(
context, data_dict)
c.pkg = context['package']
c.pkg_dict = pkg
self._form_save_redirect(pkg['name'], 'edit', package_type=package_type)
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
except NotFound, e:
abort(404, _('Dataset not found'))
except dict_fns.DataError:
abort(400, _(u'Integrity Error'))
except SearchIndexError, e:
try:
exc_str = unicode(repr(e.args))
except Exception: # We don't like bare excepts
exc_str = unicode(str(e))
abort(500, _(u'Unable to update search index.') + exc_str)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(name_or_id, data_dict, errors, error_summary)
def _form_save_redirect(self, pkgname, action, package_type=None):
'''This redirects the user to the CKAN package/read page,
        unless there is a request parameter giving an alternate location,
perhaps an external website.
@param pkgname - Name of the package just edited
@param action - What the action of the edit was
'''
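        # e.g. a hypothetical config value (illustration only)
        #   package_edit_return_url = http://example.com/dataset/<NAME>
        # would send an edit of 'my-dataset' to
        #   http://example.com/dataset/my-dataset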
assert action in ('new', 'edit')
url = request.params.get('return_to') or \
config.get('package_%s_return_url' % action)
if url:
url = url.replace('<NAME>', pkgname)
else:
if package_type is None or package_type == 'dataset':
url = h.url_for(controller='package', action='read', id=pkgname)
else:
url = h.url_for('{0}_read'.format(package_type), id=pkgname)
redirect(url)
def _adjust_license_id_options(self, pkg, fs):
options = fs.license_id.render_opts['options']
is_included = False
for option in options:
license_id = option[1]
if license_id == pkg.license_id:
is_included = True
if not is_included:
options.insert(1, (pkg.license_id, pkg.license_id))
def delete(self, id):
if 'cancel' in request.params:
h.redirect_to(controller='package', action='edit', id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
check_access('package_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
try:
if request.method == 'POST':
get_action('package_delete')(context, {'id': id})
h.flash_notice(_('Dataset has been deleted.'))
h.redirect_to(controller='package', action='search')
c.pkg_dict = get_action('package_show')(context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
except NotFound:
abort(404, _('Dataset not found'))
return render('package/confirm_delete.html')
def resource_delete(self, id, resource_id):
if 'cancel' in request.params:
h.redirect_to(controller='package', action='resource_edit', resource_id=resource_id, id=id)
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
check_access('package_delete', context, {'id': id})
except NotAuthorized:
abort(401, _('Unauthorized to delete package %s') % '')
try:
if request.method == 'POST':
get_action('resource_delete')(context, {'id': resource_id})
h.flash_notice(_('Resource has been deleted.'))
h.redirect_to(controller='package', action='read', id=id)
c.resource_dict = get_action('resource_show')(context, {'id': resource_id})
c.pkg_id = id
except NotAuthorized:
abort(401, _('Unauthorized to delete resource %s') % '')
except NotFound:
abort(404, _('Resource not found'))
return render('package/confirm_delete_resource.html')
def autocomplete(self):
# DEPRECATED in favour of /api/2/util/dataset/autocomplete
q = unicode(request.params.get('q', ''))
if not len(q):
return ''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'q': q}
packages = get_action('package_autocomplete')(context, data_dict)
pkg_list = []
for pkg in packages:
pkg_list.append('%s|%s' % (pkg['match_displayed'].
replace('|', ' '), pkg['name']))
return '\n'.join(pkg_list)
def _render_edit_form(self, fs, params={}, clear_session=False):
# errors arrive in c.error and fs.errors
c.log_message = params.get('log_message', '')
# rgrp: expunge everything from session before dealing with
# validation errors) so we don't have any problematic saves
# when the fs.render causes a flush.
# seb: If the session is *expunged*, then the form can't be
# rendered; I've settled with a rollback for now, which isn't
# necessarily what's wanted here.
# dread: I think this only happened with tags because until
# this changeset, Tag objects were created in the Renderer
# every time you hit preview. So I don't believe we need to
# clear the session any more. Just in case I'm leaving it in
# with the log comments to find out.
if clear_session:
# log to see if clearing the session is ever required
if model.Session.new or model.Session.dirty or \
model.Session.deleted:
                log.warn('Expunging session changes which were not expected: '
                         '%r %r %r', model.Session.new, model.Session.dirty,
                         model.Session.deleted)
try:
model.Session.rollback()
except AttributeError:
# older SQLAlchemy versions
model.Session.clear()
edit_form_html = fs.render()
c.form = h.literal(edit_form_html)
return h.literal(render('package/edit_form.html'))
def _update_authz(self, fs):
validation = fs.validate()
if not validation:
c.form = self._render_edit_form(fs, request.params)
raise package_saver.ValidationException(fs)
try:
fs.sync()
except Exception, inst:
model.Session.rollback()
raise
else:
model.Session.commit()
def resource_read(self, id, resource_id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj, "for_view":True}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
# required for nav menu
c.pkg = context['package']
c.pkg_dict = c.package
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
# get package license info
license_id = c.package.get('license_id')
try:
c.package['isopen'] = model.Package.\
get_license_register()[license_id].isopen()
except KeyError:
c.package['isopen'] = False
# TODO: find a nicer way of doing this
c.datastore_api = '%s/api/action' % config.get('ckan.site_url', '').rstrip('/')
c.related_count = c.pkg.related_count
c.resource['can_be_previewed'] = self._resource_preview(
{'resource': c.resource, 'package': c.package})
return render('package/resource_read.html')
def _resource_preview(self, data_dict):
return bool(datapreview.res_format(data_dict['resource'])
in datapreview.direct() + datapreview.loadable()
or datapreview.get_preview_plugin(
data_dict, return_first=True))
def resource_download(self, id, resource_id, filename=None):
"""
Provides a direct download by either redirecting the user to the url stored
or downloading an uploaded file directly.
"""
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
rsc = get_action('resource_show')(context, {'id': resource_id})
pkg = get_action('package_show')(context, {'id': id})
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
if rsc.get('url_type') == 'upload':
upload = uploader.ResourceUpload(rsc)
filepath = upload.get_path(rsc['id'])
fileapp = paste.fileapp.FileApp(filepath)
try:
status, headers, app_iter = request.call_application(fileapp)
except OSError:
abort(404, _('Resource data not found'))
response.headers.update(dict(headers))
content_type, content_enc = mimetypes.guess_type(rsc.get('url',''))
response.headers['Content-Type'] = content_type
response.status = status
return app_iter
        elif 'url' not in rsc:
abort(404, _('No download is available'))
redirect(rsc['url'])
def follow(self, id):
'''Start following this dataset.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('follow_dataset')(context, data_dict)
package_dict = get_action('package_show')(context, data_dict)
h.flash_success(_("You are now following {0}").format(
package_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except NotAuthorized as e:
h.flash_error(e.extra_msg)
h.redirect_to(controller='package', action='read', id=id)
def unfollow(self, id):
'''Stop following this dataset.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('unfollow_dataset')(context, data_dict)
package_dict = get_action('package_show')(context, data_dict)
h.flash_success(_("You are no longer following {0}").format(
package_dict['title']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except (NotFound, NotAuthorized) as e:
error_message = e.extra_msg or e.message
h.flash_error(error_message)
h.redirect_to(controller='package', action='read', id=id)
def followers(self, id=None):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
c.followers = get_action('dataset_follower_list')(context,
{'id': c.pkg_dict['id']})
c.related_count = c.pkg.related_count
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read package %s') % id)
return render('package/followers.html')
def groups(self, id):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj, 'use_cache': False}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read dataset %s') % id)
if request.method == 'POST':
new_group = request.POST.get('group_added')
if new_group:
data_dict = {"id": new_group,
"object": id,
"object_type": 'package',
"capacity": 'public'}
try:
get_action('member_create')(context, data_dict)
except NotFound:
abort(404, _('Group not found'))
removed_group = None
for param in request.POST:
if param.startswith('group_remove'):
removed_group = param.split('.')[-1]
break
if removed_group:
data_dict = {"id": removed_group,
"object": id,
"object_type": 'package'}
try:
get_action('member_delete')(context, data_dict)
except NotFound:
abort(404, _('Group not found'))
redirect(h.url_for(controller='package',
action='groups', id=id))
context['is_member'] = True
users_groups = get_action('group_list_authz')(context, data_dict)
pkg_group_ids = set(group['id'] for group
in c.pkg_dict.get('groups', []))
user_group_ids = set(group['id'] for group
in users_groups)
c.group_dropdown = [[group['id'], group['display_name']]
for group in users_groups if
group['id'] not in pkg_group_ids]
for group in c.pkg_dict.get('groups', []):
group['user_member'] = (group['id'] in user_group_ids)
return render('package/group_list.html')
def activity(self, id):
'''Render this package's public activity stream page.'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'for_view': True,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
c.pkg_dict = get_action('package_show')(context, data_dict)
c.pkg = context['package']
c.package_activity_stream = get_action(
'package_activity_list_html')(context,
{'id': c.pkg_dict['id']})
c.related_count = c.pkg.related_count
except NotFound:
abort(404, _('Dataset not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read dataset %s') % id)
return render('package/activity.html')
def resource_embedded_dataviewer(self, id, resource_id,
width=500, height=500):
"""
        Embedded page for a read-only resource dataview. Allows
for width and height to be specified as part of the
querystring (as well as accepting them via routes).
"""
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
c.resource_json = h.json.dumps(c.resource)
# double check that the resource belongs to the specified package
if not c.resource['id'] in [r['id']
for r in c.package['resources']]:
raise NotFound
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
# Construct the recline state
state_version = int(request.params.get('state_version', '1'))
recline_state = self._parse_recline_state(request.params)
if recline_state is None:
abort(400, ('"state" parameter must be a valid recline '
'state (version %d)' % state_version))
c.recline_state = h.json.dumps(recline_state)
c.width = max(int(request.params.get('width', width)), 100)
c.height = max(int(request.params.get('height', height)), 100)
c.embedded = True
return render('package/resource_embedded_dataviewer.html')
def _parse_recline_state(self, params):
state_version = int(request.params.get('state_version', '1'))
if state_version != 1:
return None
recline_state = {}
for k, v in request.params.items():
try:
v = h.json.loads(v)
except ValueError:
pass
recline_state[k] = v
recline_state.pop('width', None)
recline_state.pop('height', None)
recline_state['readOnly'] = True
# previous versions of recline setup used elasticsearch_url attribute
# for data api url - see http://trac.ckan.org/ticket/2639
# fix by relocating this to url attribute which is the default location
if 'dataset' in recline_state and 'elasticsearch_url' in recline_state['dataset']:
recline_state['dataset']['url'] = recline_state['dataset']['elasticsearch_url']
# Ensure only the currentView is available
# default to grid view if none specified
if not recline_state.get('currentView', None):
recline_state['currentView'] = 'grid'
for k in recline_state.keys():
if k.startswith('view-') and \
not k.endswith(recline_state['currentView']):
recline_state.pop(k)
return recline_state
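    # Illustrative sketch (not part of the original controller): given request params such as
    #   {'state_version': '1', 'currentView': 'graph', 'view-grid': '{...}', 'view-graph': '{...}',
    #    'width': '600'}
    # the helper above drops width/height, forces readOnly to True, keeps only the view-*
    # entry matching currentView (here 'view-graph'), and returns the remaining keys as a
    # dict ready to be JSON-encoded for Recline. All parameter values in this example are made up.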
def resource_datapreview(self, id, resource_id):
'''
        Embedded page for a resource data-preview.
Depending on the type, different previews are loaded. This could be an
img tag where the image is loaded directly or an iframe that embeds a
webpage, recline or a pdf preview.
'''
context = {
'model': model,
'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj
}
try:
c.resource = get_action('resource_show')(context,
{'id': resource_id})
c.package = get_action('package_show')(context, {'id': id})
data_dict = {'resource': c.resource, 'package': c.package}
preview_plugin = datapreview.get_preview_plugin(data_dict)
if preview_plugin is None:
abort(409, _('No preview has been defined.'))
preview_plugin.setup_template_variables(context, data_dict)
c.resource_json = json.dumps(c.resource)
except NotFound:
abort(404, _('Resource not found'))
except NotAuthorized:
abort(401, _('Unauthorized to read resource %s') % id)
else:
return render(preview_plugin.preview_template(context, data_dict))
|
sciamlab/ckanext-datigovit
|
custom/ckan/ckan/controllers/package.py
|
Python
|
agpl-3.0
| 62,527
|
#!/usr/bin/env python3
"""
Avalam agent.
Copyright (C) 2015, <<<<<<<<<<< YOUR NAMES HERE >>>>>>>>>>>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import avalam
import minimax
class Agent:
"""This is the skeleton of an agent to play the Avalam game."""
def __init__(self, name="Basic Agent"):
self.name = name
def successors(self, state):
"""The successors function must return (or yield) a list of
pairs (a, s) in which a is the action played to reach the
state s; s is the new state, i.e. a triplet (b, p, st) where
b is the new board after the action a has been played,
p is the player to play the next move and st is the next
step number.
"""
(oldBoard, oldPlayer, oldStepNbr) = state
for action in oldBoard.get_actions():
newBoard = oldBoard.clone()
newBoard.play_action(action)
yield (action, (newBoard, -oldPlayer, oldStepNbr+1))
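    # Sketch (not part of the original agent): each yielded pair has the shape
    #   (action, (new_board, -player, step + 1))
    # where the action format (e.g. a (from_x, from_y, to_x, to_y) tuple) is whatever
    # Board.get_actions() produces in the avalam module; the exact shape is assumed here.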
def cutoff(self, state, depth):
"""The cutoff function returns true if the alpha-beta/minimax
search has to stop; false otherwise.
"""
(oldBoard, oldPlayer, oldStepNbr) = state
if depth >= 2 or oldBoard.is_finished():
return True
else:
return False
def evaluate(self, state):
"""The evaluate function must return an integer value
representing the utility function of the board.
"""
(oldBoard, oldPlayer, oldStepNbr) = state
return oldBoard.get_score()
def play(self, board, player, step, time_left):
"""This function is used to play a move according
to the board, player and time left provided as input.
It must return an action representing the move the player
will perform.
"""
self.time_left = time_left
newBoard = avalam.Board(board.get_percepts(player==avalam.PLAYER2))
state = (newBoard, player, step)
return minimax.search(state, self)
if __name__ == "__main__":
avalam.agent_main(Agent())
|
fthuin/artificial-intelligence
|
assignment3/Code/basic_agent_2.py
|
Python
|
mit
| 2,609
|
# -*- coding: utf-8 -*-
from ..common import get_module_class
class Parser(object):
@staticmethod
def get(parser_name):
clazz = get_module_class(parser_name, __name__)
return clazz()
def loads(self, content):
return content
def dumps(self, content):
return content
def load(self, f):
return NotImplemented
def dump(self, content, f):
return NotImplemented
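# Illustrative usage sketch (not part of the original module): Parser.get() resolves the
# class through get_module_class against this package, so the name below is a placeholder
# for whichever concrete parser modules actually exist alongside this file.
#
#   parser = Parser.get("csv")        # assumed parser name
#   text = parser.dumps(rows)         # serialize
#   rows_again = parser.loads(text)   # parse back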
|
DataCanvasIO/pyDataCanvas
|
datacanvas/dataset/parser/parser.py
|
Python
|
apache-2.0
| 435
|
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains diagnostic collection and processing functions."""
import json
import logging
import os
import shutil
import tempfile
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional # noqa: F401
import requests
from netconan import netconan
from requests import HTTPError
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from pybatfish.datamodel.answer import Answer # noqa: F401
from pybatfish.exception import BatfishException
from pybatfish.question.question import QuestionBase
if TYPE_CHECKING:
from pybatfish.client.session import Session # noqa: F401
METADATA_FILENAME = "metadata"
_FILE_PARSE_STATUS_QUESTION = {
"class": "org.batfish.question.initialization.FileParseStatusQuestion",
"differential": False,
"instance": {"instanceName": "__fileParseStatus"},
}
_INIT_INFO_QUESTION = {
"class": "org.batfish.question.InitInfoQuestionPlugin$InitInfoQuestion",
"differential": False,
"instance": {"instanceName": "__initInfo"},
}
_INIT_ISSUES_QUESTION = {
"class": "org.batfish.question.initialization.InitIssuesQuestion",
"differential": False,
"instance": {"instanceName": "__initIssues"},
}
# Note: this is a Tuple to enforce immutability.
_INIT_INFO_QUESTIONS = (
_INIT_INFO_QUESTION,
_INIT_ISSUES_QUESTION,
_FILE_PARSE_STATUS_QUESTION,
)
_S3_BUCKET = "batfish-diagnostics"
_S3_REGION = "us-west-2"
_UPLOAD_MAX_TRIES = 3
_UPLOAD_RETRY_BACKOFF = 0.3
# Setup a session, configure retry policy
_requests_session = requests.Session()
_adapter = HTTPAdapter(
max_retries=Retry(
total=_UPLOAD_MAX_TRIES,
backoff_factor=_UPLOAD_RETRY_BACKOFF,
status_forcelist=[500, 502, 503, 504, 104],
# Retry on all calls, including POST
method_whitelist=False,
)
)
_requests_session.mount("https://", _adapter)
def upload_diagnostics(
session: "Session",
metadata: Dict[str, Any],
bucket: str = _S3_BUCKET,
region: str = _S3_REGION,
dry_run: bool = True,
netconan_config: Optional[str] = None,
questions: Iterable[Dict[str, Any]] = _INIT_INFO_QUESTIONS,
resource_prefix: str = "",
proxy: Optional[str] = None,
) -> str:
"""
Fetch, anonymize, and optionally upload snapshot initialization information.
:param session: Batfish session to use for running diagnostics questions
:type session: :class:`~pybatfish.client.session.Session`
:param metadata: additional metadata to upload with the diagnostics
:type metadata: dict[str, Any]
:param bucket: name of the AWS S3 bucket to upload to
:type bucket: string
:param region: name of the region containing the bucket
:type region: string
:param dry_run: if True, upload is skipped and the anonymized files will be stored locally for review. If False, anonymized files will be uploaded to the specified S3 bucket
:type dry_run: bool
:param netconan_config: path to Netconan configuration file
:type netconan_config: string
:param questions: list of question templates to run and upload
    :type questions: list[dict]
:param resource_prefix: prefix to append to any uploaded resources
:type resource_prefix: str
:param proxy: proxy URL to use when uploading data.
:return: location of anonymized files (local directory if doing dry run, otherwise upload ID)
:rtype: string
"""
logger = logging.getLogger(__name__)
tmp_dir = tempfile.mkdtemp()
try:
for template in questions:
q = QuestionBase(template, session)
instance_name = q.get_name()
try:
ans = q.answer()
if not isinstance(ans, Answer):
raise BatfishException(
"question.answer() did not return an Answer: {}".format(ans)
)
content = json.dumps(ans.dict(), indent=4, sort_keys=True)
except BatfishException as e:
content = "Failed to answer {}: {}".format(instance_name, e)
logger.warning(content)
with open(os.path.join(tmp_dir, instance_name), "w") as f:
f.write(content)
tmp_dir_anon = tempfile.mkdtemp()
if questions:
_anonymize_dir(tmp_dir, tmp_dir_anon, netconan_config)
finally:
shutil.rmtree(tmp_dir)
with open(os.path.join(tmp_dir_anon, METADATA_FILENAME), "w") as f:
f.write(json.dumps(metadata))
if dry_run:
logger.info(
"See anonymized files produced by dry-run here: {}".format(tmp_dir_anon)
)
return tmp_dir_anon
try:
if bucket is None:
raise ValueError("Bucket must be set to upload init info.")
if region is None:
raise ValueError("Region must be set to upload init info.")
# Generate anonymous S3 subdirectory name
anon_dir = "{}{}".format(resource_prefix, uuid.uuid4().hex)
upload_dest = "https://{bucket}.s3-{region}.amazonaws.com/{resource}".format(
bucket=bucket, region=region, resource=anon_dir
)
_upload_dir_to_url(
upload_dest,
tmp_dir_anon,
headers={"x-amz-acl": "bucket-owner-full-control"},
proxies={"https": proxy} if proxy is not None else None,
)
logger.debug("Uploaded files to: {}".format(upload_dest))
finally:
shutil.rmtree(tmp_dir_anon)
return anon_dir
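# Illustrative sketch (not part of the original module): a minimal dry-run call. The Session
# host below is an assumption; dry_run=True keeps the anonymized files in a local directory
# for review instead of uploading them.
def _example_dry_run_diagnostics():
    from pybatfish.client.session import Session  # deferred import, illustration only
    session = Session(host="localhost")  # assumed Batfish coordinator host
    return upload_diagnostics(
        session, metadata={"contact_info": "ops@example.com"}, dry_run=True
    )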
def _anonymize_dir(
input_dir: str, output_dir: str, netconan_config: Optional[str] = None
) -> None:
"""
Anonymize files in input dir and save to output dir.
    Uses Netconan with the provided configuration file to perform anonymization. If no configuration is provided, only IP addresses and passwords are anonymized.
:param input_dir: directory containing files to anonymize
:type input_dir: string
:param output_dir: directory to store anonymized files in
:type output_dir: string
:param netconan_config: path to Netconan configuration file
:type netconan_config: string
"""
args = ["-i", str(input_dir), "-o", str(output_dir)]
if netconan_config is not None:
args.extend(["-c", netconan_config])
else:
args.extend(["-a", "-p"])
netconan.main(args)
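# Example (sketch): _anonymize_dir("/tmp/raw", "/tmp/anon") invokes netconan with
# ["-i", "/tmp/raw", "-o", "/tmp/anon", "-a", "-p"], i.e. IP-address and password
# anonymization only; passing netconan_config swaps "-a -p" for "-c <config file>".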
def get_snapshot_parse_status(session):
# type: (Session) -> Dict[str, str]
"""
Get parsing and conversion status for files and nodes in the current snapshot.
:param session: Batfish session to use for getting snapshot parse status
:type session: :class:`~pybatfish.client.session.Session`
:return: dictionary of files and nodes to parse/convert status
:rtype: dict
"""
parse_status = {} # type: Dict[str, str]
try:
answer = QuestionBase(_INIT_INFO_QUESTION, session).answer()
if not isinstance(answer, Answer):
raise BatfishException(
"question.answer() did not return an Answer: {}".format(answer)
)
if "answerElements" not in answer:
raise BatfishException("Invalid answer format for init info")
answer_elements = answer["answerElements"]
if not len(answer_elements):
raise BatfishException("Invalid answer format for init info")
# These statuses contain parse and conversion status
parse_status = answer_elements[0].get("parseStatus", {})
except BatfishException as e:
logging.getLogger(__name__).warning("Failed to check snapshot init info: %s", e)
return parse_status
def check_if_all_passed(statuses):
# type: (Dict[str, str]) -> bool
"""
Check if all items in supplied `statuses` dict passed parsing and conversion.
:param statuses: dictionary init info statuses (files/nodes to their status)
:type statuses: dict
:return: boolean indicating if all files and nodes in current snapshot passed parsing and conversion
:rtype: bool
"""
return all(statuses[key] == "PASSED" for key in statuses)
def check_if_any_failed(statuses):
# type: (Dict[str, str]) -> bool
"""
Check if any item in supplied `statuses` dict failed parsing or conversion.
:param statuses: dictionary init info statuses (files/nodes to their status)
:type statuses: dict
:return: boolean indicating if any file or node in current snapshot failed parsing or conversion
:rtype: bool
"""
return any(statuses[key] == "FAILED" for key in statuses)
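# Worked example (sketch, not part of the original module) for the two helpers above.
def _example_status_checks():
    statuses = {"configs/r1.cfg": "PASSED", "configs/r2.cfg": "FAILED"}
    assert not check_if_all_passed(statuses)  # not every entry is PASSED
    assert check_if_any_failed(statuses)      # at least one entry is FAILED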
def _upload_dir_to_url(
base_url: str,
src_dir: str,
headers: Optional[Dict] = None,
proxies: Optional[Dict] = None,
) -> None:
"""
Recursively put files from the specified directory to the specified URL.
:param base_url: URL to put files to
:type base_url: string
:param src_dir: directory containing files to upload
:type src_dir: string
"""
for root, dirs, files in os.walk(src_dir):
for name in files:
path = os.path.join(root, name)
rel_path = os.path.relpath(path, src_dir)
with open(path, "rb") as data:
resource = "{}/{}".format(base_url, rel_path)
r = _requests_session.put(
resource, data=data, headers=headers, proxies=proxies
)
if r.status_code != 200:
raise HTTPError(
"Failed to upload resource: {} with status code {}".format(
resource, r.status_code
)
)
def warn_on_snapshot_failure(session):
# type: (Session) -> None
"""
Check if snapshot passed and warn about any parsing or conversion issues.
:param session: Batfish session to check for snapshot failure
:type session: :class:`~pybatfish.client.session.Session`
"""
logger = logging.getLogger(__name__)
statuses = get_snapshot_parse_status(session)
if check_if_any_failed(statuses):
logger.warning(
"""\
Your snapshot was initialized but Batfish failed to parse one or more input files. You can proceed but some analyses may be incorrect. You can help the Batfish developers improve support for your network by running:
bf.upload_diagnostics(dry_run=False, contact_info='<optional email address>')
to share private, anonymized information. For more information, see the documentation with:
help(bf.upload_diagnostics)"""
)
elif not check_if_all_passed(statuses):
logger.warning(
"""\
Your snapshot was successfully initialized but Batfish failed to fully recognize some lines in one or more input files. Some unrecognized configuration lines are not uncommon for new networks, and it is often fine to proceed with further analysis. You can help the Batfish developers improve support for your network by running:
bf.upload_diagnostics(dry_run=False, contact_info='<optional email address>')
to share private, anonymized information. For more information, see the documentation with:
help(bf.upload_diagnostics)"""
)
|
batfish/pybatfish
|
pybatfish/client/_diagnostics.py
|
Python
|
apache-2.0
| 11,725
|
#!/usr/bin/python
import urllib2
import tempfile
import json
# pprint to print json data on screen
from pprint import pprint
# fetching Google data
url = "http://finance.google.com/finance/info?q=rpower"
response = urllib2.urlopen(url)
#storing google data in temp file
temp_html = tempfile.NamedTemporaryFile(mode='w+')
temp_html.write(response.read())
temp_html.seek(0)
# deleting unnecessary characters and making valid json temp file
temp_json = tempfile.NamedTemporaryFile(mode='w+')
delete_list = ["//" , "\n"]
for line in temp_html:
for word in delete_list:
line = line.replace(word, "")
temp_json.write(line)
temp_html.close()
temp_json.seek(0)
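# Illustrative sketch of the cleanup above (values are made up): the raw Google response
# starts with "//" and spans several lines, e.g.
#   // [ { "id": "XXXX", "t" : "RPOWER", "l_fix" : "175.40", ... } ]
# removing the "//" prefix and the newlines leaves a plain JSON array that json.load()
# below can parse.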
# reading the json data from file
data = json.load(temp_json)
# printing json data stored in variable
# print "pretty print"
# pprint(data)
print "Fetched Data"
print data[0]['c_fix'] # change fix decimal
print data[0]['ccol'] # ??? usuall "chr"
print data[0]['cp_fix'] # closing percentage fix decimal
print data[0]['e'] # Stock type NSE
print data[0]['id'] # ID
print data[0]['l_fix'] # last value fix decimal
print data[0]['lt'] # last trans Jul 30, 3:29PM GMT+5:30
print data[0]['lt_dts'] # last trans date 2014-07-30T15:29:59Z
print data[0]['ltt'] # last trans time 3:29PM GMT+5:30
print data[0]['pcls_fix'] # prev day closing with fixed decimal
print data[0]['s'] # ? usually "0"
print data[0]['t'] # stock
# closing files
# temp_html.close()
temp_json.close()
# printing file names
# print temp_html.name
# print temp_json.name
|
krthkj/learningPython
|
GoogleJson.py
|
Python
|
mit
| 1,529
|
#!/usr/bin/python
import functools
import optparse
import sys
# Install the Python unittest2 package before you run this script.
import unittest
import os
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
#: whether to skip tests which require authentication
SKIP_AUTH_TESTS = int(os.environ.get('SKIP_AUTH_TESTS', True))
skip_auth = functools.partial(
unittest.skipIf, SKIP_AUTH_TESTS, 'Skipped test which needs authentication'
)
def main(sdk_path, test_path):
sys.path.insert(0, sdk_path)
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
# Uncomment to get logging during tests
# logging.getLogger().setLevel(logging.DEBUG)
unittest.TextTestRunner(verbosity=2).run(suite)
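# Example invocation (sketch; the script name and both paths are placeholders):
#   python run_tests.py /usr/local/google_appengine ./tests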
if __name__ == '__main__':
parser = optparse.OptionParser(USAGE)
options, args = parser.parse_args()
if len(args) != 2:
print 'Error: Exactly 2 arguments required.'
parser.print_help()
sys.exit(1)
SDK_PATH = args[0]
TEST_PATH = args[1]
main(SDK_PATH, TEST_PATH)
|
balanced/status.balancedpayments.com
|
tests/__init__.py
|
Python
|
mit
| 1,201
|
import numpy as np
import pandas as pd
# from scipy.stats import gamma
np.random.seed(181336)
number_regions = 5
number_strata = 10
number_units = 5000
units = np.linspace(0, number_units - 1, number_units, dtype="int32") + 10 * number_units
units = units.astype("str")
sample = pd.DataFrame(units)
sample.rename(columns={0: "unit_id"}, inplace=True)
sample["region_id"] = "xx"
for i in range(number_units):
sample.loc[i]["region_id"] = sample.iloc[i]["unit_id"][0:2]
sample["cluster_id"] = "xxx"
for i in range(number_units):
sample.loc[i]["cluster_id"] = sample.iloc[i]["unit_id"][0:4]
area_type = pd.DataFrame(np.unique(sample["cluster_id"]))
area_type.rename(columns={0: "cluster_id"}, inplace=True)
area_type["area_type"] = np.random.choice(("urban", "rural"), area_type.shape[0], p=(0.4, 0.6))
sample = pd.merge(sample, area_type, on="cluster_id")
sample["response_status"] = np.random.choice(
("IN", "RR", "NR", "UK"), number_units, p=(0.01, 0.81, 0.15, 0.03)
)
# print(pd.crosstab(sample["region_id"], sample["response_status"]))
sample["educ_level"] = np.random.choice(
("0. Primary", "1. High-School", "2. University"), number_units, p=(0.10, 0.60, 0.30),
)
# print(pd.crosstab(sample["region_id"], sample["educ_level"]))
sample["income"] = 0
low_income = 30000
primary = sample["educ_level"] == "0. Primary"
sample.income[primary] = np.random.gamma(low_income / 30e3, 30e3, np.sum(primary))
middle_income = 60000
highschool = sample["educ_level"] == "1. High-School"
sample.income[highschool] = np.random.gamma(
middle_income / 10e3, 10e3, np.sum(highschool)
) + np.random.normal(low_income / 4, low_income / 4, np.sum(highschool))
high_income = 90000
university = sample["educ_level"] == "2. University"
sample.income[university] = np.random.gamma(
high_income / 15e3, 15e3, np.sum(university)
) + np.random.normal(low_income / 2, low_income / 2, np.sum(university))
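# Note (sketch): np.random.gamma(shape, scale, size) has mean shape * scale, so the gamma
# draws above are centred near 30k (primary), 60k (high school) and 90k (university); the
# added normal terms shift the latter two up by roughly 7.5k and 15k on average before the
# income_level thresholds below are applied.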
sample["income_level"] = "middle"
sample.loc[sample["income"] >= 100000, "income_level"] = "high"
sample.loc[sample["income"] <= 50000, "income_level"] = "low"
# print(pd.crosstab(sample["region_id"], sample["income_level"]))
# print(pd.crosstab(sample["educ_level"], sample["income_level"]))
sample.loc[sample["response_status"] == "NR", "income"] = np.nan
sample.loc[sample["response_status"] == "UK", "income"] = np.nan
sample.loc[sample["response_status"] == "NR", "income_level"] = ""
sample.loc[sample["response_status"] == "UK", "income_level"] = ""
sample.loc[sample["response_status"] == "NR", "educ_level"] = ""
sample.loc[sample["response_status"] == "UK", "educ_level"] = ""
# print(sample[primary])
# print(sample[highschool])
# print(sample[university])
sample["design_wgt"] = np.round(sample["cluster_id"].astype("int") / 10, 0)
# print(sum(sample["design_wgt"]))
# print(sample.sample(25))
# print(
# sample[
# [
# "region_id",
# "cluster_id",
# "area_type",
# "response_status",
# "educ_level",
# "income",
# "design_wgt",
# ]
# ].sample(50)
# )
sample.to_csv("./tests/weighting/synthetic_income_data.csv")
|
survey-methods/samplics
|
tests/weighting/data_weights.py
|
Python
|
mit
| 3,158
|
"""
This script can be used to ssh to a cloud server started by GNS3. It copies
the ssh keys for a server to a temp file on disk and starts ssh using the
keys.
Right now it only connects to the first cloud server listed in the config
file.
"""
import getopt
import os
import sys
from PyQt4 import QtCore, QtGui
SCRIPT_NAME = os.path.basename(__file__)
def parse_cmd_line(argv):
"""
Parse command line arguments
argv: Passed in sys.argv
"""
usage = """
USAGE: %s [-l] [-s <server_num>]
If no options are supplied a connection to server 1 will be opened.
Options:
-h, --help Display this menu :)
-l, --list List instances that are tracked
-s, --server-num Connect to this server number (1-indexed)
""" % (SCRIPT_NAME)
short_args = "hls:"
long_args = ("help", "list", "server-num=")
try:
opts, extra_opts = getopt.getopt(argv[1:], short_args, long_args)
except getopt.GetoptError as e:
print("Unrecognized command line option or missing required argument: %s" % (e))
print(usage)
sys.exit(2)
cmd_line_option_list = {'action': 'ssh', 'server': '1'}
for opt, val in opts:
if opt in ("-h", "--help"):
print(usage)
sys.exit(0)
elif opt in ("-l", "--list"):
cmd_line_option_list['action'] = 'list'
elif opt in ("-s", "--server-num"):
cmd_line_option_list['server'] = val
return cmd_line_option_list
def setup():
if sys.platform.startswith('win') or sys.platform.startswith('darwin'):
QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat)
app = QtGui.QApplication([])
app.setOrganizationName("GNS3")
app.setOrganizationDomain("gns3.net")
app.setApplicationName("GNS3")
if not os.path.isfile(QtCore.QSettings().fileName()):
print('Config file {} not found! Aborting...'.format(QtCore.QSettings().fileName()))
sys.exit(1)
print('Config file: {}'.format(QtCore.QSettings().fileName()))
def read_cloud_settings():
settings = QtCore.QSettings()
settings.beginGroup("CloudInstances")
instances = []
# Load the instances
size = settings.beginReadArray("cloud_instance")
for index in range(0, size):
settings.setArrayIndex(index)
name = settings.value('name')
host = settings.value('host')
private_key = settings.value('private_key')
public_key = settings.value('public_key')
uid = settings.value('id')
instances.append((name, host, private_key, public_key, uid))
if len(instances) == 0:
raise Exception("Could not find any servers")
return instances
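# Sketch (not part of the original script): each entry returned above is a 5-tuple
#   (name, host, private_key, public_key, uid)
# e.g. ("cloud-server-1", "203.0.113.10", "<PEM private key>", "<public key>", "abc123"),
# where all example values are made up.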
def main():
options = parse_cmd_line(sys.argv)
setup()
instances = read_cloud_settings()
if options['action'] == 'ssh':
name, host, private_key, public_key, uid = instances[int(options['server']) - 1]
print('Instance name: {}'.format(name))
print('Host ip: {}'.format(host))
public_key_path = '/tmp/id_rsa.pub'
open(public_key_path, 'w').write(public_key)
private_key_path = '/tmp/id_rsa'
open(private_key_path, 'w').write(private_key)
cmd = 'chmod 0600 {}'.format(private_key_path)
os.system(cmd)
print('Per-instance ssh keys written to {}'.format(private_key_path))
cmd = 'ssh -i /tmp/id_rsa root@{}'.format(host)
print(cmd)
os.system(cmd)
elif options['action'] == 'list':
print('ID Name IP UID')
for idx, info in enumerate(instances):
name, host, private_key, public_key, uid = info
print('{:2d} {} {} {}'.format(idx + 1, name, host, uid))
return 0
if __name__ == "__main__":
sys.exit(main())
|
noplay/gns3-gui
|
scripts/ssh_to_server.py
|
Python
|
gpl-3.0
| 3,813
|
__author__ = 'f.forti'
DEBUG = True
class Error(Exception):
"""
    Base class for exceptions.
    TO BE INHERITED BY EVERY EXCEPTION CREATED.
"""
def __init__(self, msg):
"""
:param msg:
:return:
"""
msg = "--->ATTENTION! " + msg
super(Error, self).__init__(msg)
self.msg = msg
class constructionError(Error):
def __init__(self, message):
msg = "There is an error in the construction __init__. \n "+message
super(Error, self).__init__(msg)
def toStr(s):
if type(s) is list:
return "[ %s ]" % (reduce(lambda x, y: str(x)+", "+str(y), s))
elif type(s) is dict:
string = "{ "
for k in s:
string += "%s: %s" % (toStr(k), toStr(s[k]))
string += " }"
return string
else:
return str(s)
def dbg(s, *args):
pref = "----->DEBUG: "
if not DEBUG:
return False
if len(args) > 0:
if len(args) != s.count("%s"):
print pref+toStr(s)+toStr([toStr(arg) for arg in args])
else:
print pref+toStr(s) % tuple([toStr(arg) for arg in args])
else:
print pref+toStr(s)
def logger(func):
pref = "----->LOGGER: "
def inner(*args, **kwargs): #1
dbg("%s Function was %s", pref, str(func.__name__))
dbg("%s Arguments were: %s, %s", pref, *args, **kwargs)
returned = func(*args, **kwargs)
dbg("%s returned was: %s", pref, returned)
return returned
return inner
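# Illustrative sketch (not part of the original module): decorating a function with @logger
# prints its name, its arguments and its return value every time it is called.
#
# @logger
# def add(a, b):
#     return a + b
#
# add(2, 3) # logs the call, then logs and returns 5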
|
RainbowAcademy/ScriptingLectures
|
2015/HouseGenerator/python/utilities.py
|
Python
|
gpl-2.0
| 1,300
|
"""
17 nov. 2014
Convert a bunch of fasta files, or a single multi-fasta file, into a dictionary
"""
from __future__ import print_function
from collections import OrderedDict
import multiprocessing as mu
from os import path
import re
from pytadbit.utils.file_handling import magic_open
from functools import reduce
try:
basestring
except NameError:
basestring = str
def parse_fasta(f_names, chr_names=None, chr_filter=None, chr_regexp=None,
verbose=True, save_cache=True, reload_cache=False, only_length=False):
"""
Parse a list of fasta files, or just one fasta.
WARNING: The order is important
    :param f_names: list of paths to files, or just a single path
    :param None chr_names: pass a list of chromosome names, or just one. If None
       is passed, chromosome names will be inferred from the fasta headers
    :param None chr_filter: use only chromosomes in the input list
    :param None chr_regexp: use only chromosomes matching this regular expression
:param True save_cache: save a cached version of this file for faster
loadings (~4 times faster)
:param False reload_cache: reload cached genome
    :param False only_length: returns a dictionary of chromosome lengths, not sequences
:returns: a sorted dictionary with chromosome names as keys, and sequences
as values (sequence in upper case)
"""
if isinstance(f_names, basestring):
f_names = [f_names]
if len(f_names) == 1:
fname = f_names[0] + '_genome.TADbit'
else:
fname = path.join(path.commonprefix(f_names), 'genome.TADbit')
if path.exists(fname) and not reload_cache:
if verbose:
print('Loading cached genome')
genome_seq = OrderedDict()
with open(fname) as f_open:
for line in f_open:
if line.startswith('>'):
c = line[1:].strip()
else:
if only_length:
genome_seq[c] = len(line.strip())
else:
genome_seq[c] = line.strip()
return genome_seq
if isinstance(chr_names, basestring):
chr_names = [chr_names]
if chr_filter:
bad_chrom = lambda x: not x in chr_filter
else:
bad_chrom = lambda x: False
if chr_regexp:
chr_regexp = re.compile(chr_regexp)
else:
chr_regexp = re.compile('.*')
genome_seq = OrderedDict()
if len(f_names) == 1:
header = None
seq = []
with magic_open(f_names[0]) as fhandler:
for line in fhandler:
if line.startswith('>'):
if header:
genome_seq[header] = ''.join(seq).upper()
header = line[1:].split()[0]
if bad_chrom(header) or not chr_regexp.match(header):
header = 'UNWANTED'
elif not chr_names:
if verbose:
print('Parsing %s' % (header))
else:
header = chr_names.pop(0)
if verbose:
print('Parsing %s as %s' % (line[1:].rstrip(),
header))
seq = []
continue
seq.append(line.rstrip())
if only_length:
genome_seq[header] = len(seq)
else:
genome_seq[header] = ''.join(seq).upper()
if 'UNWANTED' in genome_seq:
del(genome_seq['UNWANTED'])
else:
for fnam in f_names:
            with magic_open(fnam) as fhandler:
try:
while True:
if not chr_names:
header = next(fhandler)
if header.startswith('>'):
header = header[1:].split()[0]
if bad_chrom(header) or not chr_regexp.match(header):
header = 'UNWANTED'
genome_seq[header] = ''
break
else:
_ = next(fhandler)
header = chr_names.pop(0)
if bad_chrom(header):
header = 'UNWANTED'
genome_seq[header] = ''
break
except StopIteration:
raise Exception('No crocodiles found, is it fasta?')
if only_length:
genome_seq[header] = sum(len(l.rstrip()) for l in fhandler)
else:
genome_seq[header] = ''.join([l.rstrip() for l in fhandler]).upper()
if 'UNWANTED' in genome_seq:
del(genome_seq['UNWANTED'])
if save_cache and not only_length:
if verbose:
print('saving genome in cache')
if len(f_names) == 1:
fname = f_names[0] + '_genome.TADbit'
else:
fname = path.join(path.commonprefix(f_names), 'genome.TADbit')
out = open(fname, 'w')
for c in genome_seq:
out.write('>%s\n%s\n' % (c, genome_seq[c]))
out.close()
return genome_seq
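# Illustrative usage sketch (not part of the original module; the path and chromosome
# names are placeholders):
#   genome = parse_fasta('genome.fa', chr_filter=['chr1', 'chr2'], verbose=False)
#   genome['chr1']  # upper-case sequence string keyed by chromosome name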
def get_gc_content(genome, resolution, chromosomes=None, n_cpus=None, by_chrom=False):
"""
    Get GC content by bins of a given size. Ns are not taken into account in the
calculation, only the number of Gs and Cs over As, Ts, Gs and Cs
:param genome: a TADbit parsed genome object
:param resolution:
:param None chromosomes: GC content only calculated over these chromosomes
:param None n_cpus: parallelize (can't parallelize more than the number of
chromosomes)
:param False by_chrom: if False returns a unique list for the full genome
"""
chromosomes = chromosomes if chromosomes else list(genome.keys())
if not n_cpus:
n_cpus = mu.cpu_count()
pool = mu.Pool(n_cpus)
get_chr_gc = _get_chr_gc_dico if by_chrom else _get_chr_gc_list
jobs = {}
for crm in chromosomes:
jobs[crm] = pool.apply_async(get_chr_gc, args=(genome[crm], resolution))
pool.close()
pool.join()
if by_chrom:
gc_content = dict((crm, jobs[crm].get()) for crm in chromosomes)
else:
gc_content = reduce(lambda x,y: x + y, (jobs[crm].get() for crm in chromosomes))
return gc_content
def _get_chr_gc_list(chrom, resolution):
gc_content = []
for pos in range(0, len(chrom), resolution):
seq = chrom[pos:pos + resolution]
try:
gc_content.append(float(seq.count('G') + seq.count('C')) /
(len(seq) - seq.count('N')))
except ZeroDivisionError:
gc_content.append(float('nan'))
return gc_content
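# Worked example (sketch): _get_chr_gc_list("ATGCNNGC", 4) returns [0.5, 1.0] -- the first
# bin "ATGC" has 2 G/C out of 4 non-N bases, the second bin "NNGC" has 2 G/C out of 2
# non-N bases.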
def _get_chr_gc_dico(chrom, reso):
gc_content = {}
for pos in range(0, len(chrom), reso):
seq = chrom[pos:pos + reso]
try:
            gc_content[pos // reso] = (float(seq.count('G') + seq.count('C')) /
(len(seq) - seq.count('N')))
except ZeroDivisionError:
            gc_content[pos // reso] = float('nan')
return gc_content
|
3DGenomes/tadbit
|
_pytadbit/parsers/genome_parser.py
|
Python
|
gpl-3.0
| 7,323
|
# arithmeticOperators.py
if __name__ == '__main__':
a = int(input())
b = int(input())
# Elegant solution, one line
print('{0} \n{1} \n{2}'.format((a + b), (a - b), (a * b)))
|
bluewitch/Code-Blue-Python
|
HR_arithmeticOperators.py
|
Python
|
mit
| 195
|