text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import print_function
import io
import os
import sys
import json
import multiprocessing
class Setup(object):
    """Interactive wizard that generates a Hamustro ``config.json``.

    Asks a series of questions on stdin (Python 2, via ``raw_input``),
    collects the answers into ``self.config`` and finally serializes
    them as pretty-printed JSON.  Typical usage::

        s = Setup()
        if s.run():
            s.save()
    """
    # Y/n answer -> boolean config value.
    OPT_BOOL = {"Y": True, "n": False}
    # "Served behind HTTPS?" answer -> signature requirement.
    OPT_SIGNATURE = {"Y": "optional", "n": "required"}
    # Supported output file formats for buffered dialects.
    OPT_FILE_FORMAT = ["csv", "json"]
    # Supported storage dialects: key -> human readable name.
    OPT_DIALECTS = {"abs": "Azure Blob Storage", "aqs": "Azure Queue Storage",
                    "s3": "Amazon Simple Storage Service (S3)", "sns": "Amazon SNS",
                    "file": "Local file"}

    def __init__(self):
        self.config = {}     # collected configuration values
        self.dialect = None  # selected dialect key, set by _dialect()

    @property
    def buffered(self):
        """True when the selected dialect buffers events before saving."""
        return self.dialect in ('s3', 'abs', 'file')

    def q(self, text, default="", choices=None, required=False):
        """Ask a single question on stdin and return the validated answer.

        Args:
            text: the question to display (choice/default hints are appended).
            default: value returned when the user just hits enter.
            choices: optional collection of accepted answers; the question is
                repeated until one of them is entered.
            required: when True, an empty answer re-asks the question.

        Returns:
            The entered value, or ``default`` on an empty answer.
        """
        if choices:
            text += " [{}]".format("/".join(choices))
        if default:
            text += " (default: {})".format(default)
        text += ": "
        while True:
            value = raw_input(text) or default
            if not value and required:
                continue  # an answer is mandatory: ask again
            if not choices:
                return value
            if value in choices:
                return value
            print("Supported options: {}".format(", ".join(choices)))

    def _logfile(self):
        # Ask for a log file path only when file logging was requested.
        self.config['logfile'] = self.q("Log filepath", default="output.log", required=True) \
            if self.OPT_BOOL[self.q("Do you want to log into a file", default="n", choices=self.OPT_BOOL.keys())] \
            else ""

    def _security(self):
        # The request signature may be optional when TLS already protects
        # the transport.
        self.config['signature'] = self.OPT_SIGNATURE[self.q("Hamustro will be served behind HTTPS?", default="n", choices=self.OPT_SIGNATURE.keys())]
        self.config['shared_secret'] = self.q("Please set a shared secret key", required=True)

    def _dialect(self):
        # Select the storage dialect and bind the dialect specific handlers
        # (storage-mode questionnaire and credential questionnaire).
        print("\nHamustro can send your events into ...")
        for key, title in self.OPT_DIALECTS.items():
            print("  [{}] {}".format(key, title))
        self.dialect = self.q("Please choose a dialect", choices=self.OPT_DIALECTS.keys())
        self.config['dialect'] = self.dialect
        self._storage = self._buffered if self.buffered else self._simple
        self._dialect_options = getattr(self, self.dialect)

    def _workers(self):
        # Recommendation: 4*CPU+1 workers for unbuffered dialects, CPU+1
        # for buffered ones (buffered workers spend less time waiting).
        rec_worker_size = multiprocessing.cpu_count()
        if not self.buffered:
            rec_worker_size *= 4
        rec_worker_size += 1
        print("\nHamustro is using multiple workers in parallel to process multiple requests at the same time.")
        self.config['max_worker_size'] = int(self.q("How many worker do you need?", default=rec_worker_size, required=True))
        self.config['max_queue_size'] = int(self.q("Queue size", default=rec_worker_size * 20, required=True))

    def _flush(self):
        # Flush settings only make sense for buffered dialects.
        if not self.buffered:
            return
        print("\nHamustro can flush events with the flush API if maintenance key is configured.")
        self.config['maintenance_key'] = self.q("Maintenance key", default="mk", required=True) \
            if self.OPT_BOOL[self.q("Do you want to use the flush API?", default="n", choices=self.OPT_BOOL.keys())] \
            else ""
        print("\nHamustro can flush periodically if automatic flush interval is configured.")
        self.config['auto_flush_interval'] = int(self.q("Automatic flush interval in minutes", default=60, required=True)
                                                 if self.OPT_BOOL[self.q("Do you want to setup automatic flush?", default="n", choices=self.OPT_BOOL.keys())]
                                                 else 0)

    def _buffered(self):
        # Storage-mode questions for buffered dialects (s3, abs, file).
        print("\nHamustro's workers collect events in the memory to increase the performance.")
        self.config['buffer_size'] = int(self.q("Define the buffer size/worker", required=True))
        self.config['spread_buffer_size'] = self.OPT_BOOL[self.q("Do you want to randomize the buffer size near your setting to avoid flush conflicts?", choices=self.OPT_BOOL.keys(), default='Y')]

    def _simple(self):
        # Storage-mode questions for non-buffered dialects (sns, aqs).
        print("\nHamustro send the incoming messages immediately to your selected target.")
        self.config['retry_attempt'] = int(self.q("When the saving has failed, how many times do you want to try again before we remove the event?", default=3, required=True))

    def s3(self):
        """Credential questionnaire for the Amazon S3 dialect."""
        return {
            'access_key_id': self.q("Access Key ID", required=True),
            'secret_access_key': self.q("Secret Access Key", required=True),
            'region': self.q("Region", required=True),
            'bucket': self.q("Bucket", required=True),
            'blob_path': self.q("Blob path", required=True, default="{date}/"),
            'file_format': self.q("File output format", choices=self.OPT_FILE_FORMAT, default="json"),
            'endpoint': self.q("Endpoint", required=True)}

    def abs(self):
        """Credential questionnaire for the Azure Blob Storage dialect."""
        return {
            'account': self.q("Account", required=True),
            'access_key': self.q("Access Key", required=True),
            'container': self.q("Container", required=True),
            'blob_path': self.q("Blob path", required=True, default="{date}/"),
            'file_format': self.q("File output format", choices=self.OPT_FILE_FORMAT, default="csv")}

    def sns(self):
        """Credential questionnaire for the Amazon SNS dialect."""
        return {
            'access_key_id': self.q("Access Key ID", required=True),
            'secret_access_key': self.q("Secret Access Key", required=True),
            'region': self.q("Region", required=True),
            'topic_arn': self.q("Topic ARN", required=True)}

    def aqs(self):
        """Credential questionnaire for the Azure Queue Storage dialect."""
        return {
            'account': self.q("Account", required=True),
            'access_key': self.q("Access Key", required=True),
            'queue_name': self.q("Queue Name", required=True)}

    def file(self):
        """Credential questionnaire for the local file dialect."""
        return {
            'file_path': self.q("File path", required=True, default="{date}/"),
            'file_format': self.q("File output format", choices=self.OPT_FILE_FORMAT, default="csv"),
            # BUGFIX: map the Y/n answer to a real boolean via OPT_BOOL, like
            # every other boolean option; previously the raw "Y"/"n" string
            # was written into the config.
            'compress': self.OPT_BOOL[self.q("Do you want to compress the output files?", default="n", choices=self.OPT_BOOL.keys())]}

    def _options(self):
        # Miscellaneous collector behavior settings.
        print("\nPlease define the expected behavior of the collector")
        self.config['masked_ip'] = self.OPT_BOOL[self.q("Do you want to remove the last octet of incoming IP addresses?", choices=self.OPT_BOOL.keys(), default="n")]

    def run(self):
        """Drive the whole questionnaire.

        Returns:
            True when a configuration was assembled, False when the user
            refused to overwrite an existing configuration file.
        """
        self.path = self.q("Configuration path", default="config/config.json", required=True)
        if os.path.exists(self.path) and not self.OPT_BOOL[self.q("Configuration exists, do you want to overwrite?", choices=self.OPT_BOOL.keys(), required=True)]:
            return False
        self._logfile()
        self._security()
        self._dialect()
        self._workers()
        self._storage()
        self._flush()
        print("\nPlease set the credentials for the selected ({}) dialect:".format(self.dialect))
        self.config[self.dialect] = self._dialect_options()
        self._options()
        print("\nYour configuration file was created successfully!")
        return True

    def save(self):
        """Write the collected configuration as pretty-printed JSON."""
        with io.open(self.path, "w", encoding="utf-8") as fd:
            # Python 2: decode the dumped byte string to unicode because
            # io.open in text mode only accepts unicode.
            fd.write(json.dumps(self.config, indent=2, sort_keys=True).decode('utf-8'))
if __name__ == '__main__':
    # Run the wizard; a refused overwrite aborts with a non-zero exit code.
    setup = Setup()
    if setup.run():
        setup.save()
    else:
        sys.exit(1)
|
{
"content_hash": "ad6d5e7c8593b9dfc2820d905ae1a497",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 196,
"avg_line_length": 41.63636363636363,
"alnum_prop": 0.599617903930131,
"repo_name": "wunderlist/hamustro",
"id": "b89025f836a6e06650259013a3b44e60a010731b",
"size": "7328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "122885"
},
{
"name": "Lua",
"bytes": "1915"
},
{
"name": "Makefile",
"bytes": "2443"
},
{
"name": "Protocol Buffer",
"bytes": "1288"
},
{
"name": "Python",
"bytes": "13990"
},
{
"name": "Shell",
"bytes": "1674"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from models import TransactionStatus, SofortTransaction
class TransactionStatusInline(admin.TabularInline):
    """Read-only tabular inline listing the status history of a transaction."""
    model = TransactionStatus
    # Status rows are written by the payment flow; keep them immutable here.
    readonly_fields = ('status', 'reason', 'created_at')
    can_delete = False
    extra = 0  # no blank "add new" rows
class TransactionAdmin(admin.ModelAdmin):
    """Read-only admin view for Sofort transactions with inline status history."""
    date_hierarchy = 'created_at'
    search_fields = ('order__pk', 'transaction_number')
    readonly_fields = ('order', 'transaction_number', 'created_at', 'payment_method', 'status', 'status_reason', 'sender_html', 'recipient_html')
    list_display = ('order', 'transaction_number', 'created_at')
    inlines = [TransactionStatusInline]
    fields = ('order', 'transaction_number', ('status', 'status_reason'), 'payment_method', 'sender_html', 'recipient_html', 'created_at')

    def sender_html(self, obj):
        """Render the multi-line sender address with <br> line breaks."""
        return obj.sender.replace('\n', '<br>')
    sender_html.allow_tags = True

    def recipient_html(self, obj):
        """Render the multi-line recipient address with <br> line breaks."""
        return obj.recipient.replace('\n', '<br>')
    recipient_html.allow_tags = True

admin.site.register(SofortTransaction, TransactionAdmin)
|
{
"content_hash": "d73b4b6c2944309778b793bdaa759202",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 145,
"avg_line_length": 37.758620689655174,
"alnum_prop": 0.6904109589041096,
"repo_name": "piquadrat/django-shop-sofortpayment",
"id": "312f46872ded0e83036701e3a7e2851c27d89d81",
"size": "1118",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "shop_sofortpayment/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28891"
}
],
"symlink_target": ""
}
|
def skip_has_account_middleware(func):
    """Mark *func* as exempt from the HasAccountMiddleware check.

    The middleware inspects the ``skip_has_account_middleware`` attribute
    on the view; the view itself is returned unmodified otherwise.
    """
    setattr(func, "skip_has_account_middleware", True)
    return func
|
{
"content_hash": "c2ab3010e183e87b981dfe0cdb8d8cbd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 31.5,
"alnum_prop": 0.708994708994709,
"repo_name": "ulope/guardhouse",
"id": "736b3b119925c714ec3dcb932439bf48674d19f1",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guardhouse/main/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "37782"
},
{
"name": "Ruby",
"bytes": "624"
}
],
"symlink_target": ""
}
|
"""
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#root of project
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'csqwlmc8s55o($rt6ozh7u+ui9zb-et00w$d90j8$^!nvj41_r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'yourgmail@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
# Application definition
INSTALLED_APPS = (
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party apps
'crispy_forms',
'registration',
#my apps
'newsletter',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_in_pro", "our_static"),
#os.path.join(BASE_DIR, "static_in_env"),
#'/var/www/static/',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "media_root")
#Crispy FORM TAGs SETTINGS
CRISPY_TEMPLATE_PACK = 'bootstrap3'
#DJANGO REGISTRATION REDUX SETTINGS
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
|
{
"content_hash": "8829e194b166aa65fe5a724dfe775c95",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 87,
"avg_line_length": 24.80536912751678,
"alnum_prop": 0.6920995670995671,
"repo_name": "joshdavies89/e-commerce",
"id": "e4adc543a818eac7837af56a761a016dcc23ab42",
"size": "3696",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/ecommerce/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "CSS",
"bytes": "43338"
},
{
"name": "HTML",
"bytes": "14099"
},
{
"name": "JavaScript",
"bytes": "79091"
},
{
"name": "Python",
"bytes": "22805"
}
],
"symlink_target": ""
}
|
"""Client support for network related api calls."""
import json
from hil.client.base import ClientBase
from hil.client.base import check_reserved_chars
class Network(ClientBase):
    """Client-side calls to query and manipulate HIL network objects
    and their relations.
    """

    def list(self):
        """Lists all networks under HIL """
        url = self.object_url('networks')
        response = self.httpClient.request("GET", url)
        return self.check_response(response)

    @check_reserved_chars()
    def list_network_attachments(self, network, project):
        """Lists nodes connected to a network"""
        url = self.object_url('network', network, 'attachments')
        if project != "all":
            # Restrict the listing to a single project.
            response = self.httpClient.request("GET", url,
                                               params={'project': project})
        else:
            response = self.httpClient.request("GET", url)
        return self.check_response(response)

    @check_reserved_chars()
    def show(self, network):
        """Shows attributes of a network. """
        response = self.httpClient.request(
            "GET", self.object_url('network', network))
        return self.check_response(response)

    @check_reserved_chars(slashes_ok=['net_id'])
    def create(self, network, owner, access, net_id):
        """Create a link-layer <network>.
        See docs/networks.md for details.
        """
        url = self.object_url('network', network)
        body = {
            'owner': owner, 'access': access,
            'net_id': net_id
        }
        response = self.httpClient.request("PUT", url, data=json.dumps(body))
        return self.check_response(response)

    @check_reserved_chars()
    def delete(self, network):
        """Delete a <network>. """
        response = self.httpClient.request(
            "DELETE", self.object_url('network', network))
        return self.check_response(response)

    @check_reserved_chars()
    def grant_access(self, project, network):
        """Grants <project> access to <network>. """
        url = self.object_url('network', network, 'access', project)
        return self.check_response(self.httpClient.request("PUT", url))

    @check_reserved_chars()
    def revoke_access(self, project, network):
        """Removes access of <network> from <project>. """
        url = self.object_url('network', network, 'access', project)
        return self.check_response(self.httpClient.request("DELETE", url))
|
{
"content_hash": "1193ec3b9579d64ba710c2262d5803d0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 75,
"avg_line_length": 35.2,
"alnum_prop": 0.6006493506493507,
"repo_name": "CCI-MOC/haas",
"id": "0c405e087c325427f19a5ecc1dcb53201924d43a",
"size": "2464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hil/client/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "252862"
}
],
"symlink_target": ""
}
|
import unittest
from pale import Resource, ResourcePatch
from pale.fields import (BaseField, IntegerField, ListField, ResourceField,
ResourceListField, StringField)
class Stats(object):
    """Minimal stand-in model tracking a single login counter."""

    def __init__(self):
        # Every fresh Stats starts with zero recorded logins.
        self.logins = 0
class Counter(object):
    """Named counter model with an optional starting value (default 0)."""

    def __init__(self, key, value=0):
        self.key, self.value = key, value
class StatsResource(Resource):
    """Pale resource wrapping :class:`Stats` — a single integer field."""
    _value_type = 'Test "stats" resource for patches'
    _underlying_model = Stats

    logins = IntegerField("Number of logins")
class CounterResource(Resource):
    """Pale resource wrapping :class:`Counter`.

    The ``name`` field maps onto the model attribute ``key`` via
    ``property_name``.
    """
    _value_type = 'Test repeated nested resources'
    _underlying_model = Counter

    name = StringField("Name of counter",
                       property_name='key')
    value = IntegerField("Value of counter")
class User(object):
    """Plain model used by the patch tests.

    Has:
        tokens - list of strings
        counters - list of Counter
        id - string id
        username - string username
    """
    def __init__(self, id, username):
        # NOTE: ``basestring`` makes this module Python-2 only.
        assert isinstance(username, basestring)
        self.username = username
        assert isinstance(id, basestring)
        self.id = id
        # Nested/collection attributes start empty; the tests patch them.
        self.stats = Stats()
        self.counters = []
        self.tokens = []
class UserResource(Resource):
    """Pale resource for :class:`User`.

    Covers all field kinds exercised by the patch tests: scalar fields,
    a nested resource, a list of nested resources, and a list of strings.
    """
    _value_type = 'Test "user" resource for patches'
    _underlying_model = User

    username = StringField("Username")
    id = StringField("User ID")
    stats = ResourceField("Test of a nested resource",
                          resource_type=StatsResource)
    counters = ResourceListField("List of misc. counters",
                                 resource_type=CounterResource)
    tokens = ListField("List of string tokens",
                       item_type=StringField)
class ResourcePatchTests(unittest.TestCase):
    """Tests applying a ResourcePatch dict onto an underlying model."""

    def setUp(self):
        super(ResourcePatchTests, self).setUp()

    def test_patch_resource(self):
        # Patch scalar, nested-resource, resource-list and plain-list
        # fields in one go; the unknown 'bad_field' must be ignored.
        user = User(
            id="001",
            username="soundofjw",
        )
        patch_data = {
            'username': 'ammoses',
            'stats': {
                'logins': 12
            },
            'counters': [
                {'name': 'products', 'value': 36}
            ],
            'tokens': [
                'gold-coin'
            ],
            'bad_field': True,
        }
        user_resouce = UserResource()
        # Baseline render before the patch is applied.
        dt = user_resouce._render_serializable(user, None)
        self.assertEqual(dt['username'], 'soundofjw')
        self.assertEqual(dt['stats']['logins'], 0)
        self.assertEqual(dt['counters'], [])
        self.assertEqual(dt['tokens'], [])
        # Apply the patch directly to the model, tolerating unknown keys.
        patch = ResourcePatch(patch_data, user_resouce)
        patch.ignore_missing_fields = True
        patch.apply_to_model(user)
        # Re-render: every patched field must be reflected.
        dt = user_resouce._render_serializable(user, None)
        self.assertEqual(dt['username'], 'ammoses')
        self.assertEqual(dt['stats']['logins'], 12)
        self.assertEqual(dt['counters'][0], {'name': 'products', 'value': 36})
        self.assertEqual(dt['tokens'][0], 'gold-coin')
|
{
"content_hash": "74a243f57bc0294c46f18e0821b754ef",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 26.125,
"alnum_prop": 0.5902255639097744,
"repo_name": "Loudr/pale",
"id": "08d21518f2493ebe538718ec6342e35d5a1f85f9",
"size": "2950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resource_patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175823"
},
{
"name": "Vim script",
"bytes": "49"
}
],
"symlink_target": ""
}
|
"""
A full GN block has the following computation steps
1. Compute updated edge attributes
2. Aggregate edge attributes per node
3. Compute updated node attributes
4. Aggregate edge attributes globally
5. Aggregate node attributes globally
6. Compute updated global attribute
[1] https://arxiv.org/pdf/1806.01261.pdf
"""
from typing import Dict, Sequence
import tensorflow as tf
from tensorflow.keras import constraints, initializers, regularizers
from tensorflow.keras.layers import Layer
from megnet import activations
from megnet.utils.typing import OptStrOrCallable
class GraphNetworkLayer(Layer):
    """
    Implementation of a graph network layer. Current implementation is based on
    neural networks for each update function, and sum or mean for each
    aggregation function

    Method:
        call(inputs, mask=None): the logic of the layer, returns the final graph
        compute_output_shape(input_shape): compute static output shapes, returns list of tuple shapes
        build(input_shape): initialize the weights and biases for each function
        phi_e(inputs): update function for bonds and returns updated bond attribute e_p
        rho_e_v(e_p, inputs): aggregate updated bonds e_p to per atom attributes, b_e_p
        phi_v(b_e_p, inputs): update the atom attributes by the results from previous step b_e_p and all the inputs
            returns v_p.
        rho_e_u(e_p, inputs): aggregate bonds to global attribute
        rho_v_u(v_p, inputs): aggregate atom to global attributes
        phi_u(b_e_p, b_v_p, inputs): update the global/state attributes from the aggregated tensors
        get_config(): part of keras interface for serialization
    """

    def __init__(
        self,
        activation: OptStrOrCallable = None,
        use_bias: bool = True,
        kernel_initializer: OptStrOrCallable = "glorot_uniform",
        bias_initializer: OptStrOrCallable = "zeros",
        kernel_regularizer: OptStrOrCallable = None,
        bias_regularizer: OptStrOrCallable = None,
        activity_regularizer: OptStrOrCallable = None,
        kernel_constraint: OptStrOrCallable = None,
        bias_constraint: OptStrOrCallable = None,
        **kwargs,
    ):
        """
        Args:
            activation (str): Default: None. The activation function used for each
                sub-neural network. Examples include 'relu', 'softmax', 'tanh',
                'sigmoid' and etc.
            use_bias (bool): Default: True. Whether to use the bias term in the
                neural network.
            kernel_initializer (str): Default: 'glorot_uniform'. Initialization
                function for the layer kernel weights,
            bias_initializer (str): Default: 'zeros'
            kernel_regularizer (str): Default: None. Regularizer for the kernel
                weights
            bias_regularizer (str): Default: None. Regularizer for the bias
            activity_regularizer (str): Default: None. The regularization function
                for the output
            kernel_constraint (str): Default: None. Keras constraint for kernel
                values
            bias_constraint (str): Default: None .Keras constraint for bias values
            **kwargs:
        """
        # Accept keras-style ``input_dim`` as an alias for a 1-D input_shape.
        if "input_shape" not in kwargs and "input_dim" in kwargs:
            kwargs["input_shape"] = (kwargs.pop("input_dim"),)
        # Resolve all string/callable configuration through the keras getters
        # so that get_config()/serialization round-trips.
        self.activation = activations.get(activation)  # noqa
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        super().__init__(**kwargs)

    def call(self, inputs: Sequence, mask=None) -> Sequence:
        """
        Core logic of graph network: runs the six GN-block steps (see the
        module docstring) by chaining the abstract update/aggregate hooks.

        Args:
            inputs (Sequence): input tensors
            mask (tensor): mask tensor
        Returns: output tensor
        """
        e_p = self.phi_e(inputs)
        b_ei_p = self.rho_e_v(e_p, inputs)
        v_p = self.phi_v(b_ei_p, inputs)
        b_e_p = self.rho_e_u(e_p, inputs)
        b_v_p = self.rho_v_u(v_p, inputs)
        u_p = self.phi_u(b_e_p, b_v_p, inputs)
        return [v_p, e_p, u_p]

    def phi_e(self, inputs: Sequence) -> tf.Tensor:
        r"""
        This is for updating the edge attributes
        ek' = phi_e(ek, vrk, vsk, u)

        Args:
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            updated edge/bond attributes
        """
        raise NotImplementedError

    def rho_e_v(self, e_p: tf.Tensor, inputs: Sequence) -> tf.Tensor:
        r"""
        This is for step 2, aggregate edge attributes per node
        Ei' = {(ek', rk, sk)} with rk =i, k=1:Ne

        Args:
            e_p (tf.Tensor): the updated edge attributes
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            edge/bond to node/atom aggregated tensor
        """
        raise NotImplementedError

    def phi_v(self, b_ei_p: tf.Tensor, inputs: Sequence):
        r"""
        Step 3. Compute updated node attributes
        v_i' = phi_v(\bar e_i, vi, u)

        Args:
            b_ei_p (tf.Tensor): edge-to-node aggregated tensor
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            updated node/atom attributes
        """
        raise NotImplementedError

    def rho_e_u(self, e_p: tf.Tensor, inputs: Sequence) -> tf.Tensor:
        r"""
        let V' = {v'} i = 1:Nv
        let E' = {(e_k', rk, sk)} k = 1:Ne
        \bar e' = rho_e_u(E')

        Args:
            e_p (tf.Tensor): updated edge/bond attributes
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            edge/bond to global/state aggregated tensor
        """
        raise NotImplementedError

    def rho_v_u(self, v_p: tf.Tensor, inputs: Sequence) -> tf.Tensor:
        r"""
        \bar v' = rho_v_u(V')

        Args:
            v_p (tf.Tensor): updated atom/node attributes
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            atom/node to global/state aggregated tensor
        """
        raise NotImplementedError

    def phi_u(self, b_e_p: tf.Tensor, b_v_p: tf.Tensor, inputs: Sequence) -> tf.Tensor:
        r"""
        u' = phi_u(\bar e', \bar v', u)

        Args:
            b_e_p (tf.Tensor): edge/bond to global aggregated tensor
            b_v_p (tf.Tensor): node/atom to global aggregated tensor
            inputs (Sequence): list or tuple for the graph inputs
        Returns:
            updated global/state attributes
        """
        raise NotImplementedError

    def get_config(self) -> Dict:
        """
        Part of keras layer interface, where the signature is converted into a dict

        Returns:
            configurational dictionary
        """
        config = {
            "activation": activations.serialize(self.activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(self.activity_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        # Merge with the base Layer config (name, dtype, trainable, ...).
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))  # noqa
|
{
"content_hash": "ad3138f851ccde13881b29be30baeeaa",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 115,
"avg_line_length": 38.89949748743719,
"alnum_prop": 0.61619945743444,
"repo_name": "materialsvirtuallab/megnet",
"id": "8f40d62cea2eacf7fadd75dd891697581fc0d2a4",
"size": "7741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "megnet/layers/graph/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "4156"
},
{
"name": "HTML",
"bytes": "11342"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Jupyter Notebook",
"bytes": "1346431"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "268660"
},
{
"name": "R",
"bytes": "10398"
},
{
"name": "Shell",
"bytes": "380"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
import cv2
import numpy as np
# Every input card is first resized to this (width, height).
NORM_SIZE = (1100, 800)
# Size of the merged five-field image produced by Image.merge_fields().
MERGE_SIZE = (700, 530)
# Field rectangles below are [[x0, y0], [x1, y1]] in NORM_SIZE coordinates;
# the *_TITLE rectangles are relative to the cropped field and get whited out.
# ID number field
NUM_FIELD = np.array([[450, 210], [920, 270]], dtype='int')
NUM_TITLE = np.array([[0, 0], [70, 60]], dtype='int')
# Name field
NAME_FIELD = np.array([[340, 280], [1040, 420]], dtype='int')
NAME_TITLE = np.array([[0, 0], [120, 70]], dtype='int')
# Date of birth field
DOB_FIELD = np.array([[340, 420], [1040, 490]], dtype='int')
DOB_TITLE = np.array([[0, 0], [180, 70]], dtype='int')
# Birth place field
BPLACE_FIELD = np.array([[340, 490], [1040, 625]], dtype='int')
BPLACE_TITLE = np.array([[0, 0], [230, 60]], dtype='int')
# Current living place field
CPLACE_FIELD = np.array([[340, 625], [1040, 755]], dtype='int')
CPLACE_TITLE = np.array([[0, 0], [340, 60]], dtype='int')
# Contour boxes smaller than this are discarded as noise.
TEXT_MIN_WIDTH = 5
TEXT_MIN_HEIGHT = 20
class Span():
    """A horizontal run of text cropped out of a field image."""

    def __init__(self, x0, y0, img):
        # Offsets of the crop inside the parent field image.
        self.x0 = x0
        self.y0 = y0
        self.image = img
        # Results filled in by later pipeline stages:
        self.segcols = None             # segmentation columns in ground truth
        self.predict_segments = None    # predicted segmentation columns
        self.refine_segments = None     # refined columns (2-means clustering)
        self.predict_characters = None  # predicted character list
class Field():
    """One information field cropped from a normalized ID-card image.

    Holds the field image, hides its printed title, and segments the
    remaining content into horizontal text spans.
    """

    def __init__(self, img, name):
        self.image = img
        self.name = name
        self.spans = []  # list of Span objects, ordered top-to-bottom
        self.postprocessed_text = None  # text in field, post-processed

    def get_raw_text(self):
        """Concatenate predicted characters of all spans, one line per span."""
        text = '\n'.join([''.join([c for c in s.predict_characters]) for s in self.spans])
        return text

    def hide_title(self, title):
        """White out the printed title rectangle ([[x0, y0], [x1, y1]])."""
        self.image[title[0][1]:title[1][1], title[0][0]:title[1][0]] = 255

    def find_text_spans(self):
        """Detect horizontal text spans in the field image and store them.

        Thresholds dark pixels, strengthens strokes with directional
        erode/dilate passes, then merges characters of a line into one
        blob with a wide horizontal erosion before contour extraction.
        """
        img_b = self.image[:, :, 0]
        img_g = self.image[:, :, 1]
        # A pixel counts as text when both blue and green channels are dark.
        idx_black = np.bitwise_and(img_b < 170, img_g < 170)  # DOF
        thresh = np.ones_like(self.image) * 255
        thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
        thresh[idx_black] = 0
        kernel1 = np.ones((3, 1), np.uint8)  # DOF
        kernel2 = np.ones((1, 3), np.uint8)  # DOF
        # Reinforce strokes in both directions, keep the darker of the two.
        horizontal = cv2.erode(thresh, kernel1, iterations=1)
        horizontal = cv2.dilate(horizontal, kernel2, iterations=4)
        vertical = cv2.erode(thresh, kernel2, iterations=1)
        vertical = cv2.dilate(vertical, kernel1, iterations=4)
        thresh = np.minimum(horizontal, vertical)
        # Wide horizontal erosion merges characters of one line into a blob.
        kernel3 = np.ones((1, 5), np.uint8)
        thresh = cv2.erode(thresh, kernel3, iterations=10)
        big_boxes = self._get_contour_boxes(thresh)
        for x0, y0, w, h in big_boxes:
            # Pad the crop vertically; clamp the end row to the image height.
            y0 = max(0, y0 - 10)
            y_end = min(self.image.shape[0], y0 + h + 20)
            img = self.image[y0:y_end, x0:x0 + w]
            self.spans.append(Span(x0, y0, img))

    def _get_contour_boxes(self, img):
        """Return bounding boxes of external contours, sorted by top edge.

        Boxes smaller than TEXT_MIN_WIDTH x TEXT_MIN_HEIGHT are dropped.
        """
        mask = cv2.bitwise_not(img)
        # OpenCV 3 returns (image, contours, hierarchy); 2 and 4 return two
        # values.
        if cv2.__version__[0] == '3':
            _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        else:
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        contour_big_boxes = []
        for contour in contours:
            rect = cv2.boundingRect(contour)
            _, ymin, width, height = rect
            if (width < TEXT_MIN_WIDTH or height < TEXT_MIN_HEIGHT):
                continue
            # Insertion sort by ymin.  BUGFIX: bound the scan by the list
            # length -- the previous condition indexed one past the end
            # whenever the new box lay below every stored box.
            pos = 0
            while pos < len(contour_big_boxes) and contour_big_boxes[pos][1] < ymin:
                pos += 1
            contour_big_boxes.insert(pos, rect)
        return contour_big_boxes
class Image():
    """A normalized ID-card image split into its five information fields."""

    def __init__(self, img, name=None):
        # Normalize every input card to a fixed size first.
        self.image = cv2.resize(img, dsize=NORM_SIZE, interpolation=cv2.INTER_CUBIC)
        if name:
            # assumes name is "base.extension" — TODO confirm callers
            self.base = name.split('.')[0]
            self.extension = name.split('.')[1]
        self.fields = self._get_fields()  # list of infor fields
        for f in self.fields:
            f.find_text_spans()

    def merge_fields(self):
        # merge 5 information fields (images) into one image to be recognized
        padding = 20
        merged_img = np.ones((MERGE_SIZE[1]+padding*2, MERGE_SIZE[0]+padding*2), dtype="uint8") * 255
        cur_y = padding
        cur_x = padding
        merged_img = cv2.cvtColor(merged_img, cv2.COLOR_GRAY2RGB)
        for f in self.fields:
            height, width = f.image.shape[:2]
            # Stack the field images vertically, left-aligned at cur_x.
            merged_img[cur_y:cur_y+height, cur_x:cur_x+width] = f.image
            cur_y += height
        return merged_img

    def _get_fields(self):
        # return all 5 information fields as 5 independent images
        num = Field(img=self.image[NUM_FIELD[0][1]:NUM_FIELD[1][1], NUM_FIELD[0][0]:NUM_FIELD[1][0]], name='num')
        num.hide_title(NUM_TITLE)
        name = Field(img=self.image[NAME_FIELD[0][1]:NAME_FIELD[1][1], NAME_FIELD[0][0]:NAME_FIELD[1][0]], name='name')
        name.hide_title(NAME_TITLE)
        dob = Field(img=self.image[DOB_FIELD[0][1]:DOB_FIELD[1][1], DOB_FIELD[0][0]:DOB_FIELD[1][0]], name='dob')
        dob.hide_title(DOB_TITLE)
        bplace = Field(img=self.image[BPLACE_FIELD[0][1]:BPLACE_FIELD[1][1], BPLACE_FIELD[0][0]:BPLACE_FIELD[1][0]], name='bplace')
        bplace.hide_title(BPLACE_TITLE)
        cplace = Field(img=self.image[CPLACE_FIELD[0][1]:CPLACE_FIELD[1][1], CPLACE_FIELD[0][0]:CPLACE_FIELD[1][0]], name='cplace')
        cplace.hide_title(CPLACE_TITLE)
        fields = [num, name, dob, bplace, cplace]
        return fields
|
{
"content_hash": "7997c59f3f395abc56f695ef51851b32",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 125,
"avg_line_length": 31.240506329113924,
"alnum_prop": 0.654578606158833,
"repo_name": "trangnm58/idrec",
"id": "c32a025b13c748110567cd852bce8d79d778813e",
"size": "4936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IdRecDemo/idocr/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2010"
},
{
"name": "Python",
"bytes": "106671"
}
],
"symlink_target": ""
}
|
def application(environ, start_response):
    """Minimal WSGI app: greet the first path segment (default: 'web2')."""
    start_response('200 OK', [('Content-Type', 'text/html')])
    name = environ['PATH_INFO'][1:] or 'web2'
    html = '<h1>Hello, %s!</h1>' % name
    # WSGI bodies are iterables of byte strings.
    return [html.encode('utf-8')]
|
{
"content_hash": "20bfae7b10435f309ade7b49ba7d51f6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 71,
"avg_line_length": 42.2,
"alnum_prop": 0.6018957345971564,
"repo_name": "PeytonXu/learn-python",
"id": "5de3a52b9e1735b90d8e73af1129e15207461c74",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learn/www.liaoxuefeng.com/webHello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "347"
},
{
"name": "HTML",
"bytes": "669"
},
{
"name": "Python",
"bytes": "108773"
},
{
"name": "Visual Basic",
"bytes": "78"
}
],
"symlink_target": ""
}
|
from ._assets import (
    Asset,
    Equity,
    Future,
    make_asset_array,
    CACHE_FILE_TEMPLATE
)
from .assets import (
    AssetFinder,
    AssetConvertible,
    AssetFinderCachedEquities
)
# Explicit public API of the assets package: re-exports the asset types
# and finder classes imported above.
__all__ = [
    'Asset',
    'Equity',
    'Future',
    'AssetFinder',
    'AssetFinderCachedEquities',
    'AssetConvertible',
    'make_asset_array',
    'CACHE_FILE_TEMPLATE'
]
|
{
"content_hash": "2b606277821acd75807dfb1c73d8c035",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 32,
"avg_line_length": 16.565217391304348,
"alnum_prop": 0.6167979002624672,
"repo_name": "nborggren/zipline",
"id": "51b4358add28ba73d17c4b23b26ec0b752b4e7f6",
"size": "964",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zipline/assets/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "800"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "168399"
},
{
"name": "Python",
"bytes": "1869654"
},
{
"name": "Shell",
"bytes": "4284"
}
],
"symlink_target": ""
}
|
import warnings
from odin.compatibility import deprecated
class TestDeprecated:
    """Checks that @deprecated emits a removal warning when the target is used."""

    @staticmethod
    def _logged_messages(warning_log):
        # Sorting the message texts makes the comparison order-independent.
        return sorted(str(entry.message) for entry in warning_log)

    def test_function_deprecation_warning(self):
        @deprecated("No longer used.", category=UserWarning)
        def deprecated_function():
            pass
        with warnings.catch_warnings(record=True) as warning_log:
            deprecated_function()
        # Compare the message values
        assert self._logged_messages(warning_log) == [
            "deprecated_function is deprecated and scheduled for removal. No longer used.",
        ]

    def test_class_deprecation_warning(self):
        @deprecated("No longer used.", category=UserWarning)
        class DeprecatedClass:
            pass
        with warnings.catch_warnings(record=True) as warning_log:
            DeprecatedClass()
        # Compare the message values
        assert self._logged_messages(warning_log) == [
            "DeprecatedClass is deprecated and scheduled for removal. No longer used.",
        ]
|
{
"content_hash": "e15ded3624996751d54e6eea4ec0459e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 91,
"avg_line_length": 32.64705882352941,
"alnum_prop": 0.6216216216216216,
"repo_name": "python-odin/odin",
"id": "46240ab8a91f8efe408c495d4d5c7b9d71966981",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "tests/test_compatibility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "490731"
}
],
"symlink_target": ""
}
|
import logging
import time
import uuid
import os
import Queue
import pytest
from chatexchange.client import Client
from chatexchange import events
import live_testing
logger = logging.getLogger(__name__)
# (host, room_id) pairs the live echo test below runs against.
TEST_ROOMS = [
    ('stackexchange.com', '14219'), # Charcoal Sandbox
]
# On Travis CI the posted test message links back to the exact build and
# commit; outside CI it links to the project repository instead. In both
# cases "{0}" in the final template is filled with a per-run nonce.
if (os.environ.get('TRAVIS_BUILD_ID') and
        os.environ.get('TRAVIS_REPO_SLUG') and
        os.environ.get('TRAVIS_COMMIT')):
    TEST_MESSAGE_FORMAT = (
        "[ [ChatExchange@Travis](https://travis-ci.org/"
        "{0[TRAVIS_REPO_SLUG]}/builds/{0[TRAVIS_BUILD_ID]} \"This is "
        "a test message for ChatExchange using the nonce {{0}}.\") ] "
        "This is a test of [{0[TRAVIS_REPO_SLUG]}@{short_commit}]("
        "https://github.com/{0[TRAVIS_REPO_SLUG]}/commit/{0[TRAVIS_COMMIT]})."
    ).format(os.environ, short_commit=os.environ['TRAVIS_COMMIT'][:8])
else:
    TEST_MESSAGE_FORMAT = (
        "[ [ChatExchange@localhost](https://github.com/Manishearth/"
        "ChatExchange/ \"This is a test message for ChatExchange using "
        "the nonce {0}.\") ] This is a test message for ChatExchange.")
if live_testing.enabled:
    @pytest.mark.parametrize('host_id,room_id', TEST_ROOMS)
    @pytest.mark.timeout(240)
    def test_se_message_echo(host_id, room_id):
        """
        Tests that we are able to send a message, and recieve it back,
        send a reply, and recieve that back, within a reasonable amount
        of time.
        This is a lot of complexity for a single test, but we don't want
        to flood Stack Exchange with more test messages than necessary.
        """
        client = Client(host_id)
        client.login(
            live_testing.email,
            live_testing.password)
        # Per-event wait budget; the whole test is capped by the 240s timeout.
        timeout_duration = 60
        # Both watchers (polling and socket) feed this queue, each entry
        # tagged with a boolean marking which source produced it.
        pending_events = Queue.Queue()
        def get_event(predicate):
            """
            Waits until it has seen a message passing the specified
            predicate from both polling and sockets.
            Asserts that it has not waited longer than the specified
            timeout, and asserts that the events from difference sources
            have the same ID.
            This may dequeue any number of additional unrelated events
            while it is running, so it's not appropriate if you are
            trying to wait for multiple events at once.
            """
            socket_event = None
            polling_event = None
            timeout = time.time() + timeout_duration
            while (not (socket_event and polling_event)
                   and time.time() < timeout):
                try:
                    is_socket, event = pending_events.get(timeout=1)
                except Queue.Empty:
                    # Nothing arrived within a second; re-check the deadline.
                    continue
                if predicate(event):
                    logger.info(
                        "Expected event (is_socket==%r): %r",
                        is_socket, event)
                    if is_socket:
                        # Each source must report the matching event once.
                        assert socket_event is None
                        socket_event = event
                    else:
                        assert polling_event is None
                        polling_event = event
                else:
                    logger.debug("Unexpected events: %r", event)
            # Both transports must have delivered the same event.
            assert socket_event and polling_event
            assert type(socket_event) is type(polling_event)
            assert socket_event.id == polling_event.id
            return socket_event
        logger.debug("Joining chat")
        room = client.get_room(room_id)
        room.join()
        room.watch_polling(lambda event, _:
                           pending_events.put((False, event)), 5)
        room.watch_socket(lambda event, _:
                          pending_events.put((True, event)))
        time.sleep(2) # Avoid race conditions
        # The nonce makes this run's messages distinguishable from others.
        test_message_nonce = uuid.uuid4().hex
        test_message_content = TEST_MESSAGE_FORMAT.format(test_message_nonce)
        logger.debug("Sending test message")
        room.send_message(test_message_content)
        # Decorator usage: binds the first event matching the predicate.
        @get_event
        def test_message_posted(event):
            return (
                isinstance(event, events.MessagePosted)
                and test_message_nonce in event.content
            )
        logger.debug("Observed test edit")
        test_reply_nonce = uuid.uuid4().hex
        test_reply_content = TEST_MESSAGE_FORMAT.format(test_reply_nonce)
        logger.debug("Sending test reply")
        test_message_posted.message.reply(test_reply_content)
        # XXX: The limitations of get_event don't allow us to also
        # XXX: look for the corresponding MessagePosted event.
        @get_event
        def test_reply(event):
            return (
                isinstance(event, events.MessageReply)
                and test_reply_nonce in event.content
            )
        logger.debug("Observed test reply")
        assert test_reply.parent_message_id == test_message_posted.message.id
        assert test_reply.message.parent.id == test_reply.parent_message_id
        assert test_message_posted.message.id == test_message_posted.message.id
        assert test_reply.message.parent is test_message_posted.message
        # unsafe - html content is unstable; may be inconsistent between views
        # assert test_reply.message.parent.content == test_message_posted.content
        test_edit_nonce = uuid.uuid4().hex
        test_edit_content = TEST_MESSAGE_FORMAT.format(test_edit_nonce)
        logger.debug("Sending test edits")
        # Send a lot of edits in a row, to ensure we don't lose any
        # from throttling being ignored.
        test_message_posted.message.edit(
            "**this is a** test edit and should be edited again")
        test_message_posted.message.edit(
            "this is **another test edit** and should be edited again")
        test_message_posted.message.edit(
            "this is **yet** another test edit and **should be edited again**")
        test_message_posted.message.edit(test_edit_content)
        @get_event
        def test_edit(event):
            return (
                isinstance(event, events.MessageEdited)
                and test_edit_nonce in event.content
            )
        logger.debug("Observed final test edit")
        assert test_message_posted.message is test_edit.message
        assert test_edit.message.id == test_message_posted.message.id
        assert test_edit.message.edits == 4
        assert test_edit.message.content_source == test_edit_content
        # it should be safe to assume that there isn't so much activity
        # that these events will have been flushed out of recent_events.
        assert test_message_posted in client._recently_gotten_objects
        assert test_reply in client._recently_gotten_objects
        assert test_edit in client._recently_gotten_objects
        client.logout()
|
{
"content_hash": "e773f88a14d095dd50f718f672def405",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 81,
"avg_line_length": 35.08205128205128,
"alnum_prop": 0.6019587779564392,
"repo_name": "hichris1234/ChatExchange",
"id": "cba4cd1843bb9947e4bfcd28708deb08c2526223",
"size": "6841",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "80607"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
}
|
import os
import unittest
from Constants.PathConstants import PathConstants
from Constants.MessageKeys import MessageKeys
from Domain.Presentation import Presentation
class PresentationTest(unittest.TestCase):
    """Unit tests for the Presentation domain object."""

    def setUp(self):
        # Fresh presentation plus the fixture file names used throughout.
        self.presentation = Presentation()
        self.presentation_filenames = ["a.jpg", "g.mp4", "test_text.txt"]

    def set_presentation_elements(self, presentation_filenames):
        # Point the presentation at the test media folder and load the files.
        self.presentation.set_source_folder(PathConstants.TEST_MEDIA_FOLDER)
        self.presentation.set_files(presentation_filenames)
        self.presentation.get_presentation_elements_from_path()

    def _fixture_path(self, filename):
        # Absolute path of a fixture file inside the test media folder.
        return os.path.join(PathConstants.TEST_MEDIA_FOLDER, filename)

    def test_init_works_as_inteded(self):
        self.assertEqual(self.presentation.get_presentation_content(), [])
        self.assertEqual(self.presentation.presentation_elements, [])
        self.assertEqual(-1, self.presentation.index)
        self.assertEqual(self.presentation.media_path, PathConstants.MEDIA_FOLDER)

    def test_get_presentation_elements_from_path(self):
        self.set_presentation_elements(self.presentation_filenames)
        self.assertEqual(self.presentation_filenames, self.presentation.get_presentation_content())
        # A second load must be idempotent with respect to element count.
        self.presentation.get_presentation_elements_from_path()
        elements = self.presentation.presentation_elements
        self.assertEqual(len(elements), 3)
        for position, filename in enumerate(self.presentation_filenames):
            self.assertEqual(elements[position].source_file, self._fixture_path(filename))

    def test_pic_file_is_supported(self):
        self.assertTrue(Presentation.filetype_is_supported(self._fixture_path("a.jpg")))

    def test_not_pic_file_is_not_supported(self):
        self.assertFalse(Presentation.filetype_is_supported(self._fixture_path("test_text.txt")))

    def test_get_function_when_presentation_elements_is_none(self):
        self.assertIsNone(self.presentation.get(0))

    def test_get_function_lower_boundary(self):
        self.set_presentation_elements(self.presentation_filenames)
        self.assertIsNone(self.presentation.get(-1))

    def test_get_function_upper_boundary(self):
        self.set_presentation_elements(self.presentation_filenames)
        self.assertIsNone(self.presentation.get(len(self.presentation_filenames)))

    def test_get_first_element(self):
        self.set_presentation_elements(self.presentation_filenames)
        first = self.presentation.get(0)
        self.assertEqual(first.source_file, self._fixture_path(self.presentation_filenames[0]))

    def test_get_last_element(self):
        self.set_presentation_elements(self.presentation_filenames)
        last_index = len(self.presentation.presentation_elements) - 1
        last = self.presentation.get(last_index)
        self.assertEqual(last.source_file, self._fixture_path(self.presentation_filenames[-1]))

    def test_get_message_dictionary(self):
        self.set_presentation_elements(self.presentation_filenames)
        message = self.presentation.get_message_dictionary()
        self.assertEqual(message[MessageKeys.index_key], -1)
        self.assertEqual(message[MessageKeys.presentation_content_key], self.presentation_filenames)
        # Advancing the presentation must be reflected in the next dictionary.
        self.presentation.get_next()
        message = self.presentation.get_message_dictionary()
        self.assertEqual(message[MessageKeys.index_key], 0)
        self.assertEqual(message[MessageKeys.presentation_content_key], self.presentation_filenames)
|
{
"content_hash": "c860072eed7dd3b1b5207736249a103c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 138,
"avg_line_length": 51.111111111111114,
"alnum_prop": 0.7247282608695652,
"repo_name": "RemuTeam/Remu",
"id": "3a54acf423c25af4729de6d24208de89802d2f4d",
"size": "3680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/tests/Domain/test_Presentation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "221"
},
{
"name": "Python",
"bytes": "218196"
}
],
"symlink_target": ""
}
|
from __future__ import division
import os.path
import random
import urllib
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from PIL import Image
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
class ExternalImage(models.Model):
    """A remote image tracked by URL, with its HTTP caching headers."""
    url = models.URLField()
    etag = models.TextField(null=True)
    last_modified = models.TextField(null=True)
    last_updated = models.DateTimeField() # This one is in UTC
    width = models.PositiveIntegerField(null=True)
    height = models.PositiveIntegerField(null=True)
    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        """Stamp last_updated with the current UTC time on every save.

        Bug fix: forward the caller's force_insert/force_update values and
        positional args to the parent save() instead of hard-coding both
        flags to False, which silently discarded the caller's intent.
        """
        self.last_updated = datetime.utcnow()
        super(ExternalImage, self).save(force_insert, force_update, *args, **kwargs)
def get_external_image_dir():
    """Return the directory used to cache external images.

    Honours settings.EXTERNAL_IMAGE_DIR when defined; otherwise falls
    back to an 'external_images' folder under settings.CACHE_DIR.
    """
    fallback = os.path.join(settings.CACHE_DIR, 'external_images')
    return getattr(settings, 'EXTERNAL_IMAGE_DIR', fallback)
class ExternalImageSized(models.Model):
external_image = models.ForeignKey(ExternalImage)
width = models.PositiveIntegerField()
height = models.PositiveIntegerField()
slug = models.SlugField()
content_type = models.TextField()
def get_filename(self):
external_image_dir = get_external_image_dir()
if not self.slug:
while not self.slug or ExternalImageSized.objects.filter(slug=self.slug).count():
self.slug = "%08x" % random.randint(0, 16**8-1)
if not os.path.exists(external_image_dir):
os.makedirs(external_image_dir)
return os.path.join(external_image_dir, self.slug)
def get_absolute_url(self):
return reverse('external_media:image', args=[self.slug])
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if not self.id:
response = urllib.urlopen(self.external_image.url)
data = StringIO(response.read())
im = Image.open(data)
size = im.size
ratio = size[1] / size[0]
if self.width >= size[0]:
resized = im
else:
try:
resized = im.resize((self.width, int(round(self.width*ratio))), Image.ANTIALIAS)
except IOError, e:
if e.message == "cannot read interlaced PNG files":
# Ain't nothing can be done until you upgrade PIL to 1.1.7
resized = im
else:
raise
self.width, self.height = resized.size
try:
resized.save(self.get_filename(), format='jpeg')
self.content_type = 'image/jpeg'
except IOError, e:
try:
resized.convert('RGB').save(self.get_filename(), format='jpeg')
self.content_type = 'image/jpeg'
except IOError:
open(self.get_filename(), 'wb').write(data.getvalue())
self.content_type = response.headers['content-type']
self.external_image.width = size[0]
self.external_image.height = size[1]
super(ExternalImageSized, self).save(force_insert=False, force_update=False, **kwargs)
def delete(self):
try:
os.unlink(self.get_filename())
except OSError:
# Ignore errors where we're trying to delete a file that's already
# been deleted
pass
super(ExternalImageSized, self).delete()
|
{
"content_hash": "2f585f64a7ce8e9e4c9ab8a9267a213b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 103,
"avg_line_length": 35.89,
"alnum_prop": 0.6057397603789356,
"repo_name": "mollyproject/mollyproject",
"id": "44938bd29fae370c0dd23018a15f394e26eff55d",
"size": "3589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molly/external_media/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90319"
},
{
"name": "JavaScript",
"bytes": "76592"
},
{
"name": "Python",
"bytes": "1120664"
},
{
"name": "Shell",
"bytes": "4042"
},
{
"name": "XSLT",
"bytes": "11864"
}
],
"symlink_target": ""
}
|
"""Functions and classes related to optimization (weight updates).
Source: https://github.com/google-research/bert
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss,
                     init_lr,
                     num_train_steps,
                     num_warmup_steps,
                     use_tpu,
                     freeze_layer_fn=None):
  """Creates an optimizer training op.

  Args:
    loss: Scalar loss tensor to minimize.
    init_lr: Initial (peak) learning rate; decayed linearly to zero over
      `num_train_steps`.
    num_train_steps: Total number of training steps.
    num_warmup_steps: If truthy, the learning rate ramps up linearly for
      this many steps before the decay schedule takes over.
    use_tpu: If True, wraps the optimizer in a tf.tpu.CrossShardOptimizer.
    freeze_layer_fn: Optional predicate on variable names; variables for
      which it returns True are excluded from training.

  Returns:
    A train_op that applies one AdamWeightDecay update (with global-norm
    gradient clipping) and increments the global step.
  """
  global_step = tf.train.get_or_create_global_step()
  learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
  # Implements linear decay of the learning rate.
  learning_rate = tf.train.polynomial_decay(
      learning_rate,
      global_step,
      num_train_steps,
      end_learning_rate=0.0,
      power=1.0,
      cycle=False)
  # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
  # learning rate will be `global_step/num_warmup_steps * init_lr`.
  if num_warmup_steps:
    global_steps_int = tf.cast(global_step, tf.int32)
    warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
    global_steps_float = tf.cast(global_steps_int, tf.float32)
    warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
    warmup_percent_done = global_steps_float / warmup_steps_float
    warmup_learning_rate = init_lr * warmup_percent_done
    # Blend warmup vs. decayed rate without a conditional op: is_warmup is
    # 1.0 during warmup and 0.0 afterwards.
    is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
    learning_rate = ((1.0 - is_warmup) * learning_rate +
                     is_warmup * warmup_learning_rate)
  # It is recommended that you use this optimizer for fine tuning, since this
  # is how the model was trained (note that the Adam m/v variables are NOT
  # loaded from init_checkpoint.)
  optimizer = AdamWeightDecayOptimizer(
      learning_rate=learning_rate,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
  if use_tpu:
    optimizer = tf.tpu.CrossShardOptimizer(optimizer)
  tvars = tf.trainable_variables()
  if freeze_layer_fn:
    tvars = [v for v in tvars if not freeze_layer_fn(v.name)
             ] # freeze layers based on function
  tf.logging.info("**** Trainable Variables ****")
  for var in tvars:
    tf.logging.info(" name = %s, shape = %s", var.name, var.shape)
  grads = tf.gradients(loss, tvars)
  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
  train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=global_step)
  # Normally the global step update is done inside of `apply_gradients`.
  # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
  # a different optimizer, you should probably take this line out.
  new_global_step = global_step + 1
  train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
  """A basic Adam optimizer that includes "correct" L2 weight decay.

  The decay is applied directly to the parameter update (decoupled weight
  decay) rather than added to the loss, so it does not interact with the
  Adam m/v accumulators.
  """
  def __init__(self,
               learning_rate,
               weight_decay_rate=0.0,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-6,
               exclude_from_weight_decay=None,
               name="AdamWeightDecayOptimizer"):
    """Constructs a AdamWeightDecayOptimizer.

    Args:
      learning_rate: Float or scalar tensor, the step size.
      weight_decay_rate: Decoupled weight decay coefficient.
      beta_1: Exponential decay rate for the first-moment estimate.
      beta_2: Exponential decay rate for the second-moment estimate.
      epsilon: Small constant added to the denominator for stability.
      exclude_from_weight_decay: Optional list of regex patterns; matching
        parameter names are not decayed.
      name: Name for the optimizer.
    """
    super(AdamWeightDecayOptimizer, self).__init__(False, name)
    self.learning_rate = learning_rate
    self.weight_decay_rate = weight_decay_rate
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.exclude_from_weight_decay = exclude_from_weight_decay
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """See base class."""
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue
      param_name = self._get_variable_name(param.name)
      # Non-trainable accumulators for the first and second moments.
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      # Standard Adam update.
      next_m = (
          tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
      next_v = (
          tf.multiply(self.beta_2, v) +
          tf.multiply(1.0 - self.beta_2, tf.square(grad)))
      update = next_m / (tf.sqrt(next_v) + self.epsilon)
      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want to decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD.
      if self._do_use_weight_decay(param_name):
        update += self.weight_decay_rate * param
      update_with_lr = self.learning_rate * update
      next_param = param - update_with_lr
      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    return tf.group(*assignments, name=name)
  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True
  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name."""
    # Strip the trailing ":<n>" output suffix, e.g. "w:0" -> "w".
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name
|
{
"content_hash": "cc6fae0dd6721a15cc24d2b00ed59e8e",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 78,
"avg_line_length": 34.40340909090909,
"alnum_prop": 0.6340214698596202,
"repo_name": "google-research/google-research",
"id": "58126fd26bf6816b5e8ae6fe055393db494407bf",
"size": "7271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goemotions/bert/optimization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import math
import os
import re
import tokenize
import unittest
import warnings
from django.core.validators import RegexValidator, EmailValidator
from django.db import models, migrations
from django.db.migrations.writer import MigrationWriter, SettingsReference
from django.test import TestCase
from django.conf import settings
from django.utils import datetime_safe, six
from django.utils.deconstruct import deconstructible
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import get_default_timezone, utc, FixedOffset
import custom_migration_operations.operations
import custom_migration_operations.more_operations
from .models import FoodQuerySet, FoodManager
class TestModel1(object):
    # Fixture for the serializer tests below: `thing` references a function
    # straight from the class body, which the migration writer cannot
    # serialize (see test_serialize_direct_function_reference).
    def upload_to(self):
        return "somewhere dynamic"
    thing = models.FileField(upload_to=upload_to)
class WriterTests(TestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
l = {}
try:
exec(string, globals(), l)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return l
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
    def assertSerializedEqual(self, value):
        # Round-tripping through the serializer must reproduce the value.
        self.assertEqual(self.serialize_round_trip(value), value)
    def assertSerializedResultEqual(self, value, target):
        # Compare the raw (string, imports) pair the serializer produces.
        self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
    def test_serialize(self):
        """
        Tests various different forms of the serializer.
        This does not care about formatting, just that the parsed result is
        correct, so we always exec() the result and check that.
        """
        # Basic values
        self.assertSerializedEqual(1)
        self.assertSerializedEqual(1.2)
        # Non-finite floats can't round-trip via ==, so check via math.
        self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
        self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
        self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
        self.assertSerializedEqual(None)
        self.assertSerializedEqual(b"foobar")
        string, imports = MigrationWriter.serialize(b"foobar")
        self.assertEqual(string, "b'foobar'")
        self.assertSerializedEqual("föobár")
        string, imports = MigrationWriter.serialize("foobar")
        self.assertEqual(string, "'foobar'")
        self.assertSerializedEqual({1: 2})
        self.assertSerializedEqual(["a", 2, True, None])
        self.assertSerializedEqual({2, 3, "eighty"})
        self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
        self.assertSerializedEqual(_('Hello'))
        # Builtins
        self.assertSerializedEqual([list, tuple, dict, set])
        string, imports = MigrationWriter.serialize([list, tuple, dict, set])
        self.assertEqual(string, "[list, tuple, dict, set]")
        self.assertEqual(imports, set())
        # Functions
        with six.assertRaisesRegex(self, ValueError, 'Cannot serialize function: lambda'):
            self.assertSerializedEqual(lambda x: 42)
        self.assertSerializedEqual(models.SET_NULL)
        string, imports = MigrationWriter.serialize(models.SET(42))
        self.assertEqual(string, 'models.SET(42)')
        self.serialize_round_trip(models.SET(42))
        # Datetime stuff
        self.assertSerializedEqual(datetime.datetime.utcnow())
        self.assertSerializedEqual(datetime.datetime.utcnow)
        self.assertSerializedEqual(datetime.datetime.today())
        self.assertSerializedEqual(datetime.datetime.today)
        self.assertSerializedEqual(datetime.date.today())
        self.assertSerializedEqual(datetime.date.today)
        self.assertSerializedEqual(datetime.datetime.now().time())
        self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
        self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=FixedOffset(180)))
        # datetime_safe values must serialize as plain datetime equivalents.
        safe_date = datetime_safe.date(2014, 3, 31)
        string, imports = MigrationWriter.serialize(safe_date)
        self.assertEqual(string, repr(datetime.date(2014, 3, 31)))
        self.assertEqual(imports, {'import datetime'})
        safe_time = datetime_safe.time(10, 25)
        string, imports = MigrationWriter.serialize(safe_time)
        self.assertEqual(string, repr(datetime.time(10, 25)))
        self.assertEqual(imports, {'import datetime'})
        safe_datetime = datetime_safe.datetime(2014, 3, 31, 16, 4, 31)
        string, imports = MigrationWriter.serialize(safe_datetime)
        self.assertEqual(string, repr(datetime.datetime(2014, 3, 31, 16, 4, 31)))
        self.assertEqual(imports, {'import datetime'})
        timezone_aware_datetime = datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)
        string, imports = MigrationWriter.serialize(timezone_aware_datetime)
        self.assertEqual(string, "datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)")
        self.assertEqual(imports, {'import datetime', 'from django.utils.timezone import utc'})
        # Django fields
        self.assertSerializedFieldEqual(models.CharField(max_length=255))
        self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
        # Setting references
        self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
        self.assertSerializedResultEqual(
            SettingsReference("someapp.model", "AUTH_USER_MODEL"),
            (
                "settings.AUTH_USER_MODEL",
                {"from django.conf import settings"},
            )
        )
        # Generators are serialized as their materialized tuple.
        self.assertSerializedResultEqual(
            ((x, x * x) for x in range(3)),
            (
                "((0, 0), (1, 1), (2, 4))",
                set(),
            )
        )
    def test_serialize_compiled_regex(self):
        """
        Make sure compiled regex can be serialized.
        """
        # re.U exercises flag serialization alongside the pattern itself.
        regex = re.compile(r'^\w+$', re.U)
        self.assertSerializedEqual(regex)
    def test_serialize_class_based_validators(self):
        """
        Ticket #22943: Test serialization of class-based validators, including
        compiled regexes.
        """
        validator = RegexValidator(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
        self.serialize_round_trip(validator)
        # Test with a compiled regex.
        validator = RegexValidator(regex=re.compile(r'^\w+$', re.U))
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$', 32))")
        self.serialize_round_trip(validator)
        # Test a string regex with flag
        validator = RegexValidator(r'^[0-9]+$', flags=re.U)
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=32)")
        self.serialize_round_trip(validator)
        # Test message and code
        validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
        self.serialize_round_trip(validator)
        # Test with a subclass.
        validator = EmailValidator(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
        self.serialize_round_trip(validator)
        # A deconstructible path overrides the module the class lives in.
        validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
        # Bogus deconstructible paths must fail loudly instead of emitting
        # migration code that would not import.
        validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
        with six.assertRaisesRegex(self, ImportError, "No module named '?custom'?"):
            MigrationWriter.serialize(validator)
        validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
        with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
            MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
    @unittest.skipUnless(six.PY2, "Only applies on Python 2")
    def test_serialize_direct_function_reference(self):
        """
        Ticket #22436: You cannot use a function straight from its body
        (e.g. define the method and use it in the same body)
        """
        # TestModel1.thing references upload_to from the class body itself.
        with self.assertRaises(ValueError):
            self.serialize_round_trip(TestModel1.thing)
    def test_serialize_local_function_reference(self):
        """
        Neither py2 or py3 can serialize a reference in a local scope.
        """
        # The class is defined inside this test, so the referenced function
        # is unreachable by module path.
        class TestModel2(object):
            def upload_to(self):
                return "somewhere dynamic"
            thing = models.FileField(upload_to=upload_to)
        with self.assertRaises(ValueError):
            self.serialize_round_trip(TestModel2.thing)
    def test_serialize_local_function_reference_message(self):
        """
        Make sure user is seeing which module/function is the issue
        """
        class TestModel2(object):
            def upload_to(self):
                return "somewhere dynamic"
            thing = models.FileField(upload_to=upload_to)
        # The error must name both the function and the module it was
        # searched in.
        with six.assertRaisesRegex(self, ValueError,
            '^Could not find function upload_to in migrations.test_writer'):
            self.serialize_round_trip(TestModel2.thing)
def test_simple_migration(self):
    """
    Tests serializing a simple migration.
    """
    fields = {
        'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
        'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
    }
    options = {
        'verbose_name': 'My model',
        'verbose_name_plural': 'My models',
    }
    # Exercise positional, mixed, and fully-keyword CreateModel signatures.
    migration = type(str("Migration"), (migrations.Migration,), {
        "operations": [
            migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
            migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
            migrations.CreateModel(name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)),
            migrations.DeleteModel("MyModel"),
            migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
        ],
        "dependencies": [("testapp", "some_other_one")],
    })
    writer = MigrationWriter(migration)
    output = writer.as_string()
    # It should NOT be unicode.
    self.assertIsInstance(output, six.binary_type, "Migration as_string returned unicode")
    # We don't test the output formatting - that's too fragile.
    # Just make sure it runs for now, and that things look alright.
    result = self.safe_exec(output)
    self.assertIn("Migration", result)
    # In order to preserve compatibility with Python 3.2 unicode literals
    # prefix shouldn't be added to strings.  Tokenize the generated source
    # and fail on any u'...' literal.
    tokens = tokenize.generate_tokens(six.StringIO(str(output)).readline)
    for token_type, token_source, (srow, scol), __, line in tokens:
        if token_type == tokenize.STRING:
            self.assertFalse(
                token_source.startswith('u'),
                "Unicode literal prefix found at %d:%d: %r" % (
                    srow, scol, line.strip()
                )
            )
def test_migration_path(self):
    # Apps with and without a migrations package / __init__.py must all
    # resolve to the same on-disk migration path layout.
    test_apps = [
        'migrations.migrations_test_apps.normal',
        'migrations.migrations_test_apps.with_package_model',
        'migrations.migrations_test_apps.without_init_file',
    ]
    base_dir = os.path.dirname(os.path.dirname(__file__))
    for app in test_apps:
        with self.modify_settings(INSTALLED_APPS={'append': app}):
            migration = migrations.Migration('0001_initial', app.split('.')[-1])
            expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
            writer = MigrationWriter(migration)
            # Silence warning on Python 2: Not importing directory
            # 'tests/migrations/migrations_test_apps/without_init_file/migrations':
            # missing __init__.py
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=ImportWarning)
                self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
    # Operations defined outside django.db.migrations must serialize with
    # imports for their own defining modules.
    migration = type(str("Migration"), (migrations.Migration,), {
        "operations": [
            custom_migration_operations.operations.TestOperation(),
            custom_migration_operations.operations.CreateModel(),
            migrations.CreateModel("MyModel", (), {}, (models.Model,)),
            custom_migration_operations.more_operations.TestOperation()
        ],
        "dependencies": []
    })
    writer = MigrationWriter(migration)
    output = writer.as_string()
    result = self.safe_exec(output)
    self.assertIn("custom_migration_operations", result)
    # Two classes with the same name but different modules must remain
    # distinct after the round trip.
    self.assertNotEqual(
        result['custom_migration_operations'].operations.TestOperation,
        result['custom_migration_operations'].more_operations.TestOperation
    )
def test_serialize_datetime(self):
    """
    #23365 -- Timezone-aware datetimes should be allowed.
    """
    # naive datetime
    naive_datetime = datetime.datetime(2014, 1, 1, 1, 1)
    self.assertEqual(MigrationWriter.serialize_datetime(naive_datetime),
                     "datetime.datetime(2014, 1, 1, 1, 1)")
    # datetime with utc timezone
    utc_datetime = datetime.datetime(2014, 1, 1, 1, 1, tzinfo=utc)
    self.assertEqual(MigrationWriter.serialize_datetime(utc_datetime),
                     "datetime.datetime(2014, 1, 1, 1, 1, tzinfo=utc)")
    # datetime with FixedOffset tzinfo
    # A +03:00 offset is normalized to UTC on serialization:
    # 2014-01-01 01:01+03:00 == 2013-12-31 22:01 UTC.
    fixed_offset_datetime = datetime.datetime(2014, 1, 1, 1, 1, tzinfo=FixedOffset(180))
    self.assertEqual(MigrationWriter.serialize_datetime(fixed_offset_datetime),
                     "datetime.datetime(2013, 12, 31, 22, 1, tzinfo=utc)")
def test_deconstruct_class_arguments(self):
    # Yes, it doesn't make sense to use a class as a default for a
    # CharField. It does make sense for custom fields though, for example
    # an enumfield that takes the enum class as an argument.
    class DeconstructableInstances(object):
        def deconstruct(self):
            return ('DeconstructableInstances', [], {})
    # The class itself (not an instance) must serialize to its import path.
    string = MigrationWriter.serialize(models.CharField(default=DeconstructableInstances))[0]
    self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructableInstances)")
def test_serialize_managers(self):
    # A plain Manager round-trips with no arguments.
    self.assertSerializedEqual(models.Manager())
    # A queryset-derived manager serializes as a call to as_manager().
    self.assertSerializedResultEqual(
        FoodQuerySet.as_manager(),
        ('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
    )
    # Custom managers keep their positional and keyword arguments.
    self.assertSerializedEqual(FoodManager('a', 'b'))
    self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
|
{
"content_hash": "99f471cfe2397edbae4c98b556cec16c",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 126,
"avg_line_length": 45.85558583106267,
"alnum_prop": 0.6405609364786975,
"repo_name": "webostin/django-btc",
"id": "5e7e8b75cdac54bd84f14749be1aa5eaec565e47",
"size": "16855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/migrations/test_writer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11304"
},
{
"name": "JavaScript",
"bytes": "1152"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "5473418"
},
{
"name": "Shell",
"bytes": "8205"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from dashboard.models import User, App, APICall, Webhook, WebhookTriggerHistory
class Command(BaseCommand):
    """Management command that irreversibly wipes all dashboard data."""

    help = 'Cleans Dashboard of everything'

    def handle(self, *args, **options):
        """Ask for explicit confirmation, then delete every row of each model."""
        confirmation = input("THIS WILL WIPE THESE MODELS ARE YOU SURE? "
                             "TYPE DELETE TO CONFIRM!: ")
        if confirmation != "DELETE":
            return
        # Same deletion order as before: users first, trigger history last.
        for model in (User, App, APICall, Webhook, WebhookTriggerHistory):
            model.objects.all().delete()
|
{
"content_hash": "b9a54e8188b59c65d60b4bc6d9788cf2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.6078125,
"repo_name": "uclapi/uclapi",
"id": "975a8fb496a42ce8f844e09b00e9d873ba96e0c1",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/uclapi/dashboard/management/commands/clear_dashboard_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "Dockerfile",
"bytes": "21102"
},
{
"name": "HTML",
"bytes": "6848"
},
{
"name": "JavaScript",
"bytes": "167002"
},
{
"name": "Makefile",
"bytes": "584"
},
{
"name": "PLpgSQL",
"bytes": "14989"
},
{
"name": "Python",
"bytes": "878972"
},
{
"name": "SCSS",
"bytes": "25908"
},
{
"name": "Shell",
"bytes": "13191"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
INCLUDE_DIRECTIVE_RE = re.compile(r'^#include "(.*)"')

# Headers that define the "runtime" layer: any file that (transitively)
# includes one of these is considered part of the runtime layer.
RUNTIME_LAYER_HEADERS = [
    'runtime/vm/isolate.h',
    'runtime/vm/object.h',
    'runtime/vm/raw_object.h',
    'runtime/vm/thread.h',
]

# Utility headers that must stay independent of the runtime layer.
SHOULD_NOT_DEPEND_ON_RUNTIME = [
    'runtime/vm/allocation.h',
    'runtime/vm/growable_array.h',
]


class LayeringChecker(object):
    """Checks #include layering constraints under <root>/runtime/vm."""

    def __init__(self, root):
        self.root = root
        self.worklist = set()
        # Mapping from header to the set of files it is included into.
        self.included_into = dict()
        # Files that were already parsed, to avoid double parsing.
        self.loaded = set()
        # Mapping from file (relative path) to its layer name.
        self.file_layers = {path: 'runtime' for path in RUNTIME_LAYER_HEADERS}

    def Check(self):
        """Run the full layering check; returns a list of error strings."""
        self.AddAllSourcesToWorklist(os.path.join(self.root, 'runtime/vm'))
        self.BuildIncludesGraph()
        errors = self.PropagateLayers()
        errors += self.CheckNotInRuntime(SHOULD_NOT_DEPEND_ON_RUNTIME)
        return errors

    def CheckNotInRuntime(self, files):
        """Check that given files do not depend on runtime layer."""
        errors = []
        for path in files:
            if not os.path.exists(os.path.join(self.root, path)):
                errors.append('File %s does not exist.' % (path))
            if self.file_layers.get(path) is not None:
                errors.append(
                    'LAYERING ERROR: %s includes object.h or raw_object.h' %
                    (path))
        return errors

    def BuildIncludesGraph(self):
        """Parse every file on the worklist and record its include edges."""
        while self.worklist:
            path = self.worklist.pop()
            deps = self.ExtractIncludes(path)
            self.loaded.add(path)
            for dep in deps:
                self.included_into.setdefault(dep, set()).add(path)
                if dep not in self.loaded:
                    self.worklist.add(dep)

    def PropagateLayers(self):
        """Propagate layering information through include graph.

        If A is in layer L and A is included into B then B is in layer L.
        """
        errors = []
        self.worklist = set(self.file_layers.keys())
        while self.worklist:
            path = self.worklist.pop()
            if path not in self.included_into:
                continue
            path_layer = self.file_layers[path]
            for tgt in self.included_into[path]:
                if tgt in self.file_layers:
                    if self.file_layers[tgt] != path_layer:
                        # BUG FIX: was errors.add(...), but errors is a list,
                        # so the first layer mismatch raised AttributeError
                        # instead of being reported.
                        errors.append(
                            'Layer mismatch: %s (%s) is included into %s (%s)' %
                            (path, path_layer, tgt, self.file_layers[tgt]))
                self.file_layers[tgt] = path_layer
                self.worklist.add(tgt)
        return errors

    def AddAllSourcesToWorklist(self, dir):
        """Add all *.cc and *.h files from dir recursively into worklist."""
        for name in os.listdir(dir):
            path = os.path.join(dir, name)
            if os.path.isdir(path):
                self.AddAllSourcesToWorklist(path)
            elif path.endswith(('.cc', '.h')):
                self.worklist.add(os.path.relpath(path, self.root))

    def ExtractIncludes(self, path):
        """Extract the set of quoted #include targets from the given file."""
        deps = set()
        with open(os.path.join(self.root, path), encoding='utf-8') as f:
            for line in f:
                # Includes only appear before the namespace opens; stop early.
                if line.startswith('namespace dart {'):
                    break
                m = INCLUDE_DIRECTIVE_RE.match(line)
                if m is not None:
                    header = os.path.join('runtime', m.group(1))
                    if os.path.isfile(os.path.join(self.root, header)):
                        deps.add(header)
        return deps
def DoCheck(sdk_root):
    """Run the layering check rooted at the given SDK folder."""
    checker = LayeringChecker(sdk_root)
    return checker.Check()
# Script entry point: check the current directory, print all reported
# errors, and fail the process when any were found.
if __name__ == '__main__':
    errors = DoCheck('.')
    print('\n'.join(errors))
    if errors:
        sys.exit(-1)
|
{
"content_hash": "a2b50d11663c1d3694e8b0efb817837c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 34.747899159663866,
"alnum_prop": 0.5535671100362757,
"repo_name": "dart-lang/sdk",
"id": "e8c955a0ee078a6ed2cc225e9f63e276252e2767",
"size": "4631",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "runtime/tools/compiler_layering_check.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7081"
},
{
"name": "Batchfile",
"bytes": "4407"
},
{
"name": "C",
"bytes": "1393784"
},
{
"name": "C++",
"bytes": "22197561"
},
{
"name": "CMake",
"bytes": "1598"
},
{
"name": "CSS",
"bytes": "142589"
},
{
"name": "Dart",
"bytes": "142185469"
},
{
"name": "Dockerfile",
"bytes": "495"
},
{
"name": "GAP",
"bytes": "43658"
},
{
"name": "HTML",
"bytes": "746243"
},
{
"name": "Java",
"bytes": "637086"
},
{
"name": "JavaScript",
"bytes": "238808"
},
{
"name": "Makefile",
"bytes": "9320"
},
{
"name": "Objective-C++",
"bytes": "854"
},
{
"name": "Python",
"bytes": "1227015"
},
{
"name": "SCSS",
"bytes": "23561"
},
{
"name": "Shell",
"bytes": "139201"
},
{
"name": "TeX",
"bytes": "75730"
}
],
"symlink_target": ""
}
|
import marioai
__all__ = ['Task']
class Task(object):
    '''A task handles communication with the environment.

    It decides how to evaluate the observations, potentially returning
    reinforcement rewards or fitness values, and acts as a filter between
    the agent and the environment for both sensing and acting.

    Attributes:
        env (Environment): the environment instance.
        finished (bool): whether the current episode has ended.
        reward (int): the current reward of the simulation.
        status (int): the last status value reported by the environment.
        cum_reward (int): the sum of rewards since the episode started.
        samples (int): number of steps in the current episode.
    '''

    def __init__(self, *args, **kwargs):
        '''Constructor; all arguments are forwarded to the Environment.'''
        self.env = marioai.Environment(*args, **kwargs)
        self._clear_episode_state()

    def reset(self):
        '''Reinitialize the environment and the episode bookkeeping.'''
        self.env.reset()
        self._clear_episode_state()

    def _clear_episode_state(self):
        '''Reset all per-episode counters and flags.'''
        self.finished = False
        self.reward = 0
        self.status = 0
        self.cum_reward = 0
        self.samples = 0

    def get_sensors(self):
        '''Bridge to environment.'''
        sense = self.env.get_sensors()
        # A payload of fitness_values length marks the end of the episode.
        if len(sense) == self.env.fitness_values:
            self.status, self.reward = sense[0], sense[1]
            self.finished = True
        return sense

    def perform_action(self, action):
        '''Bridge to environment.'''
        if not self.finished:
            self.env.perform_action(action)
        self.cum_reward += self.reward
        self.samples += 1
|
{
"content_hash": "12200764e3442d106d412299d27bdf6e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 27.584615384615386,
"alnum_prop": 0.5939765755716676,
"repo_name": "renatopp/marioai",
"id": "6073a7e4b07b8bb418789a838223c849f706f3f8",
"size": "1794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marioai/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "52"
},
{
"name": "Matlab",
"bytes": "139"
},
{
"name": "Python",
"bytes": "14314"
}
],
"symlink_target": ""
}
|
import logging
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from .views import ActivitiesGenericObjectView
from .views import ActivitiesView
from .views import ActivityDeleteView
from .views import ActivityEditView
from .views import ActivityRepliesView
from .views import ActivityReplyDeleteView
from .views import ActivityReplyEditView
from .views import ActivityReplyView
from .views import ActivityView
logger = logging.getLogger(__name__)
# need to map these in order for the dynamic urls to work correctly
# Each entry pairs a regex with its view class and url name; get_urls()
# below also consumes this mapping to build per-model activity urls.
# The more specific reply/delete/edit regexes are listed before the plain
# (?P<activity_id>\d+) pattern.
urlpattern_mapping = (
    {'regex': r'^/?$', 'view': ActivitiesView, 'name': 'activities_view'},
    {'regex': r'^/(?P<activity_id>\d+)/delete/?$', 'view': ActivityDeleteView, 'name': 'activity_delete'},
    {'regex': r'^/(?P<activity_id>\d+)/edit/?$', 'view': ActivityEditView, 'name': 'activity_edit'},
    {'regex': r'^/(?P<activity_id>\d+)/replies/(?P<reply_id>\d+)/delete/?$', 'view': ActivityReplyDeleteView, 'name': 'activity_reply_delete'},
    {'regex': r'^/(?P<activity_id>\d+)/replies/(?P<reply_id>\d+)/edit/?$', 'view': ActivityReplyEditView, 'name': 'activity_reply_edit'},
    {'regex': r'^/(?P<activity_id>\d+)/replies/(?P<reply_id>\d+)/?$', 'view': ActivityReplyView, 'name': 'activity_reply'},
    {'regex': r'^/(?P<activity_id>\d+)/replies/?$', 'view': ActivityRepliesView, 'name': 'activity_replies'},
    {'regex': r'^/(?P<activity_id>\d+)/?$', 'view': ActivityView, 'name': 'activity_view'},
)
# 'activities_view' is routed to the generic content-type view rather than
# the mapping's plain ActivitiesView; all other patterns come from the
# mapping above.
urlpatterns = [
    url(r'^/(?P<content_type_id>\d+)/(?P<object_id>\d+)/?$', ActivitiesGenericObjectView.as_view(), name='activities_view'),
] + [url(pattern['regex'], pattern['view'].as_view(), name=pattern['name'])
     for pattern in urlpattern_mapping if pattern['name'] != 'activities_view']
def get_urls(extend_urlpatterns, root_urlpattern_name, class_prefix=None,
             base_classes=None, model=None, root_urlpattern=None):
    """Function that dynamically creates activities urls so urls don't have to
    use generic content type ids in the urls.

    :param extend_urlpatterns: the urls patterns to extend
    :param root_urlpattern_name: this is the url pattern to extend the activity
        urls from.
    :param class_prefix: this is the string class prefix to use for the
        generated views. This will also be the prefix used for the url naming
        conventions.
    :param base_classes: the iterable of base classes to extend.
    :param model: the model to add the urls to. If no class prefix is provided,
        the model name will be used as the prefix.
    :param root_urlpattern: the root pattern to create the
    :param url_prefix: the prefix to use for the urls. Default is "activities"
        so the urls would be generated as follows:

        ./activities/(?P<activity_id>\d+)/delete/?S
        ./activities/(?P<activity_id>\d+)/edit/?S

    A call to the following:

    > get_urls(my_url_patterns,
    .          class_prefix='Foo',
    .          base_classes=(LoginRequiredViewMixin,))

    This generates a view class that resembles something along the lines of:

    class FooActivityView(LoginRequiredViewMixin, ActivityView):
        class_prefix = 'Foo'
    """
    # NOTE(review): the docstring documents a "url_prefix" parameter that the
    # signature does not accept -- the prefix is hard-coded to "activities".
    if model:
        if class_prefix is None:
            class_prefix = model.__name__
        if not hasattr(model, 'get_activities_url'):
            logger.warning("Adding activity urls to the \"{0}\" model and the "
                           "model doesn't implement a \"get_activities_url\" "
                           "method which is recommended.".format(model))
    if base_classes is None:
        base_classes = tuple()
    if root_urlpattern is None:
        # Look the root pattern up by name among the existing patterns.
        for pattern in extend_urlpatterns:
            # if the pattern doesn't have the "name" attribute, then it might be
            # an "include" and not an actual regex pattern.
            if hasattr(pattern, 'name') and pattern.name == root_urlpattern_name:
                root_urlpattern = pattern.regex.pattern
                break
    if root_urlpattern is None:
        raise ImproperlyConfigured('No url pattern found with the name: '
                                   '{0}'.format(root_urlpattern_name))
    # root url pattern can't end in any of these characters
    # (strips at most one of each, in this order)
    for char in ('$', '?', '/'):
        if root_urlpattern.endswith(char):
            root_urlpattern = root_urlpattern[:-1]
    for pattern in urlpattern_mapping:
        # generate the dynamic view
        pattern_view = pattern.get('view')
        pattern_name = pattern.get('name')
        pattern_regex = pattern.get('regex').replace('^/', '')
        class_name = '{0}{1}'.format(class_prefix, pattern_view.__name__)
        # Dynamically subclass the mapped view with the caller's mixins and
        # record the prefix on the generated class.
        ExtendedActivityView = type(class_name,
                                    base_classes + (pattern_view,),
                                    {'class_prefix': class_prefix})
        # generate the new pattern name
        pattern_name = '{0}_{1}'.format(class_prefix.lower(), pattern_name)
        url_pattern = r'{0}/activities/{1}'.format(root_urlpattern,
                                                   pattern_regex)
        if url_pattern.startswith('^/activities/'):
            url_pattern = url_pattern.replace('^/activities/', '^activities/')
        # add the pattern to urls
        extend_urlpatterns += [
            url(url_pattern, ExtendedActivityView.as_view(), name=pattern_name),
        ]
|
{
"content_hash": "4f8bc37319fc3c5036e40aeab9ab004c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 143,
"avg_line_length": 43.264,
"alnum_prop": 0.6288831360946746,
"repo_name": "InfoAgeTech/django-activities",
"id": "293656d862d1d2de5de4b4e9cd9d2a14f3ed34df",
"size": "5408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "activities/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6600"
},
{
"name": "HTML",
"bytes": "18512"
},
{
"name": "JavaScript",
"bytes": "13918"
},
{
"name": "Python",
"bytes": "127869"
}
],
"symlink_target": ""
}
|
'''
Created on Nov 23, 2010
@author: bolme
'''
import svm
import numpy as np
import tempfile
import os
class UntrainedClassifierError(Exception):
    '''Raised when a model is asked to predict before train() was called.'''
    pass
class _LabelMap:
'''Converts class labels back and forth to integer codes.'''
def __init__(self):
'''Create and initialze the mapping.'''
self._forward_map = {}
self._backward_map = {}
self._current_code = 0
def toCode(self,label):
'''Converts a label to an integer code. Codes are defined as needed.'''
if not self._forward_map.has_key(label):
self._forward_map[label] = self._current_code
self._backward_map[self._current_code] = label
self._current_code += 1
return self._forward_map[label]
def toLabel(self,code):
'''Converts a code back into a label'''
return self._backward_map[code]
class _LabelScale:
'''Converts class labels back and forth to integer codes.'''
def __init__(self):
'''Create and initialze the mapping.'''
self.mean = 0.0
self.std = 0.0
def train(self,labels,data):
labels = np.array(labels)
self.mean = labels.mean()
self.std = labels.std()
labels = (labels - self.mean)/self.std
return labels,data
def toScaled(self,label):
''''''
return (label - self.mean)/self.std
def toOrig(self,code):
''''''
return code * self.std + self.mean
class FeaturePreprocessor:
    '''Abstract interface for feature preprocessing strategies.'''

    def __init__(self):
        pass

    def train(self, labels, data):
        '''Fit the preprocessor; must return the (labels, data) pair.'''
        raise NotImplementedError()

    def __call__(self, vector):
        '''Apply the fitted preprocessing to a single feature vector.'''
        raise NotImplementedError()
class NoNorm(FeaturePreprocessor):
    '''Pass-through preprocessor: leaves features completely untouched.'''

    def __init__(self):
        pass

    def train(self, labels, data):
        '''Nothing to fit; hand the inputs straight back.'''
        return (labels, data)

    def __call__(self, vector):
        '''Identity transform.'''
        return vector
class ZNormValues(FeaturePreprocessor):
    '''Z-normalizes each feature column to zero mean and unit variance.'''

    def __init__(self):
        pass

    def train(self, labels, data):
        '''Estimate per-column means/stds and return the normalized data.'''
        mu = data.mean(axis=0)
        sigma = data.std(axis=0)
        # TODO: Need to correct for zero values in the stds
        # (e.g. sigma[np.abs(sigma) < 1e-6] = 1.0)
        data = (data - mu.reshape(1, -1)) / sigma.reshape(1, -1)
        # Stored flat so __call__ can normalize 1-D vectors directly.
        self.means = mu.flatten()
        self.stds = sigma.flatten()
        return labels, data

    def __call__(self, vector):
        '''Normalize a single feature vector with the fitted statistics.'''
        vector = np.array(vector)
        return (vector - self.means) / self.stds
class Classifier:
    '''Marker base class for classification models.'''
    pass

class Regression:
    '''Marker base class for regression models.'''
    pass
class SVC(Classifier):
    '''Support vector machine classifier backed by libsvm (C-SVC, RBF kernel).'''

    # NOTE(review): the default preprocessor=ZNormValues() instance is created
    # once at import time and shared by every call that relies on the default.
    def __init__(self,C=1.0,gamma=1.0,preprocessor=ZNormValues()):
        '''Create a support vector machine classifier.'''
        self._model = None
        assert isinstance(preprocessor,FeaturePreprocessor)
        self._preprocessor = preprocessor
        self._C = C
        self._gamma = gamma
        self._label_map = _LabelMap()

    def __getstate__(self):
        '''This function is necessary for pickling.'''
        # Translate everything but the svm because that cannot be simply pickled.
        # The libsvm model only supports save()/load() on disk, so it is
        # round-tripped through a temporary file into a string buffer.
        # NOTE(review): tempfile.mktemp() is race-prone; mkstemp() is the safe
        # alternative -- confirm before changing behavior.
        state = {}
        for key,value in self.__dict__.iteritems():
            if key == '_model':
                filename = tempfile.mktemp()
                self._model.save(filename)
                data_buffer = open(filename).read()
                os.remove(filename)
                state[key] = data_buffer
                continue
            state[key] = value
        return state

    def __setstate__(self,state):
        '''This function is necessary for pickling.'''
        # Translate everything but the svm because that cannot be simply pickled.
        # Reverse of __getstate__: write the saved buffer to disk and reload it.
        for key,value in state.iteritems():
            if key == '_model':
                filename = tempfile.mktemp()
                open(filename,'w').write(value)
                self._model = svm.svm_model(filename)
                os.remove(filename)
                continue
            self.__dict__[key] = value

    def train(self,labels,data):
        '''
        Train the classifier.

        @param labels: A list of class labels.
        @param data: A 2D array or list of feature vectors. One feature vector per row.
        '''
        # Check the types and convert to np arrays
        if isinstance(data,list) or isinstance(data,tuple):
            data = np.array(data,dtype=np.double)
        # Map arbitrary labels to integer codes as required by libsvm.
        labels = [self._label_map.toCode(each) for each in labels]
        labels = np.array(labels)
        # Preprocess the data
        labels,data = self._preprocessor.train(labels,data)
        # Create the svm parameter data and problem description
        param = svm.svm_parameter(svm_type=svm.C_SVC,kernel_type = svm.RBF, C = self._C, gamma=self._gamma)
        prob = svm.svm_problem(labels.tolist(),data.tolist())
        # train the svm
        self._model = svm.svm_model(prob, param)

    def __call__(self,vector):
        '''Classify a feature vector; returns the original (unmapped) label.'''
        if self._model == None:
            raise UntrainedClassifierError()
        # convert to an array
        if isinstance(vector,list) or isinstance(vector,tuple):
            vector = np.array(vector,dtype=np.double)
        # preprocess the data
        vector = self._preprocessor(vector)
        # return the prediction, translated back to the caller's label space
        code = self._model.predict(vector.tolist())
        return self._label_map.toLabel(code)
class SVR(Regression):
    '''Support vector regression backed by libsvm (epsilon-SVR, RBF kernel).'''

    # NOTE(review): the default preprocessor=ZNormValues() instance is created
    # once at import time and shared by every call that relies on the default.
    def __init__(self,epsilon=0.01,gamma=1.0,preprocessor=ZNormValues()):
        '''Create a support vector machine classifier.'''
        self._model = None
        assert isinstance(preprocessor,FeaturePreprocessor)
        self._preprocessor = preprocessor
        self._label_scale = _LabelScale()
        self._epsilon = epsilon
        self._gamma = gamma

    def __getstate__(self):
        '''This function is necessary for pickling.'''
        # Translate everything but the svm because that cannot be simply pickled.
        # The libsvm model only supports save()/load() on disk, so it is
        # round-tripped through a temporary file into a string buffer.
        # NOTE(review): tempfile.mktemp() is race-prone; mkstemp() is the safe
        # alternative -- confirm before changing behavior.
        state = {}
        for key,value in self.__dict__.iteritems():
            if key == '_model':
                filename = tempfile.mktemp()
                self._model.save(filename)
                data_buffer = open(filename).read()
                os.remove(filename)
                state[key] = data_buffer
                continue
            state[key] = value
        return state

    def __setstate__(self,state):
        '''This function is necessary for pickling.'''
        # Translate everything but the svm because that cannot be simply pickled.
        # Reverse of __getstate__: write the saved buffer to disk and reload it.
        for key,value in state.iteritems():
            if key == '_model':
                filename = tempfile.mktemp()
                open(filename,'w').write(value)
                self._model = svm.svm_model(filename)
                os.remove(filename)
                continue
            self.__dict__[key] = value

    def train(self,labels,data):
        '''
        Train the classifier.

        @param labels: A list of class labels.
        @param data: A 2D array or list of feature vectors. One feature vector per row.
        '''
        # Check the types and convert to np arrays
        if isinstance(data,list) or isinstance(data,tuple):
            data = np.array(data,dtype=np.double)
        labels = np.array(labels,dtype=np.double)
        # Preprocess the data
        labels,data = self._preprocessor.train(labels,data)
        # Labels are z-normalized too; predictions are mapped back in __call__.
        labels,data = self._label_scale.train(labels,data)
        # Create the svm parameter data and problem description
        param = svm.svm_parameter(svm_type=svm.EPSILON_SVR,kernel_type = svm.RBF, eps = self._epsilon, gamma=self._gamma)
        prob = svm.svm_problem(labels.tolist(),data.tolist())
        # train the svm
        self._model = svm.svm_model(prob, param)

    def __call__(self,vector):
        '''Predict the regression value for a feature vector.'''
        if self._model == None:
            raise UntrainedClassifierError()
        # convert to an array
        if isinstance(vector,list) or isinstance(vector,tuple):
            vector = np.array(vector,dtype=np.double)
        # preprocess the data
        vector = self._preprocessor(vector)
        # return the prediction, rescaled to the original label space
        value = self._model.predict(vector.tolist())
        return self._label_scale.toOrig(value)
|
{
"content_hash": "ceea0f7687c5df7dd98a5ce20d2348f0",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 121,
"avg_line_length": 29.44701986754967,
"alnum_prop": 0.5469470369953896,
"repo_name": "mikeseven/pyvision",
"id": "317cd6fe128375681bc1069db8bdf7a13ed7fe98",
"size": "8893",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/pyvision/ml/libsvm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1379814"
},
{
"name": "R",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
}
|
import sys
import json
import time
import random
import hashlib
import operator
import bitcoinrpc
import pybitcointools
from decimal import *
HEXSPACE='21' #change this to 21 if your hex decode is malformed, system dependent value

# Refuse to run with extra arguments unless --force is supplied; the script
# expects its JSON options on stdin, not on the command line.
if len(sys.argv) > 1 and "--force" not in sys.argv:
    print "Takes a list of bitcoind options, addresses and a send amount and outputs a transaction in JSON \nUsage: cat generateTx.json | python generateTx.py\nRequires a fully-synced *local* bitcoind node"
    exit()
if "--force" in sys.argv:
    #WARNING: '--force' WILL STEAL YOUR BITCOINS IF YOU DON KNOW WHAT YOU'RE DOING
    force=True
else:
    force=False

# All options arrive as a single JSON document on stdin.
JSON = sys.stdin.readlines()
listOptions = json.loads(str(''.join(JSON)))

#sort out whether using local or remote API
conn = bitcoinrpc.connect_to_local()

#check for testnet addr
# Testnet private keys start with 'c' or '9' -- presumably WIF prefixes;
# verify against the key format actually supplied.
privkey_char1 = listOptions['from_private_key'][0]
if privkey_char1 == 'c' or privkey_char1 == '9':
    testnet=True
else:
    testnet=False

if testnet:
    pass #do no check here
else:
    #check if private key provided produces correct address
    address = pybitcointools.privkey_to_address(listOptions['from_private_key'])
    if not address == listOptions['transaction_from'] and not force:
        print json.dumps({ "status": "NOT OK", "error": "Private key does not produce same address as \'transaction from\'" , "fix": "Set \'force\' flag to proceed without address checks" })
        exit()

#see if account has been added
account = conn.getaccount(listOptions['transaction_from'])
if account == "" and not force:
    _time = str(int(time.time()))
    private = listOptions['from_private_key']
    # NOTE(review): this prints a "NOT OK" status with the importprivkey fix
    # but does NOT exit(), so the script keeps running -- confirm intentional.
    print json.dumps({ "status": "NOT OK", "error": "Couldn\'t find address in wallet, please run \'fix\' on the machine", "fix": "bitcoind importprivkey " + private + " imported_" + _time })
#calculate minimum unspent balance
available_balance = Decimal(0.0)
unspent_tx = []
for unspent in conn.listunspent():
    if unspent.address == listOptions['transaction_from']:
        unspent_tx.append(unspent)
#get all unspent for our from_address
for unspent in unspent_tx:
    available_balance = unspent.amount + available_balance
#check if minimum BTC balance is met
#print available_balance, 0.00006*3
# Minimum spend is three 0.00006 BTC outputs (see validnextoutputs below).
if available_balance < Decimal(0.00006*3) and not force:
    print json.dumps({ "status": "NOT OK", "error": "Not enough funds" , "fix": "Set \'force\' flag to proceed without balance checks" })
    exit()
#generate public key of bitcoin address
# validateaddress only exposes the pubkey for addresses the wallet knows.
validated = conn.validateaddress(listOptions['transaction_from'])
if 'pubkey' in validated.__dict__:
    pubkey = validated.pubkey
elif not force:
    print json.dumps({ "status": "NOT OK", "error": "from address is invalid or hasn't been used on the network" , "fix": "Set \'force\' flag to proceed without balance checks" })
    exit()
#find spendable input from UTXO
# Pick the smallest unspent output that still covers the fees (> 0.0004 BTC).
smallest_spendable_input = { "txid": "", "amount": Decimal(0) }
for unspent in unspent_tx:
    if Decimal(unspent.amount) > Decimal(0.0004) and (smallest_spendable_input['amount'] == Decimal(0) or unspent.amount < smallest_spendable_input['amount']):
        smallest_spendable_input = { "txid": unspent.txid, "amount": unspent.amount }
#real stuff happens here:
broadcast_fee = 0.0001
output_minimum = 0.0006 #dust threshold
# Total cost: one broadcast fee plus four 0.00006 outputs.
fee_total = Decimal(0.0001) + Decimal(0.00006 * 4)
change = smallest_spendable_input['amount'] - fee_total
# calculate change :
# (total input amount) - (broadcast fee) - (total transaction fee)
#print fee_total, smallest_spendable_input['amount']
if (Decimal(change) < Decimal(0) or fee_total > smallest_spendable_input['amount']) and not force:
    print json.dumps({ "status": "NOT OK", "error": "Not enough funds" , "fix": "Set \'force\' flag to proceed without balance checks" })
    exit()
#build multisig data address
from_address = listOptions['transaction_from']
transaction_type = 0 #simple send
sequence_number = 1 #packet number
currency_id = int(listOptions['currency']) #MSC
amount = int(listOptions['msc_send_amt']*1e8) #maran's impl used float??
# Pack sequence(1 byte) + type(4) + currency(4) + amount(8) as hex, padded
# with zeros to 62 hex chars (31 bytes) of cleartext packet data.
cleartext_packet = (
    (hex(sequence_number)[2:].rjust(2,"0") +
    hex(transaction_type)[2:].rjust(8,"0") +
    hex(currency_id)[2:].rjust(8,"0") +
    hex(amount)[2:].rjust(16,"0") ).ljust(62,"0") )
# SHA256 of the sender address serves as the obfuscation key.
sha_the_sender = hashlib.sha256(from_address).hexdigest().upper()[0:-2]
# [0:-2] because we remove last ECDSA byte from SHA digest
cleartext_bytes = map(ord,cleartext_packet.decode('hex')) #convert to bytes for xor
shathesender_bytes = map(ord,sha_the_sender.decode('hex')) #convert to bytes for xor
msc_data_key = ''.join(map(lambda xor_target: hex(operator.xor(xor_target[0],xor_target[1]))[2:].rjust(2,"0"),zip(cleartext_bytes,shathesender_bytes))).upper()
#map operation that xor's the bytes from cleartext and shathesender together
#to obfuscate the cleartext packet, for more see Appendix Class B:
#https://github.com/faizkhan00/spec#class-b-transactions-also-known-as-the-multisig-method
obfuscated = "02" + msc_data_key + "00"
#add key identifier and ecdsa byte to new mastercoin data key
if testnet:
    # On testnet any random final byte is accepted without validation.
    data_pubkey = obfuscated[:-2] + hex(random.randint(0,255))[2:].rjust(2,"0").upper()
else:
    invalid = True
    while invalid:
        obfuscated_randbyte = obfuscated[:-2] + hex(random.randint(0,255))[2:].rjust(2,"0").upper()
        #set the last byte to something random in case we generated an invalid pubkey
        potential_data_address = pybitcointools.pubkey_to_address(obfuscated_randbyte)
        if bool(conn.validateaddress(potential_data_address).isvalid):
            data_pubkey = obfuscated_randbyte
            invalid = False
    #make sure the public key is valid using pybitcointools, if not, regenerate
    #the last byte of the key and try again
#### Build transaction
#retrieve raw transaction to spend it
prev_tx = conn.getrawtransaction(smallest_spendable_input['txid'])
validnextinputs = [] #get valid redeemable inputs
for output in prev_tx.vout:
    # Only spend single-signature, non-multisig outputs paying our address.
    if output['scriptPubKey']['reqSigs'] == 1 and output['scriptPubKey']['type'] != 'multisig':
        for address in output['scriptPubKey']['addresses']:
            if address == listOptions['transaction_from']:
                validnextinputs.append({ "txid": prev_tx.txid, "vout": output['n']})
# Every transaction pays the (net-specific) Exodus address.
if testnet:
    exodus = "n1eXodd53V4eQP96QmJPYTG2oBuFwbq6kL"
else:
    exodus = "1EXoDusjGwvnjZUyKkxZ4UHEf77z6A5S4P"
validnextoutputs = { exodus : 0.00006 , listOptions['transaction_to'] : 0.00006 }
if change > Decimal(0.00006): # send anything above dust to yourself
    validnextoutputs[ listOptions['transaction_from'] ] = float(change)
unsigned_raw_tx = conn.createrawtransaction(validnextinputs, validnextoutputs)
json_tx = conn.decoderawtransaction(unsigned_raw_tx)
#add multisig output to json object
# 1-of-2 bare multisig: spender pubkey plus the obfuscated data pubkey
# (51 = OP_1, 21 = 33-byte push, 52 = OP_2, ae = OP_CHECKMULTISIG).
json_tx['vout'].append({ "scriptPubKey": { "hex": "51" + HEXSPACE + pubkey + "21" + data_pubkey.lower() + "52ae", "asm": "1 " + pubkey + " " + data_pubkey.lower() + " 2 OP_CHECKMULTISIG", "reqSigs": 1, "type": "multisig", "addresses": [ pybitcointools.pubkey_to_address(pubkey), pybitcointools.pubkey_to_address(data_pubkey) ] }, "value": 0.00006*2, "n": len(validnextoutputs)})
#construct byte arrays for transaction
#assert to verify byte lengths are OK
version = ['01', '00', '00', '00' ]
assert len(version) == 4
num_inputs = [str(len(json_tx['vin'])).rjust(2,"0")]
assert len(num_inputs) == 1
num_outputs = [str(len(json_tx['vout'])).rjust(2,"0")]
assert len(num_outputs) == 1
sequence = ['FF', 'FF', 'FF', 'FF']
assert len(sequence) == 4
blocklocktime = ['00', '00', '00', '00']
assert len(blocklocktime) == 4
#prepare inputs data for byte packing
inputsdata = []
for _input in json_tx['vin']:
prior_input_txhash = _input['txid'].upper()
prior_input_index = str(_input['vout']).rjust(2,"0").ljust(8,"0")
input_raw_signature = _input['scriptSig']['hex']
prior_txhash_bytes = [prior_input_txhash[ start: start + 2 ] for start in range(0, len(prior_input_txhash), 2)][::-1]
assert len(prior_txhash_bytes) == 32
prior_txindex_bytes = [prior_input_index[ start: start + 2 ] for start in range(0, len(prior_input_index), 2)]
assert len(prior_txindex_bytes) == 4
len_scriptsig = ['%02x' % len(''.join([]).decode('hex').lower())]
assert len(len_scriptsig) == 1
inputsdata.append([prior_txhash_bytes, prior_txindex_bytes, len_scriptsig])
#prepare outputs for byte packing
output_hex = []
for output in json_tx['vout']:
value_hex = hex(int(float(output['value'])*1e8))[2:]
value_hex = value_hex.rjust(16,"0")
value_bytes = [value_hex[ start: start + 2 ].upper() for start in range(0, len(value_hex), 2)][::-1]
assert len(value_bytes) == 8
scriptpubkey_hex = output['scriptPubKey']['hex']
scriptpubkey_bytes = [scriptpubkey_hex[start:start + 2].upper() for start in range(0, len(scriptpubkey_hex), 2)]
len_scriptpubkey = ['%02x' % len(''.join(scriptpubkey_bytes).decode('hex').lower())]
#assert len(scriptpubkey_bytes) == 25 or len(scriptpubkey_bytes) == 71
output_hex.append([value_bytes, len_scriptpubkey, scriptpubkey_bytes] )
#join parts into final byte array
hex_transaction = version + num_inputs
for _input in inputsdata:
hex_transaction += (_input[0] + _input[1] + _input[2] + sequence)
hex_transaction += num_outputs
for output in output_hex:
hex_transaction = hex_transaction + (output[0] + output[1] + output[2])
hex_transaction = hex_transaction + blocklocktime
#verify that transaction is valid
assert type(conn.decoderawtransaction(''.join(hex_transaction).lower())) == type({})
#sign it
signed_transaction = conn.signrawtransaction(''.join(hex_transaction))
#output final product as JSON
print json.dumps({ "rawtransaction": signed_transaction })
|
{
"content_hash": "ef923027b8268cac10b3db3479067345",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 378,
"avg_line_length": 40.607438016528924,
"alnum_prop": 0.6887147654421492,
"repo_name": "thehobbit85/coinpowers",
"id": "0575e096f2ad5737836607956afc98eb767fcdef",
"size": "9840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generateTX00.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "91327"
},
{
"name": "Python",
"bytes": "26677"
}
],
"symlink_target": ""
}
|
"""
commswave
=========
Takes device communications up and down according to a timefunction.
Comms will be working whenever the timefunction returns non-zero.
Configurable parameters::
{
"timefunction" : A timefunction definition
"threshold" : (optional) Comms will only work when the timefunction is returning >= threshold. If missing then any non-zero value will make comms work.
"gate_properties" : (optional) ["list", "of", "properties"] If this is defined, then instead of taking whole comms up and down, only these specific properties are gated
}
Device properties created::
{
"connected" : A flag which shows if we are currently connected (TCP- and therefore MQTT-like)
}
"""
from .device import Device
from common import importer
import logging
import inspect
class Commswave(Device):
    """Take device communications up and down according to a timefunction.

    Comms are up whenever the timefunction's value exceeds the optional
    "threshold" parameter (any non-zero value if no threshold is set).
    If "gate_properties" is configured, only those named properties are
    stripped while comms are down, instead of gating the whole channel.
    """
    def __init__(self, instance_name, time, engine, update_callback, context, params):
        """Take Comms up and down according to some time function."""
        tf = params["commswave"]["timefunction"]
        tf_name = list(tf.keys())[0]  # config dict holds a single {name: args} entry
        self.comms_timefunction = importer.get_class("timefunction", tf_name)(engine, self, tf[tf_name])
        self.comms_tf_threshold = params["commswave"].get("threshold", None)
        self.comms_gate_properties = params["commswave"].get("gate_properties", None)
        self.messages_sent = 0
        self.messages_attempted = 0
        # Superclass construction may already send messages (calling our comms_ok()
        # and transmit()), so our own state must be initialised before this call.
        super(Commswave, self).__init__(instance_name, time, engine, update_callback, context, params)
        self.set_property("connected", self.timefunction_says_communicate())
        self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)  # Tick the "connected" flag

    def timefunction_says_communicate(self):
        """Return True when the timefunction currently allows communication."""
        thresh = 0.0
        if self.comms_tf_threshold is not None:
            thresh = self.comms_tf_threshold
        return self.comms_timefunction.state() > thresh

    def comms_ok(self):
        """Gate overall comms on the timefunction (unless individual properties are gated)."""
        if self.comms_gate_properties is not None:  # If we're gating individual properties, then don't gate overall comms
            return super(Commswave, self).comms_ok()
        else:
            self.messages_attempted += 1
            is_ok = super(Commswave, self).comms_ok()
            is_ok = is_ok and self.timefunction_says_communicate()
            if is_ok:
                self.messages_sent += 1
            return is_ok

    def transmit(self, the_id, ts, properties, force_comms):
        """Transmit, first stripping any gated properties while comms are down."""
        if self.comms_gate_properties is not None:  # We're gating properties
            if not self.timefunction_says_communicate():
                for p in self.comms_gate_properties:
                    properties.pop(p, None)  # Remove the property, if it's there
        super(Commswave, self).transmit(the_id, ts, properties, force_comms)

    def external_event(self, event_name, arg):
        super(Commswave, self).external_event(event_name, arg)

    def close(self):
        """Close the device, logging how many messages actually got through."""
        super(Commswave, self).close()
        # BUG FIX: messages_attempted is 0 when gate_properties is configured
        # (comms_ok never counts attempts in that mode), which used to raise
        # ZeroDivisionError here on shutdown.
        if self.messages_attempted:
            percent = 100 * self.messages_sent / self.messages_attempted
        else:
            percent = 0
        logging.info("Comms report for " + str(self.properties["$id"]) + " " +
                     str(self.messages_sent) + " sent (" + str(percent) + "%) from " +
                     str(self.messages_attempted) + " total")

    # Private methods
    def tick_commswave(self, _):
        """Periodic event: refresh the "connected" flag and reschedule ourselves."""
        state = self.timefunction_says_communicate()
        logging.info("commswave: comms going " + ["offline", "online"][state] + " for device " + str(self.properties["$id"]))
        # We have to force send, because otherwise the fact that we've just set
        # connected to false would cause our own comms_ok() to prevent this transmission!
        self.set_property("connected", state, force_send=True)
        self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)
|
{
"content_hash": "23c6fe11ce677a7486cf13687e83c373",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 228,
"avg_line_length": 48.65060240963855,
"alnum_prop": 0.6619613670133729,
"repo_name": "DevicePilot/synth",
"id": "da77e9a33aefa09030505cc60fe92fd3ecc85acf",
"size": "4038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synth/devices/commswave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "628644"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
}
|
"""
BostonBikr.py will allow the user to import a map (from a SQL database or local files) and run path-finding calculations on it.
"""
from math import sin, cos, sqrt, atan2, acos, radians
from Queue import Queue
from sets import Set
#import pymysql as mdb
import pickle
import networkx as nx
import matplotlib.pyplot as plt
from random import randint, sample, choice
import operator
from geneticAlgorithm import geneticPath
from geojson import Feature, Point, FeatureCollection
"""
DEFINE CONSTANTS
R = radius of Earth
meter* = estimate for Boston geocodes
"""
R = 6373000        # Earth radius in metres (spherical approximation)
maxVal = 999999.9  # sentinel for "infinitely far" in nearest-node searches
meterPerLat = 82190.6  # NOTE(review): ~82 km/degree matches *longitude* spacing at Boston's latitude; the Lat/Lng names may be swapped -- verify against directionalVec
meterPerLng = 111230   # NOTE(review): ~111 km/degree matches *latitude* spacing -- see note above
"""
DISTANCE CALCULATION FUNCTIONS
"""
def cor2ID(cor):
    #Coordinates arrive as [lon, lat] lists; tuples are hashable node keys
    return (cor[0], cor[1])
def distanceCal4par(lon1, lat1, lon2, lat2, radius=6373000):
    """Haversine great-circle distance between two points, in metres.

    All coordinates are in degrees.  ``radius`` defaults to the module's
    Earth radius constant R (6373000 m) and is exposed as a parameter so
    the formula can be reused for other spheres and unit-tested directly;
    existing callers are unaffected.
    """
    lon1 = radians(lon1)
    lat1 = radians(lat1)
    lon2 = radians(lon2)
    lat2 = radians(lat2)
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    # haversine formula; atan2 keeps the result numerically stable near antipodes
    a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2
    c = 2 * atan2(sqrt(a), sqrt(1-a))
    return radius * c
def distanceCal(cor1, cor2):
    #Great-circle distance between two (lon, lat) pairs
    lon1, lat1 = cor1[0], cor1[1]
    lon2, lat2 = cor2[0], cor2[1]
    return distanceCal4par(lon1, lat1, lon2, lat2)
def calPathDisSlow(linCor):
#Calculate the tot dis of entire path from scratch
print "calPathDisSlow called!"
pathLen = 0
for idx in xrange(1,len(linCor)):
delLen = distanceCal(linCor[idx], linCor[idx-1])
#print delLen
pathLen += delLen
return pathLen
def lenCal(vec):
    #Euclidean norm of a 2-D vector
    squared = vec[0]**2 + vec[1]**2
    return sqrt(squared)
def directionalVec(u, v):
    #Unit vector along (u - v), after scaling each axis to metres
    dx = (u[0]-v[0])*meterPerLng
    dy = (u[1]-v[1])*meterPerLat
    norm = lenCal((dx, dy))
    return (dx/norm, dy/norm)
def innerProduct(u, v):
    #Dot product of two 2-D vectors (callers pass unit vectors)
    ux, uy = u[0], u[1]
    vx, vy = v[0], v[1]
    return ux*vx + uy*vy
###
#Scoring function
###
def distScore(curDis, targetDis):
    #Quadratic penalty on the relative deviation from the target distance
    deviation = curDis - targetDis
    return deviation**2/targetDis**2
#p is onePath <type list>
#p is onePath <type list>
def turnScore(p):
    #Accumulate the absolute turning angle (radians) over each node triple;
    #paths shorter than three points contribute nothing
    score = 0
    for a, b, c in zip(p, p[1:], p[2:]):
        u = directionalVec(a, b)
        v = directionalVec(b, c)
        dot = min(1, max(innerProduct(u, v), -1))  #clamp for acos domain safety
        score += acos(dot)
    return score
#p is onePath <type list>
#p is onePath <type list>
def repScore(p, curDis):
    #Penalty proportional to the total length of re-traversed edges,
    #normalised by the current path length; both edge directions are tracked
    penalty = 0
    seen = Set()
    for a, b in zip(p, p[1:]):
        if (a, b) in seen:
            penalty += distanceCal(b, a)
        else:
            seen.add((a, b))
        if (b, a) in seen:
            penalty += distanceCal(b, a)
        else:
            seen.add((b, a))
    return penalty/curDis
#p is onePath <type list>, curDis targDist double
#p is onePath <type list>, curDis targDist double
def totScoreCal(path, curDis, targetDis):
    #Weighted sum of the three penalty terms; the weights were chosen so each
    #penalty source contributes a similar variance to the total
    turnWeight = 0.02
    distWeight = 10
    repWeight = 10
    return (turnWeight*turnScore(path)
            + distWeight*distScore(curDis, targetDis)
            + repWeight*repScore(path, curDis))
"""
STAY CLASSY
Here's where we define Vertex and its metaclass Graph.
We'll use methods in these classes to generate a clean map and more!
"""
class Vertex:
    """A graph node keyed by its (lon, lat) tuple.

    Neighbours are stored in ``connectedTo`` as a mapping from neighbour id
    to a mutable ``[distance, score]`` pair.
    """
    def __init__(self, cor):
        self.id = cor            #cor is a tuple of (lon, lat)
        self.connectedTo = {}
    def addNeighbor(self, nbrID, dist=0, score=1):
        #Insert or overwrite the edge to nbrID
        self.connectedTo[nbrID] = [dist, score]
    def __str__(self):
        #Human-readable dump of the node and its weighted edges
        pieces = [str(self.id) + ' connectedTo: ']
        for nbr, (d, s) in self.connectedTo.items():
            pieces.append(str(nbr) + ' d=' + str(d))
            pieces.append(', s=' + str(s) + '; ')
        return ''.join(pieces)
    def getConnections(self):
        return self.connectedTo.keys()
    def neighborNumber(self):
        return len(self.connectedTo)
    def getID(self):
        return self.id
    def getLon(self):
        return self.id[0]
    def getLat(self):
        return self.id[1]
    def getLength(self, nbrID):
        #Stored edge distance to nbrID
        return self.connectedTo[nbrID][0]
    def getScore(self, nbrID):
        #Stored edge score to nbrID
        return self.connectedTo[nbrID][1]
class Graph(Vertex):
    """Adjacency-map graph of Vertex objects keyed by (lon, lat) tuples.

    NOTE(review): Graph subclasses Vertex but never calls Vertex.__init__
    nor uses its attributes -- the inheritance looks accidental; verify
    before relying on isinstance checks.
    """
    def __init__(self):
        self.vertList = {}    # node id -> Vertex
        self.numVertices = 0  # cached counts, kept in sync by add/remove below
        self.numEdges= 0
    def recountVandE(self):
        # Rebuild the cached vertex/edge counts from scratch (directed edge count)
        self.numVertices = 0
        self.numEdges = 0
        for u in self.getVertices():
            self.numVertices += 1
            self.numEdges += len(self.vertList[u].getConnections())
    def addVertex(self, v):
        # Create (or overwrite) the vertex with id v and return it
        self.numVertices += 1
        newVertex = Vertex(v)
        self.vertList[v] = newVertex
        return newVertex
    def getVertex(self,n):
        # Vertex object for id n, or None if absent
        if n in self.vertList:
            return self.vertList[n]
        else:
            return None
    def __contains__(self,n):
        return n in self.vertList
    #note that f, t are tuples cor(lon, lat) here
    def addEdge(self, f, t, dist=0, score=1, oneWay=False):
        # Add edge f->t (and t->f unless oneWay), creating missing vertices
        if f not in self.vertList:
            nv = self.addVertex(f)
        if t not in self.vertList[f].getConnections():
            self.numEdges += 1
        self.vertList[f].addNeighbor(t, dist, score)
        if not oneWay:
            if t not in self.vertList:
                nv = self.addVertex(t)
            if f not in self.vertList[t].getConnections():
                self.numEdges += 1
            self.vertList[t].addNeighbor(f, dist, score)
    def getVertices(self):
        # In Python 2 this is a list of node ids (removeMiddlePt relies on indexing it)
        return self.vertList.keys()
    def __str__(self):
        # Prints every vertex as a side effect; returns an empty string
        for v in self.vertList:
            print self.vertList[v]
        return ''
    def removeVertex(self, delVID):
        # Remove a vertex and its outgoing edges; incoming edges in other
        # vertices are NOT cleaned up here
        if delVID in self.vertList:
            self.numVertices -= 1
            self.numEdges -= len(self.vertList[delVID].getConnections())
            del self.vertList[delVID]
    #Note this only delete the edge from u to v, not vice versa
    def removeEdge(self, u, v):
        # Returns the removed (dist, score) pair, or (-1, 0) when no such edge;
        # returns None implicitly when u itself is unknown
        if u in self.vertList:
            if v in self.vertList[u].getConnections():
                self.numEdges -= 1
                [dis, score] = self.vertList[u].connectedTo[v]
                del self.vertList[u].connectedTo[v]
                return (dis, score)
            else:
                return (-1, 0)
    #This function remove the middle point u and concatenate its
    #in and out edge
    def removeMiddlePt(self, u):
        # Assumes u has exactly two neighbours (a degree-2 "middle" point)
        twoNeighbors = self.vertList[u].getConnections()
        for v in twoNeighbors:
            self.removeEdge(v, u)
        self.addEdge(twoNeighbors[0], twoNeighbors[1])
        self.removeVertex(u)
    #combine all nodes in the combineSet, return their COM combined newNode
    def combine(self, combineSet):
        # Merge a cluster of nodes into a single node at their centre of mass,
        # re-pointing all external edges at the new node
        x=0
        y=0
        for u in combineSet:
            x+=u[0]
            y+=u[1]
        newND = (x/len(combineSet), y/len(combineSet))
        self.addVertex(newND)
        for u in combineSet:
            for nb in self.vertList[u].getConnections():
                if nb not in combineSet:
                    self.removeEdge(nb, u)
                    self.addEdge(nb, newND)
                    self.addEdge(newND, nb)
            self.removeVertex(u)
        return newND
    def calPathDis(self, path):
        #Calculate the tot dis of entire path from preCalDist
        # (uses the stored per-edge distances, unlike calPathDisSlow)
        pathLen = 0
        for idx in xrange(1,len(path)):
            fNode = path[idx-1]
            tNode = path[idx]
            pathLen += self.vertList[fNode].connectedTo[tNode][0]
        return pathLen
    def findNearestNode(self, lookUpNode, NNnode):
        #"find the closest node to the geocoded location"
        # NNnode is mutated in place to hold the winner; the distance is returned
        minDist = maxVal
        for node in self.vertList:
            curDist = distanceCal(node, lookUpNode)
            if curDist < minDist:
                minDist = curDist
                NNnode[1] = node[1]
                NNnode[0] = node[0]
        return minDist
    def __iter__(self):
        return iter(self.vertList.values())
    def ccBFS(self, startN, visited, conComponent):
        ###
        #This is to count the number of vertices inside each connected component
        #remove isolated islands
        ###
        # Breadth-first flood fill from startN; mutates both the shared
        # `visited` set and the per-component accumulator dict
        # {'conND': set, 'ct': int} passed in by the caller.
        visited.add(startN)
        conComponent['conND'].add(startN)
        conComponent['ct']+=1
        BFSqueue = Queue()
        BFSqueue.put(startN)
        while not BFSqueue.empty():
            nd = BFSqueue.get()
            if nd in self.vertList:
                for conND in self.vertList[nd].getConnections():
                    if conND not in conComponent['conND']:
                        visited.add(conND)
                        conComponent['conND'].add(conND)
                        conComponent['ct']+=1
                        BFSqueue.put(conND)
"""
WEB STUFF!
Here, we have methods to:
1. convert an 'address' <type string> to a geocoordinate (GeoCode)
2. take a 'geoItem' and turn it into a geoJSON type (GeoJsonify)
3. define a boundary and check that our coordinates are within it (inBounds)
4. build a dictionary from a set (buildDict)
5. create a 'MiniWorld' map, in which we query our sql database for a subset
of geolocation data for pathfinding (createMiniWorld)
6. put it all together! (PathTestMashUp)
"""
def GeoCode(address):
# take 'address' <type string> and get geocoordinates
import json
from urllib2 import urlopen
from urllib import quote
# encode address query into URL
url = 'https://maps.googleapis.com/maps/api/geocode/json?address={}&sensor=false&key={}'.format(quote(address, gAPI_key))
# call API and extract json
print 'Calling Google for the following address: ' + address
jData = urlopen(url).read()
jData = json.loads(jData.decode('utf-8')) # THIS MIGHT THROW AN ERROR
# extract coordinates (latitude, longitude)
if jData.get('status') == 'ZERO_RESULTS':
latitude, longitude = None, None
print 'The following address was not found: ' + address
else:
latitude, longitude = (value for _, value in sorted(jData.get('results')[0].get('geometry').get('location').items()))
print 'Your location is at the following coordinates: {:f}, {:f}'.format(longitude, latitude)
return (longitude, latitude)
def GeoJsonify(geoItem):
    """Wrap a coordinate list (path) or a 2-tuple (point) as a geoJSON Feature dict."""
    if isinstance(geoItem, list):
        #A coordinate list becomes a purple-stroked LineString
        geoJSON = {
            'type': 'Feature',
            'properties': {'stroke': '#914791'},
            'geometry': {'type': 'LineString', 'coordinates': geoItem},
        }
    elif isinstance(geoItem, tuple):
        #A single pair becomes a Point, with its two components swapped
        geoJSON = {
            'type': 'Feature',
            'geometry': {'type': 'Point', 'coordinates': [geoItem[1], geoItem[0]]},
        }
    return geoJSON
def GeoJsonifyMarkers(markerList):
    #Build one purple marker Feature per [title, lon, lat] locale, each
    #linking to a Google search for its title
    base_url = 'https://www.google.com/search?espv=2&biw=1600&bih=791&site=webhp&q='
    features = []
    for marker in markerList:
        title = str(marker[0])
        features.append(Feature(geometry=Point(tuple(marker[1:])),
                                properties={'title': title,
                                            'marker-color': '#751975',
                                            'url': base_url + title}))
    return features
def GeoJsonifyEndpoints(start, end):
    #Green marker for the start point, red for the end point; both link to a
    #Google search for their title. Inputs are [title, lon, lat] lists.
    base_url = 'https://www.google.com/search?espv=2&biw=1600&bih=791&site=webhp&q='
    startFeat = Feature(geometry=Point(tuple(start[1:])),
                        properties={'title': str(start[0]),
                                    'marker-color': '#47D147',
                                    'url': base_url + str(start[0])})
    endFeat = Feature(geometry=Point(tuple(end[1:])),
                      properties={'title': str(end[0]),
                                  'marker-color': '#FF3300',
                                  'url': base_url + str(end[0])})
    return startFeat, endFeat
def inBounds(node, bounds):
    #bounds = [[minX, minY], [maxX, maxY]]; all comparisons are strict
    minX, minY = bounds[0][0], bounds[0][1]
    maxX, maxY = bounds[1][0], bounds[1][1]
    latOK = minY < node.getLat() < maxY
    lonOK = minX < node.getLon() < maxX
    return latOK and lonOK
def buildDict(vSet, gDict):
    #Seed the search bookkeeping in place: every vertex starts "infinitely"
    #far away with no predecessor
    gDict.update({v.getID(): {'Dist': maxVal, 'pred': None} for v in vSet})
    return
def getMapBoundary():
    """Load the mini-world bounding box from the bundled pickle.

    In this toy version the bounds ship as a static file; the production
    build derives them from database edges instead.
    """
    with open("./static/bostonMetroArea_bounds.p", "rb") as fh:
        return pickle.load(fh)
def findNearestNodeNX(graph, lookUpNode):
    #Linear scan for the graph node closest to the geocoded location.
    #NOTE(review): raises UnboundLocalError on an empty graph, as before.
    bestDist = maxVal
    for candidate in graph.nodes():
        d = distanceCal(candidate, lookUpNode)
        if d < bestDist:
            bestDist = d
            bestNode = candidate
    return bestDist, bestNode
def miniGraph2NX(miniGraph):
    """Convert a cleaned miniGraph into a networkx Graph (edge weight = distance)."""
    # NOTE: in the future, have SQL-->NX directly
    nxG = nx.Graph()
    nodes = {node: node for node in miniGraph.vertList.keys()}
    for node in nodes:
        nxG.add_node(node)
        for nbr in miniGraph.vertList[node].getConnections():
            nxG.add_edge(node, nbr, weight=distanceCal(node, nbr))
    return nxG, nodes
def nxPlot(nxGraph, nxPos):
    """Render the whole graph with small nodes (blocks until the window closes)."""
    plt.figure(1, figsize=(12,12))
    nx.draw(nxGraph, pos=nxPos, node_size=5)
    plt.show()
def nxShortestPath(nxGraph, nxPos, startPt, endPt, Dijk=0):
    """Shortest path between two nodes plus its induced subgraph.

    Dijk=0 uses unweighted (hop-count) shortest path, Dijk=1 weighted
    Dijkstra on the 'weight' attribute; 'dist' is measured via the
    'distance' edge attribute in both cases.
    """
    if Dijk == 0:
        pathNodes = nx.shortest_path(nxGraph, source=startPt, target=endPt)
        hopScore = nx.shortest_path_length(nxGraph, source=startPt, target=endPt)
        pathDist = nx.shortest_path_length(nxGraph, source=startPt, target=endPt, weight='distance')
    elif Dijk == 1:
        pathNodes = nx.dijkstra_path(nxGraph, source=startPt, target=endPt, weight='weight')
        hopScore = nx.dijkstra_path_length(nxGraph, source=startPt, target=endPt, weight='weight')
        pathDist = nx.dijkstra_path_length(nxGraph, source=startPt, target=endPt, weight='distance')
    pathSubgraph = nx.subgraph(nxGraph, pathNodes)
    return pathNodes, pathSubgraph, hopScore, pathDist
def getRealPathLength(myPath):
    #Total geodesic length of the path: sum over consecutive node pairs
    return sum(distanceCal(a, b) for a, b in zip(myPath, myPath[1:]))
def plotPath(fullGraph, pathGraph, nodePos):
    #Draw the whole street graph faintly, then overlay the chosen path in red
    fig = plt.figure(figsize=(16, 16))
    ax = fig.add_subplot(111)
    nx.draw(fullGraph, pos=nodePos, node_size=2)
    nx.draw(pathGraph, pos=nodePos, node_size=40, width=5, edge_color='r')
    ax.plot()
def PathTestMashUp(startPt, endPt, runDis=3):
    """
    WHERE THE MAGIC HAPPENS!
    The website will call this function.

    Geocodes the two address strings, snaps them to the nearest graph
    nodes, runs the genetic path planner for roughly runDis kilometres,
    and returns a geoJSON-laden dict ready for the web front end.
    """
    ## Load up your necessary variables
    # In this toy version, you can load this pickle from the Static folder
    # The real version calls edges from a database upon each query and rebuilds the map around your start and end coordinates
    nxGraph = pickle.load(open("./static/bostonMetroArea_Weighted_Locs.p", "rb"))
    nxPos = pickle.load(open("./static/bostonMetroArea_pos.p", "rb"))
    targetDis = runDis*1000+1 # convert km to m
    # Use the Google to find geolocation <type tuple> for your start/endPt <type string>
    startCor = GeoCode(startPt)
    endCor = GeoCode(endPt)
    startDist, startNode = findNearestNodeNX(nxGraph, startCor)
    endDist, endNode = findNearestNodeNX(nxGraph, endCor)
    # to prevent crashes, shift one node slightly to a neighbor
    if startNode == endNode:
        endNode = nx.neighbors(nxGraph, endNode)[0]
    print 'The closest node found to startPt is {} from dist {}'.format(startNode, startDist)
    print 'The closest node found to endPt is {} with dist {}'.format(endNode, endDist)
    # Ensure you're within the boundaries of your world
    bounds = getMapBoundary()
    print "Boundaries found: {}".format(bounds)
    ## PATHFINDERS
    # Calculate weighted and unweighted Dijkstras
    # NOTE(review): the _uw/_w results are computed but unused below --
    # presumably kept for benchmarking against the genetic result; verify.
    shortestPath_uw, nxH_uw, _, pathLength_uw = nxShortestPath(nxGraph, nxPos, startNode, endNode, Dijk=0)
    shortestPath_w, nxH_w, _, pathLength_w = nxShortestPath(nxGraph, nxPos, startNode, endNode, Dijk=1)
    # Run the genetic algorithm!
    gene = geneticPath(startNode, endNode, targetDis)
    shortestPath_g, pathLength_g, error_g = gene.Evolution()
    nxH_g = nx.subgraph(nxGraph, gene.finalSpecies)
    nxH = nxH_g
    shortestPath = shortestPath_g
    # append the true geocoded endpoint so the drawn path reaches the address
    shortestPath.append(list(endCor))
    pathLength = pathLength_g
    message = 'Here is a {:.0f} km path for you.'.format(pathLength_g/1000.0)
    # Get the locations for any interesting nodes
    pathLocations = []
    pathNodes = []
    pathLocales = []
    for edge in nxH.edges(data=True):
        if edge[2]['location'] is not None:
            pathLocations.append(edge[2]['location'])
            pathNodes.append(edge[1])
            pathLocales.append([edge[2]['location'], edge[1][0], edge[1][1]])
    # de-duplicate (set() round-trips lose order, which is acceptable here)
    pathLocations = list(set(pathLocations))
    pathNodes = list(set(pathNodes))
    pathLocales = [list(x) for x in set(tuple(x) for x in pathLocales)]
    # keep only the first locale per distinct title
    unique_locales=[]
    unique_strings=[]
    for ls in pathLocales:
        if ls[0] not in unique_strings:
            unique_strings.append(ls[0])
            unique_locales.append(ls)
    pathLocales = unique_locales
    message += " Enjoy your ride!"
    # Create the new map layer with path and markers
    # turn locales in geojson object
    markers = GeoJsonifyMarkers(pathLocales)
    # add start and end
    startGeo, endGeo = GeoJsonifyEndpoints([startPt, startCor[0], startCor[1]], [endPt, endCor[0], endCor[1]])
    # add the path and endpoints as a geojson object
    markers.append(GeoJsonify(shortestPath))
    markers.append(startGeo)
    markers.append(endGeo)
    # create final layer as a Feature Collection
    geoMarkers = FeatureCollection(markers)
    # Create locales list of lists
    json = {
        'bounds': bounds,
        'startPt': GeoJsonify(startCor),
        'endPt': GeoJsonify(endCor),
        'dist': pathLength,
        'path': geoMarkers,
        'message': message,
        'locales': GeoJsonifyMarkers(pathLocales)
    }
    return json # FOR APP
    # return json, shortestPath, pathLength, nxH, pathLocales, pathNodes # FOR TESTING
if __name__ == "__main__":
    # Test run: plan a ~16 km ride between two Boston-area landmarks
    # (requires the static pickles and network access for geocoding)
    start = 'Fenway, Boston, MA'
    end = 'Fresh Pond, MA'
    distance = 16
    json = PathTestMashUp(start, end, distance)
|
{
"content_hash": "0db3a672a39d88f7a2246634cac6bcf8",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 139,
"avg_line_length": 34.06951871657754,
"alnum_prop": 0.6107361481714016,
"repo_name": "gaurav-kaushik/BostonBikr",
"id": "bf7617a5528db8f714ca07aea6d7cac5982ed152",
"size": "19137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/BostonBikr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1664"
},
{
"name": "HTML",
"bytes": "16772"
},
{
"name": "JavaScript",
"bytes": "3263"
},
{
"name": "OpenEdge ABL",
"bytes": "50303946"
},
{
"name": "Python",
"bytes": "72085"
}
],
"symlink_target": ""
}
|
import sys
import gzip
allS = gzip.open(sys.argv[2])  # gzipped "all peaks" file (argv[2])
allPeaks = {}        # chrom -> list of peak centres from the gzipped file
otherPeaks = {}      # chrom -> list of DHS interval centres
otherPercs = {}      # "chrom:center" -> column 8 of the DHS line (a percentage)
otherIntervals = {}  # "chrom:center" -> "chrom:start:end" of the DHS interval
# only autosomes chr1..chr22 are considered
for i in range(1, 23):
    allPeaks["chr%d" % i] = []
    otherPeaks["chr%d" % i] = []
# read in all autosomal peaks
for line in allS:
    parts = line.strip().split()
    chrom = parts[0]
    # skip sex chromosomes under either naming convention
    if chrom == "chrX": continue
    if chrom == "chrY": continue
    if chrom == "X": continue
    if chrom == "Y": continue
    center = parts[6]
    allPeaks[chrom].append(int(center))
# read in DHSes
otherS = open(sys.argv[1])
sampleName= sys.argv[1].split("/")[-1]
sampleName = sampleName.split(".")[0]
for line in otherS:
    parts = line.strip().split()
    chrom = parts[0]
    name = chrom + ":" + parts[1]
    # name = parts[3]
    if chrom == "chrX": continue
    if chrom == "chrY": continue
    if chrom == "X": continue
    if chrom == "Y": continue
    # interval midpoint (Python 2 integer division)
    center = int(parts[1]) + ((int(parts[2]) - int (parts[1])) / 2)
    otherPeaks[chrom].append(int(center))
    otherPercs[chrom+":"+str(center)] = parts[7]
    otherIntervals[chrom+ ":"+str(center)] = chrom+":"+parts[1]+":"+parts[2]
otherS.close()
# sort centres so the sweep below can walk both lists in lockstep
for chrom, v in otherPeaks.items():
    otherPeaks[chrom] = sorted(v)
# find the minimum distance to the peak upstream of the center,
# and the peak downstream of the center, and then find the
# distance between those two peaks: ...^...|.........^...
#                                      ============== this distance
for count in range(1, 23):
    chrom = "chr%d" % count
    otheri = 0   # index into the sorted DHS centres
    allj = 0     # index into the sorted peak centres (carried between DHSes)
    counter = 0
    while otheri < len(otherPeaks[chrom]):
        # 999999 is the "no peak found yet" sentinel on each side
        minneg = 999999
        minpos = 999999
        counter += 1
        if allj >= len(allPeaks[chrom]):
            allj -= 1
        # positive mymin: peak allj is upstream of the DHS; negative: downstream
        mymin = otherPeaks[chrom][otheri] - allPeaks[chrom][allj]
        if mymin < 0: minneg = abs(mymin)
        elif mymin > 0: minpos = mymin
        else: # mymin == 0
            # not sure what the "right" way to handle this is
            minpos = 0
        nextj = allj
        # now try decreasing j
        origj = allj
        allj -= 1
        while allj >= 0:
            newdist = otherPeaks[chrom][otheri] - allPeaks[chrom][allj]
            # if distance is negative, keep decreasing j
            if newdist < 0:
                if abs(newdist) < abs(minneg):
                    minneg = abs(newdist)
                allj -= 1
                continue
            # otherwise, distance is positive
            if newdist < minpos:
                minpos = newdist
                nextj = allj
                allj -=1
            else:
                # stop
                break
        # now look at negative distances
        allj = origj + 1
        while allj < len(allPeaks[chrom]):
            newdist = otherPeaks[chrom][otheri] - allPeaks[chrom][allj]
            if newdist > 0: # gotta keep going
                if newdist < minpos:
                    minpos = newdist
                allj += 1
                continue
            # otherwise, distance is negative
            if abs(newdist) < abs(minneg):
                minneg = abs(newdist)
                nextj = allj
                allj += 1
            else:
                # the distance is increasing
                break
        # okay, now calculate the distance between the two
        totaldistance = minpos + minneg - 1
        # emit: chrom, DHS start, DHS end, downstream gap, upstream gap, span, percentage
        print "%s\t%s\t%s\t%d\t%d\t%d\t%s" % (chrom, otherIntervals[chrom+":"+str(otherPeaks[chrom][otheri])].split(":")[1], otherIntervals[chrom+":"+str(otherPeaks[chrom][otheri])].split(":")[2], minneg, minpos, totaldistance, otherPercs[chrom+":"+str(otherPeaks[chrom][otheri])])
        allj = nextj
        otheri += 1
|
{
"content_hash": "e4fba9394a326b5753fba3babc6db2d5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 281,
"avg_line_length": 30.536585365853657,
"alnum_prop": 0.5191693290734825,
"repo_name": "shendurelab/cfDNA",
"id": "f31a74c06db8814db96fa897bca071fecd021029",
"size": "3756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DHS/DHSdistances.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159744"
},
{
"name": "R",
"bytes": "30185"
}
],
"symlink_target": ""
}
|
import copy
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestHuffmanTree(unittest.TestCase):
    """Unit tests for BinaryHierarchicalSoftmax.create_huffman_tree."""
    def test_empty(self):
        #An empty frequency dict cannot produce a tree
        with self.assertRaises(ValueError):
            links.BinaryHierarchicalSoftmax.create_huffman_tree({})
    def test_simple(self):
        counts = {'x': 8, 'y': 6, 'z': 5, 'w': 4, 'v': 3}
        tree = links.BinaryHierarchicalSoftmax.create_huffman_tree(counts)
        self.assertEqual((('z', 'y'), (('v', 'w'), 'x')), tree)
    def test_same_count(self):
        tree = links.BinaryHierarchicalSoftmax.create_huffman_tree(
            {'x': 1, 'y': 2, 'z': 3})
        # Order of the same items are not defined, so accept either shape.
        self.assertIn(tree, [(('x', 'y'), 'z'), ('z', ('x', 'y'))])
class TestBinaryHierarchicalSoftmax(unittest.TestCase):
    """Behavioural checks for the BinaryHierarchicalSoftmax link (CPU and GPU)."""
    def setUp(self):
        #Five-class tree: ((0, 1), ((2, 3), 4))
        self.link = links.BinaryHierarchicalSoftmax(3, ((0, 1), ((2, 3), 4)))
        self.link.cleargrads()
        self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
        self.t = numpy.array([0, 2]).astype(numpy.int32)
        self.gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
        self.W = self.link.W.data.copy()
    def check_sum(self, x, gpu=False):
        #The class probabilities over all five leaves must sum to one
        total = 0
        for label in range(5):
            t = numpy.array([label], dtype=numpy.int32)
            if gpu:
                t = cuda.to_gpu(t)
            loss = self.link(chainer.Variable(x), chainer.Variable(t)).data
            self.assertEqual(loss.dtype, numpy.float32)
            self.assertEqual(loss.shape, ())
            total += numpy.exp(-cuda.to_cpu(loss))
        self.assertAlmostEqual(1.0, float(total), delta=1.0e-5)
    @condition.retry(3)
    def test_sum_cpu(self):
        self.check_sum(numpy.array([[1.0, 2.0, 3.0]], numpy.float32))
    @attr.gpu
    @condition.retry(3)
    def test_sum_gpu(self):
        self.link.to_gpu()
        self.check_sum(
            cuda.to_gpu(numpy.array([[1.0, 2.0, 3.0]], numpy.float32)),
            gpu=True)
    @attr.gpu
    def test_forward(self):
        # TODO(unno): We need to test return values of forward function.
        cpu_loss = self.link(chainer.Variable(self.x),
                             chainer.Variable(self.t)).data
        self.link.to_gpu()
        gpu_loss = self.link(chainer.Variable(cuda.to_gpu(self.x)),
                             chainer.Variable(cuda.to_gpu(self.t))).data
        testing.assert_allclose(cpu_loss, cuda.to_cpu(gpu_loss))
    def check_backward(self, x_data, t_data, y_grad):
        gradient_check.check_backward(
            self.link, (x_data, t_data), y_grad, self.link.W,
            atol=1e-4, rtol=1e-3)
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t, self.gy)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.link.to_gpu()
        self.check_backward(cuda.to_gpu(self.x),
                            cuda.to_gpu(self.t),
                            cuda.to_gpu(self.gy))
    @attr.gpu
    def test_to_cpu(self):
        #A GPU round-trip must leave the precomputed tree arrays untouched
        before = copy.deepcopy(self.link)._func
        self.link.to_gpu()
        self.link.to_cpu()
        after = self.link._func
        self.assertTrue((before.begins == after.begins).all())
        self.assertTrue((before.paths == after.paths).all())
        self.assertTrue((before.codes == after.codes).all())
# Discover and run this module's tests under Chainer's testing harness.
testing.run_module(__name__, __file__)
|
{
"content_hash": "3ec22ea545d9573f084149d03e3963a6",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 32.747747747747745,
"alnum_prop": 0.5716643741403026,
"repo_name": "aonotas/chainer",
"id": "ead908e34f0b92f27203a893fd73b975f7759894",
"size": "3635",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/loss_tests/test_hierarchical_softmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
}
|
import numpy as np
from skimage.util import img_as_float
class FeatureDetector(object):
    """Base class for keypoint detectors.

    Subclasses implement ``detect`` and are expected to store their result
    in ``keypoints_`` (initialised here as an empty array).
    """
    def __init__(self):
        # Empty until a subclass's detect() fills it in.
        self.keypoints_ = np.array([])
    def detect(self, image):
        """Detect keypoints in image.
        Parameters
        ----------
        image : 2D array
            Input image.
        """
        raise NotImplementedError()
class DescriptorExtractor(object):
    """Base class for feature-descriptor extractors.

    Subclasses implement ``extract`` and are expected to store their result
    in ``descriptors_`` (initialised here as an empty array).
    """
    def __init__(self):
        # Empty until a subclass's extract() fills it in.
        self.descriptors_ = np.array([])
    def extract(self, image, keypoints):
        """Extract feature descriptors in image for given keypoints.
        Parameters
        ----------
        image : 2D array
            Input image.
        keypoints : (N, 2) array
            Keypoint locations as ``(row, col)``.
        """
        raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
                 keypoints_color='k', matches_color=None, only_matches=False):
    """Plot matched features.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matches and image are drawn in this ax.
    image1 : (N, M [, 3]) array
        First grayscale or color image.
    image2 : (N, M [, 3]) array
        Second grayscale or color image.
    keypoints1 : (K1, 2) array
        First keypoint coordinates as ``(row, col)``.
    keypoints2 : (K2, 2) array
        Second keypoint coordinates as ``(row, col)``.
    matches : (Q, 2) array
        Indices of corresponding matches in first and second set of
        descriptors, where ``matches[:, 0]`` denote the indices in the first
        and ``matches[:, 1]`` the indices in the second set of descriptors.
    keypoints_color : matplotlib color, optional
        Color for keypoint locations.
    matches_color : matplotlib color, optional
        Color for lines which connect keypoint matches. By default the
        color is chosen randomly.
    only_matches : bool, optional
        Whether to only plot matches and not plot the keypoint locations.

    """
    image1 = img_as_float(image1)
    image2 = img_as_float(image2)

    # Zero-pad each image (per axis) so both can be concatenated and
    # displayed side by side in a single array.
    new_shape1 = list(image1.shape)
    new_shape2 = list(image2.shape)

    if image1.shape[0] < image2.shape[0]:
        new_shape1[0] = image2.shape[0]
    elif image1.shape[0] > image2.shape[0]:
        new_shape2[0] = image1.shape[0]

    if image1.shape[1] < image2.shape[1]:
        new_shape1[1] = image2.shape[1]
    elif image1.shape[1] > image2.shape[1]:
        new_shape2[1] = image1.shape[1]

    # BUG FIX: compare as tuples -- ``list != tuple`` is always True in
    # Python, so the originals were needlessly copied even when no
    # padding was required.
    if tuple(new_shape1) != image1.shape:
        new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
        new_image1[:image1.shape[0], :image1.shape[1]] = image1
        image1 = new_image1

    if tuple(new_shape2) != image2.shape:
        new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
        new_image2[:image2.shape[0], :image2.shape[1]] = image2
        image2 = new_image2

    image = np.concatenate([image1, image2], axis=1)

    # Keypoints of the second image are shifted right by the width of
    # the first image.
    offset = image1.shape

    if not only_matches:
        ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
                   facecolors='none', edgecolors=keypoints_color)
        ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
                   facecolors='none', edgecolors=keypoints_color)

    ax.imshow(image)
    ax.axis((0, 2 * offset[1], offset[0], 0))

    for i in range(matches.shape[0]):
        idx1 = matches[i, 0]
        idx2 = matches[i, 1]

        if matches_color is None:
            # BUG FIX: matplotlib expects an RGB(A) sequence of length 3
            # or 4; ``np.random.rand(3, 1)`` is a 2-D array and is
            # rejected by newer matplotlib versions.
            color = np.random.rand(3)
        else:
            color = matches_color

        ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
                (keypoints1[idx1, 0], keypoints2[idx2, 0]),
                '-', color=color)
def _prepare_grayscale_input_2D(image):
    """Validate that ``image`` is 2-D grayscale and convert it to float."""
    squeezed = np.squeeze(image)
    if squeezed.ndim != 2:
        raise ValueError("Only 2-D gray-scale images supported.")
    return img_as_float(squeezed)
def _mask_border_keypoints(image_shape, keypoints, distance):
    """Mask coordinates that are within certain distance from the image border.

    Parameters
    ----------
    image_shape : (2, ) array_like
        Shape of the image as ``(rows, cols)``.
    keypoints : (N, 2) array
        Keypoint coordinates as ``(rows, cols)``.
    distance : int
        Image border distance.

    Returns
    -------
    mask : (N, ) bool array
        Mask indicating if pixels are within the image (``True``) or in the
        border region of the image (``False``).

    """
    n_rows, n_cols = image_shape[0], image_shape[1]
    inside_rows = ((keypoints[:, 0] > distance - 1)
                   & (keypoints[:, 0] < n_rows - distance + 1))
    inside_cols = ((keypoints[:, 1] > distance - 1)
                   & (keypoints[:, 1] < n_cols - distance + 1))
    return inside_rows & inside_cols
|
{
"content_hash": "0e7184a87652f5aa03ed1adf871e2185",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 29.354037267080745,
"alnum_prop": 0.5854845535336437,
"repo_name": "chintak/scikit-image",
"id": "8ee2baf8470ae3d11bb94141fd35015ce6685bcc",
"size": "4726",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "skimage/feature/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70225"
},
{
"name": "CSS",
"bytes": "3629"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "2115723"
},
{
"name": "Shell",
"bytes": "3346"
}
],
"symlink_target": ""
}
|
"""
Support for Västtrafik public transport.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.vasttrafik/
"""
from datetime import datetime
from datetime import timedelta
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# PyPI package providing the Västtrafik journey-planner client.
REQUIREMENTS = ['vtjp==0.1.14']
_LOGGER = logging.getLogger(__name__)
# Keys exposed in the sensor's device state attributes.
ATTR_ACCESSIBILITY = 'accessibility'
ATTR_DIRECTION = 'direction'
ATTR_LINE = 'line'
ATTR_TRACK = 'track'
ATTRIBUTION = "Data provided by Västtrafik"
# Configuration keys accepted under each entry of the `departures` list.
CONF_DELAY = 'delay'
CONF_DEPARTURES = 'departures'
CONF_FROM = 'from'
CONF_HEADING = 'heading'
CONF_LINES = 'lines'
CONF_KEY = 'key'
CONF_SECRET = 'secret'
DEFAULT_DELAY = 0  # minutes added to "now" when querying the board
ICON = 'mdi:train'
# Rate-limit polling of the public API to once every two minutes.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_KEY): cv.string,
    vol.Required(CONF_SECRET): cv.string,
    vol.Optional(CONF_DEPARTURES): [{
        vol.Required(CONF_FROM): cv.string,
        vol.Optional(CONF_DELAY, default=DEFAULT_DELAY): cv.positive_int,
        vol.Optional(CONF_HEADING): cv.string,
        vol.Optional(CONF_LINES, default=[]):
            vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_NAME): cv.string}]
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the departure sensor."""
    import vasttrafik

    planner = vasttrafik.JournyPlanner(
        config.get(CONF_KEY), config.get(CONF_SECRET))

    # One sensor per configured departure entry.
    sensors = [
        VasttrafikDepartureSensor(
            vasttrafik, planner, entry.get(CONF_NAME),
            entry.get(CONF_FROM), entry.get(CONF_HEADING),
            entry.get(CONF_LINES), entry.get(CONF_DELAY))
        for entry in config.get(CONF_DEPARTURES)]
    add_entities(sensors, True)
class VasttrafikDepartureSensor(Entity):
    """Implementation of a Vasttrafik Departure Sensor.

    State is the next departure time (real-time when available) from the
    configured stop, optionally filtered by heading and line numbers.
    """

    def __init__(self, vasttrafik, planner, name, departure, heading,
                 lines, delay):
        """Initialize the sensor."""
        self._vasttrafik = vasttrafik
        self._planner = planner
        self._name = name or departure
        # Resolve free-text stop names to API location objects once.
        self._departure = planner.location_name(departure)[0]
        self._heading = (planner.location_name(heading)[0]
                         if heading else None)
        self._lines = lines if lines else None
        self._delay = timedelta(minutes=delay)
        self._departureboard = None
        self._state = None
        self._attributes = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the frontend."""
        return ICON

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    @property
    def state(self):
        """Return the next departure time."""
        return self._state

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the departure board."""
        try:
            self._departureboard = self._planner.departureboard(
                self._departure['id'],
                direction=self._heading['id'] if self._heading else None,
                date=datetime.now() + self._delay)
        except self._vasttrafik.Error:
            # Token likely expired; refresh it and keep the previous
            # board for this cycle (best effort).
            _LOGGER.debug("Unable to read departure board, updating token")
            self._planner.update_token()

        if not self._departureboard:
            _LOGGER.debug(
                "No departures from %s heading %s",
                self._departure['name'],
                self._heading['name'] if self._heading else 'ANY')
            self._state = None
            self._attributes = {}
        else:
            # Use the first departure that passes the lines filter.
            for departure in self._departureboard:
                line = departure.get('sname')
                if not self._lines or line in self._lines:
                    # BUG FIX: read the matching departure, not
                    # ``self._departureboard[0]`` -- the old code showed
                    # the first board entry's time even when the lines
                    # filter matched a later departure.
                    if 'rtTime' in departure:
                        self._state = departure['rtTime']
                    else:
                        self._state = departure['time']
                    params = {
                        ATTR_ACCESSIBILITY: departure.get('accessibility'),
                        ATTR_ATTRIBUTION: ATTRIBUTION,
                        ATTR_DIRECTION: departure.get('direction'),
                        ATTR_LINE: departure.get('sname'),
                        ATTR_TRACK: departure.get('track'),
                    }
                    # Drop empty/None attributes.
                    self._attributes = {
                        k: v for k, v in params.items() if v}
                    break
|
{
"content_hash": "c2579ba4d5f3feec9328b4da049dea6d",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 75,
"avg_line_length": 33.28378378378378,
"alnum_prop": 0.6033292732440114,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "8148a5c2fc7bdf2b43ce0e8303595a0e81ded94a",
"size": "4928",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/vasttrafik.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
import json
import numpy as np
from shapely.geometry import LineString, Point
from modules.planning.proto import planning_pb2
from common.statistical_analyzer import StatisticalAnalyzer
from common.statistical_analyzer import PrintColors
from common.distribution_analyzer import DistributionAnalyzer
from common.error_code_analyzer import ErrorCodeAnalyzer
from common.error_msg_analyzer import ErrorMsgAnalyzer
from common.frechet_distance import frechet_distance
class PlannigAnalyzer:
    """planning analyzer

    Consumes Apollo planning ``ADCTrajectory`` messages one cycle at a
    time via ``put`` and accumulates statistics that the ``print_*``
    methods report.  Written for Python 2 (print statements).
    NOTE(review): keeps the original (misspelled) class name for
    backward compatibility with existing callers.
    """

    def __init__(self, is_simulation):
        """init

        ``is_simulation``: when True, only simulation metrics (hard
        braking, init-point curvature) are collected; otherwise latency,
        error-code and trajectory-type statistics are gathered.
        """
        self.module_latency = []
        self.trajectory_type_dist = {}
        self.estop_reason_dist = {}
        self.error_code_analyzer = ErrorCodeAnalyzer()
        self.error_msg_analyzer = ErrorMsgAnalyzer()
        self.last_adc_trajectory = None
        self.frechet_distance_list = []
        self.is_simulation = is_simulation
        self.hard_break_list = []
        self.total_cycle_num = 0
        self.init_point_curvature = []
        self.init_point_dcurvature = []

    def put(self, adc_trajectory):
        """put: ingest one ADCTrajectory message and update running stats."""
        self.total_cycle_num += 1
        if not self.is_simulation:
            # On-road analysis: module latency and header error codes/msgs.
            latency = adc_trajectory.latency_stats.total_time_ms
            self.module_latency.append(latency)
            self.error_code_analyzer.put(
                adc_trajectory.header.status.error_code)
            self.error_msg_analyzer.put(adc_trajectory.header.status.msg)
            # Histogram of trajectory types, keyed by enum name.
            traj_type = planning_pb2.ADCTrajectory.TrajectoryType.Name(
                adc_trajectory.trajectory_type)
            self.trajectory_type_dist[traj_type] = \
                self.trajectory_type_dist.get(traj_type, 0) + 1
            if adc_trajectory.estop.is_estop:
                self.estop_reason_dist[adc_trajectory.estop.reason] = \
                    self.estop_reason_dist.get(
                        adc_trajectory.estop.reason, 0) + 1
        if self.is_simulation:
            # Hard braking: any trajectory point with a <= -2 m/s^2.
            for point in adc_trajectory.trajectory_point:
                if point.a <= -2.0:
                    self.hard_break_list.append(point.a)
            # Magnitude of curvature (kappa) and its derivative at the
            # planning init point, when present.
            if adc_trajectory.debug.planning_data.HasField('init_point'):
                self.init_point_curvature.append(
                    abs(adc_trajectory.debug.planning_data.init_point.path_point.kappa))
                self.init_point_dcurvature.append(
                    abs(adc_trajectory.debug.planning_data.init_point.path_point.dkappa))
        # TODO(yifei) temporarily disable frechet distance
        #if self.last_adc_trajectory is not None and self.is_simulation:
        #    current_path, last_path = self.find_common_path(adc_trajectory,
        #                                                    self.last_adc_trajectory)
        #    if len(current_path) == 0 or len(last_path) == 0:
        #        dist = 0
        #    else:
        #        dist = frechet_distance(current_path, last_path)
        #        if dist is not None:
        #            self.frechet_distance_list.append(dist)
        self.last_adc_trajectory = adc_trajectory

    def find_common_path(self, current_adc_trajectory, last_adc_trajectory):
        """Return the leading segments (first ~5 m of path s) of two
        consecutive trajectories, trimmed so both cover the same stretch;
        ([], []) when either path is empty or there is no overlap.
        """
        current_path_points = current_adc_trajectory.trajectory_point
        last_path_points = last_adc_trajectory.trajectory_point
        current_path = []
        for point in current_path_points:
            current_path.append([point.path_point.x, point.path_point.y])
            if point.path_point.s > 5.0:
                break
        last_path = []
        for point in last_path_points:
            last_path.append([point.path_point.x, point.path_point.y])
            if point.path_point.s > 5.0:
                break
        if len(current_path) == 0 or len(last_path) == 0:
            return [], []
        current_ls = LineString(current_path)
        last_ls = LineString(last_path)
        current_start_point = Point(current_path[0])
        # Drop the part of the previous path that lies behind the
        # current path's start point.
        dist = last_ls.project(current_start_point)
        cut_lines = self.cut(last_ls, dist)
        if len(cut_lines) == 1:
            return [], []
        last_ls = cut_lines[1]
        # Trim whichever line extends further so both end together.
        dist = current_ls.project(Point(last_path[-1]))
        if dist <= current_ls.length:
            current_ls = self.cut(current_ls, dist)[0]
        else:
            dist = last_ls.project(Point(current_path[-1]))
            last_ls = self.cut(last_ls, dist)[0]
        return current_ls.coords, last_ls.coords

    def cut(self, line, distance):
        """Split ``line`` at ``distance`` along it.

        Returns a one-element list (copy of the line) when the cut lies
        outside the line, otherwise two LineString pieces.
        NOTE(review): if no coordinate's projection reaches ``distance``
        the loop falls through and implicitly returns None; callers
        index the result, so confirm this cannot occur in practice.
        """
        if distance <= 0.0 or distance >= line.length:
            return [LineString(line)]
        coords = list(line.coords)
        for i, p in enumerate(coords):
            pd = line.project(Point(p))
            if pd == distance:
                # Cut exactly at an existing vertex; share it.
                return [
                    LineString(coords[:i+1]),
                    LineString(coords[i:])]
            if pd > distance:
                # Cut between vertices; interpolate the split point.
                cp = line.interpolate(distance)
                return [
                    LineString(coords[:i] + [(cp.x, cp.y)]),
                    LineString([(cp.x, cp.y)] + coords[i:])]

    def print_latency_statistics(self):
        """print_latency_statistics: dump on-road stats to stdout (py2)."""
        print "\n\n"
        print PrintColors.HEADER + "--- Planning Latency (ms) ---" + \
            PrintColors.ENDC
        StatisticalAnalyzer().print_statistical_results(self.module_latency)
        print PrintColors.HEADER + "--- Planning Trajectroy Type Distribution" \
            " ---" + PrintColors.ENDC
        DistributionAnalyzer().print_distribution_results(
            self.trajectory_type_dist)
        print PrintColors.HEADER + "--- Planning Estop Distribution" \
            " ---" + PrintColors.ENDC
        DistributionAnalyzer().print_distribution_results(
            self.estop_reason_dist)
        print PrintColors.HEADER + "--- Planning Error Code Distribution---" + \
            PrintColors.ENDC
        self.error_code_analyzer.print_results()
        print PrintColors.HEADER + "--- Planning Error Msg Distribution ---" + \
            PrintColors.ENDC
        self.error_msg_analyzer.print_results()
        print PrintColors.HEADER + "--- Planning Trajectory Frechet Distance (m) ---" + \
            PrintColors.ENDC
        StatisticalAnalyzer().print_statistical_results(self.frechet_distance_list)

    def print_simulation_results(self):
        """Dump simulation metrics (hard brakes, curvature stats, score)
        as a single JSON object on stdout."""
        results = {}
        results['hard_brake_cnt'] = len(self.hard_break_list)
        if len(self.init_point_curvature) > 0:
            # Entries are already absolute values (see put), so key=abs
            # is effectively a plain max here.
            results['curvature_max'] = max(self.init_point_curvature, key=abs)
            curvature_avg = np.average(np.absolute(self.init_point_curvature))
            results['curvature_avg'] = curvature_avg
        else:
            # TODO(yifei) will change to None after the change of dreamland
            results['curvature_max'] = -99999
            results['curvature_avg'] = -99999
        if len(self.init_point_dcurvature) > 0:
            results['dcurvature_max'] = max(self.init_point_dcurvature, key=abs)
            dcurvature_avg = np.average(np.absolute(self.init_point_dcurvature))
            results['dcurvature_avg'] = dcurvature_avg
        else:
            # TODO(yifei) will change to None after the change of dreamland
            results['dcurvature_max'] = -99999
            results['dcurvature_avg'] = -99999
        # Fraction of cycles that did not contain a hard brake.
        results['overall_score'] = 1 - results['hard_brake_cnt'] /\
            float(self.total_cycle_num)
        print json.dumps(results)

    def plot_path(self, plt, adc_trajectory):
        """Plot the first 5 m (by path s) of the trajectory in red."""
        path_coords = self.trim_path_by_distance(adc_trajectory, 5.0)
        x = []
        y = []
        for point in path_coords:
            x.append(point[0])
            y.append(point[1])
        plt.plot(x, y, 'r-', alpha=0.5)

    def plot_refpath(self, plt, adc_trajectory):
        """Plot the planning reference line, clipped to the span of the
        first 5 m of the driven path, as a dashed blue line."""
        for path in adc_trajectory.debug.planning_data.path:
            if path.name != 'planning_reference_line':
                continue
            path_coords = self.trim_path_by_distance(adc_trajectory, 5.0)
            ref_path_coord = []
            for point in path.path_point:
                ref_path_coord.append([point.x, point.y])
            ref_path = LineString(ref_path_coord)
            start_point = Point(path_coords[0])
            dist = ref_path.project(start_point)
            # Keep the reference line from the driven path's start ...
            ref_path = self.cut(ref_path, dist)[1]
            end_point = Point(path_coords[-1])
            dist = ref_path.project(end_point)
            # ... up to the driven path's end.
            ref_path = self.cut(ref_path, dist)[0]
            x = []
            y = []
            for point in ref_path.coords:
                x.append(point[0])
                y.append(point[1])
            plt.plot(x, y, 'b--', alpha=0.5)

    def trim_path_by_distance(self, adc_trajectory, s):
        """Return [x, y] pairs of trajectory points with path s <= ``s``."""
        path_coords = []
        path_points = adc_trajectory.trajectory_point
        for point in path_points:
            if point.path_point.s <= s:
                path_coords.append([point.path_point.x, point.path_point.y])
        return path_coords
|
{
"content_hash": "07bfaef0f24b314587b500f4c88037b4",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 89,
"avg_line_length": 39.924778761061944,
"alnum_prop": 0.5777457608334257,
"repo_name": "msbeta/apollo",
"id": "6c3b8912e4c474a21cd09731f9bd120d70dddc15",
"size": "9807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/tools/record_analyzer/module_planning_analyzer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1117"
},
{
"name": "C",
"bytes": "23596"
},
{
"name": "C++",
"bytes": "15304648"
},
{
"name": "CMake",
"bytes": "3601"
},
{
"name": "CSS",
"bytes": "39401"
},
{
"name": "Cuda",
"bytes": "91842"
},
{
"name": "Dockerfile",
"bytes": "2032"
},
{
"name": "GLSL",
"bytes": "7033"
},
{
"name": "HTML",
"bytes": "21068"
},
{
"name": "JavaScript",
"bytes": "331412"
},
{
"name": "Python",
"bytes": "1644200"
},
{
"name": "Shell",
"bytes": "273395"
},
{
"name": "Smarty",
"bytes": "33099"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the ``CoreReplicationTopology`` model and its M2M table."""
        # Adding model 'CoreReplicationTopology'
        db.create_table(u'physical_corereplicationtopology', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal(u'physical', ['CoreReplicationTopology'])
        # Adding M2M table for field replication_topology on 'CoreReplicationTopology'
        # shorten_name keeps the table name within the backend's limit.
        m2m_table_name = db.shorten_name(u'physical_corereplicationtopology_replication_topology')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('corereplicationtopology', models.ForeignKey(orm[u'physical.corereplicationtopology'], null=False)),
            ('replicationtopology', models.ForeignKey(orm[u'physical.replicationtopology'], null=False))
        ))
        # Each (core topology, topology) pair may be linked only once.
        db.create_unique(m2m_table_name, ['corereplicationtopology_id', 'replicationtopology_id'])
def backwards(self, orm):
# Deleting model 'CoreReplicationTopology'
db.delete_table(u'physical_corereplicationtopology')
# Removing M2M table for field replication_topology on 'CoreReplicationTopology'
db.delete_table(db.shorten_name(u'physical_corereplicationtopology_replication_topology'))
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.corereplicationtopology': {
'Meta': {'object_name': 'CoreReplicationTopology'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'replication_topology': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'infra'", 'null': 'True', 'to': u"orm['physical.Pool']"}),
'service_account': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ssl_mode': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_path_ol7': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_description': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provisioner': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tsuru_deploy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'private_key': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.ip': {
'Meta': {'object_name': 'Ip'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'persistense_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_persisted_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.pool': {
'Meta': {'object_name': 'Pool'},
'cluster_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dbaas_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'pools'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'project_id': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'rancher_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rancher_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'storageclass': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'pools'", 'symmetrical': 'False', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vpc': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'configure_log': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vipinstancegroup': {
'Meta': {'unique_together': "((u'vip', u'name'),)", 'object_name': 'VipInstanceGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']"})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
{
"content_hash": "9a4e4d9e8e29c7594085933b9287ae13",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 239,
"avg_line_length": 93.05489260143199,
"alnum_prop": 0.5647088997178764,
"repo_name": "globocom/database-as-a-service",
"id": "c5b0a451ae0ab151b1753db5639378cda17e65b7",
"size": "39014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/physical/migrations/0118_auto__add_corereplicationtopology.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
}
|
"""Test the wallet."""
from decimal import Decimal
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class WalletTest(BitcoinTestFramework):
    """Functional test exercising basic wallet RPC behavior across four nodes."""

    def set_test_params(self):
        """Run four nodes on a fresh (clean) chain; allow non-standard txs."""
        self.num_nodes = 4
        self.extra_args = [[
            "-acceptnonstdtxn=1",
        ]] * self.num_nodes
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        """Skip the test when the build has no wallet support."""
        self.skip_if_no_wallet()

    def setup_network(self):
        """Start all nodes but connect only nodes 0-2; node 3 joins later."""
        self.setup_nodes()
        # Only need nodes 0-2 running at start of test
        self.stop_node(3)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all(self.nodes[0:3])

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
        return curr_balance

    def get_vsize(self, txn):
        """Return the virtual size of a raw transaction hex string."""
        return self.nodes[0].decoderawtransaction(txn)['vsize']

    def run_test(self):
        """Exercise basic wallet RPCs end to end: balances, gettxout, output
        locking, raw transactions, -walletbroadcast, unicode labels, wallet
        maintenance flags, mempool chain limits and gettransaction verbosity.

        NOTE: steps are strictly order-dependent; each section builds on the
        chain/wallet state left behind by the previous one.
        """
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        # A fresh coinbase is immature: counted in immature_balance, not balance.
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)
        self.sync_all(self.nodes[0:3])
        self.nodes[1].generate(101)
        self.sync_all(self.nodes[0:3])
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that only first and second nodes have UTXOs
        utxos = self.nodes[0].listunspent()
        assert_equal(len(utxos), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        self.log.info("test gettxout")
        confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
        # First, outputs that are unspent both in the chain and in the
        # mempool should appear with or without include_mempool
        txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
        assert_equal(txout['value'], 50)
        txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
        assert_equal(txout['value'], 50)
        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.log.info("test gettxout (second part)")
        # utxo spent in mempool should be visible if you exclude mempool
        # but invisible if you include mempool
        txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
        assert_equal(txout['value'], 50)
        txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
        assert txout is None
        # new utxo from mempool should be invisible if you exclude mempool
        # but visible if you include mempool
        txout = self.nodes[0].gettxout(mempool_txid, 0, False)
        assert txout is None
        txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
        txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
        # note the mempool tx will have randomly assigned indices
        # but 10 will go to node2 and the rest will go to node0
        balance = self.nodes[0].getbalance()
        assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)
        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all(self.nodes[0:3])
        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)
        # Malformed lockunspent arguments must be rejected with distinct errors.
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "0000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
                                self.nodes[2].lockunspent, False,
                                [{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
        assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
                                self.nodes[2].lockunspent, False,
                                [{"txid": unspent_0["txid"], "vout": 999}])
        # An output should be unlocked when spent
        unspent_0 = self.nodes[1].listunspent()[0]
        self.nodes[1].lockunspent(False, [unspent_0])
        tx = self.nodes[1].createrawtransaction([unspent_0], { self.nodes[1].getnewaddress() : 1 })
        tx = self.nodes[1].fundrawtransaction(tx)['hex']
        tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"]
        self.nodes[1].sendrawtransaction(tx)
        assert_equal(len(self.nodes[1].listlockunspent()), 0)
        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all(self.nodes[0:3])
        # node0 should end up with 100 btc in block rewards plus fees, but
        # minus the 21 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100 - 21)
        assert_equal(self.nodes[2].getbalance(), 21)
        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)
        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
            outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransactionwithwallet(raw_tx))
        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], 0)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], 0)
        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all(self.nodes[0:3])
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 94)
        # Verify that a spent output cannot be locked anymore
        spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
        assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])
        # Send 10 BTC normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all(self.nodes[0:3])
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
        assert_equal(self.nodes[0].getbalance(), Decimal('10'))
        # Send 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all(self.nodes[0:3])
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
        # Sendmany 10 BTC
        txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all(self.nodes[0:3])
        node_0_bal += Decimal('10')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)
        # Sendmany 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all(self.nodes[0:3])
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
        # Bring node 3 online for the zero-value-output test below.
        self.start_node(3)
        connect_nodes_bi(self.nodes, 0, 3)
        self.sync_all()
        # check if we can list zero value tx as available coins
        # 1. create raw_tx
        # 2. hex-changed one output to 0.0
        # 3. sign and send
        # 4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent(query_options={'minimumAmount': '49.998'})[0]
        inputs = [{"txid": usp['txid'], "vout": usp['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
        raw_tx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000")  # replace 11.11 with 0.0 (int32)
        signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(raw_tx)
        decoded_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex'])
        zero_value_txid = decoded_raw_tx['txid']
        self.nodes[1].sendrawtransaction(signed_raw_tx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)  # mine a block
        self.sync_all()
        unspent_txs = self.nodes[0].listunspent()  # zero value tx must be in listunspent output
        found = False
        for uTx in unspent_txs:
            if uTx['txid'] == zero_value_txid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert found
        # do some -walletbroadcast tests
        self.stop_nodes()
        self.start_node(0, ["-walletbroadcast=0"])
        self.start_node(1, ["-walletbroadcast=0"])
        self.start_node(2, ["-walletbroadcast=0"])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all(self.nodes[0:3])
        txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
        self.nodes[1].generate(1)  # mine a block, tx should not be in there
        self.sync_all(self.nodes[0:3])
        assert_equal(self.nodes[2].getbalance(), node_2_bal)  # should not be changed because tx was not broadcasted
        # now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex'])
        self.nodes[1].generate(1)
        self.sync_all(self.nodes[0:3])
        node_2_bal += 2
        tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        # create another tx
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        # restart the nodes with -walletbroadcast=1
        self.stop_nodes()
        self.start_node(0)
        self.start_node(1)
        self.start_node(2)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_blocks(self.nodes[0:3])
        self.nodes[0].generate(1)
        self.sync_blocks(self.nodes[0:3])
        node_2_bal += 2
        # tx should be added to balance because after restarting the nodes tx should be broadcast
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        # send a tx with value in a string (PR#6380 +)
        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-2'))
        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-0.0001'))
        # check if JSON parser can handle scientific notation in strings
        txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        tx_obj = self.nodes[0].gettransaction(txid)
        assert_equal(tx_obj['amount'], Decimal('-0.0001'))
        # General checks for errors from incorrect inputs
        # This will raise an exception because the amount type is wrong
        assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
        # This will raise an exception since generate does not accept a string
        assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
        # This will raise an exception for the invalid private key format
        assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid")
        # This will raise an exception for importing an address with the P2SH flag
        temp_address = self.nodes[1].getnewaddress()
        assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True)
        # This will raise an exception for attempting to dump the private key of an address you do not own
        assert_raises_rpc_error(-3, "Address does not refer to a key", self.nodes[0].dumpprivkey, temp_address)
        # This will raise an exception for attempting to get the private key of an invalid Bitcoin address
        assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].dumpprivkey, "invalid")
        # This will raise an exception for attempting to set a label for an invalid Bitcoin address
        assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].setlabel, "invalid address", "label")
        # This will raise an exception for importing an invalid address
        assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid")
        # This will raise an exception for attempting to import a pubkey that isn't in hex
        assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")
        # This will raise an exception for importing an invalid pubkey
        assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")
        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all(self.nodes[0:3])
        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)
        # 3. Validate that the imported address is watch-only on node1
        assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"]
        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})
        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)
        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})
        # Mine a block from node0 to an address from node1
        coinbase_addr = self.nodes[1].getnewaddress()
        block_hash = self.nodes[0].generatetoaddress(1, coinbase_addr)[0]
        coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0]
        self.sync_all(self.nodes[0:3])
        # Check that the txid and balance is found by node1
        self.nodes[1].gettransaction(coinbase_txid)
        # check if wallet or blockchain maintenance changes the balance
        self.sync_all(self.nodes[0:3])
        blocks = self.nodes[0].generate(2)
        self.sync_all(self.nodes[0:3])
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()
        # Check modes:
        #   - True: unicode escaped as \u....
        #   - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].rpc.ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for label in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getnewaddress()
                self.nodes[0].setlabel(addr, label)
                assert_equal(self.nodes[0].getaddressinfo(addr)['label'], label)
                assert label in self.nodes[0].listlabels()
        self.nodes[0].rpc.ensure_ascii = True  # restore to default
        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        chainlimit = 6
        for m in maintenance:
            self.log.info("check " + m)
            self.stop_nodes()
            # set lower ancestor limit for later
            self.start_node(0, [m, "-limitancestorcount=" + str(chainlimit)])
            self.start_node(1, [m, "-limitancestorcount=" + str(chainlimit)])
            self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
            if m == '-reindex':
                # reindex will leave rpc warm up "early"; Wait for it to finish
                wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
        # ==Check that wallet prefers to use coins that don't exceed mempool limits =====
        # Get all non-zero utxos together
        chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
        singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
        self.nodes[0].generate(1)
        node0_balance = self.nodes[0].getbalance()
        # Split into two chains
        rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
        signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
        singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)
        # Make a long chain of unconfirmed payments without hitting mempool limit
        # Each tx we make leaves only one output of change on a chain 1 longer
        # Since the amount to send is always much less than the outputs, we only ever need one output
        # So we should be able to generate exactly chainlimit txs for each original output
        sending_addr = self.nodes[1].getnewaddress()
        txid_list = []
        for i in range(chainlimit * 2):
            txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
        assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
        assert_equal(len(txid_list), chainlimit * 2)
        # Without walletrejectlongchains, we will still generate a txid
        # The tx will be stored in the wallet but not accepted to the mempool
        extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
        assert extra_txid not in self.nodes[0].getrawmempool()
        assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]
        self.nodes[0].abandontransaction(extra_txid)
        total_txs = len(self.nodes[0].listtransactions("*", 99999))
        # Try with walletrejectlongchains
        # Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
        self.stop_node(0)
        self.start_node(0, extra_args=["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)])
        # wait for loadmempool
        timeout = 10
        while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2):
            time.sleep(0.5)
            timeout -= 0.5
        assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2)
        node0_balance = self.nodes[0].getbalance()
        # With walletrejectlongchains we will not create the tx and store it in our wallet.
        assert_raises_rpc_error(-4, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
        # Verify nothing new in wallet
        assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))
        # Test getaddressinfo on external address. Note that these addresses are taken from disablewallet.py
        assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
        address_info = self.nodes[0].getaddressinfo("mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
        assert_equal(address_info['address'], "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
        assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac")
        assert not address_info["ismine"]
        assert not address_info["iswatchonly"]
        assert not address_info["isscript"]
        assert not address_info["ischange"]
        # Test getaddressinfo 'ischange' field on change address.
        self.nodes[0].generate(1)
        destination = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(destination, 0.123)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]]
        assert len(output_addresses) > 1
        for address in output_addresses:
            ischange = self.nodes[0].getaddressinfo(address)['ischange']
            assert_equal(ischange, address != destination)
            if ischange:
                change = address
        # Labeling a change address strips its 'ischange' status.
        self.nodes[0].setlabel(change, 'foobar')
        assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False)
        # Test gettransaction response with different arguments.
        self.log.info("Testing gettransaction response with different arguments...")
        self.nodes[0].setlabel(change, 'baz')
        baz = self.nodes[0].listtransactions(label="baz", count=1)[0]
        expected_receive_vout = {"label": "baz",
                                 "address": baz["address"],
                                 "amount": baz["amount"],
                                 "category": baz["category"],
                                 "vout": baz["vout"]}
        expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee',
                                     'hex', 'time', 'timereceived', 'trusted', 'txid', 'walletconflicts'})
        verbose_field = "decoded"
        expected_verbose_fields = expected_fields | {verbose_field}
        self.log.debug("Testing gettransaction response without verbose")
        tx = self.nodes[0].gettransaction(txid=txid)
        assert_equal(set([*tx]), expected_fields)
        assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
        self.log.debug("Testing gettransaction response with verbose set to False")
        tx = self.nodes[0].gettransaction(txid=txid, verbose=False)
        assert_equal(set([*tx]), expected_fields)
        assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
        self.log.debug("Testing gettransaction response with verbose set to True")
        tx = self.nodes[0].gettransaction(txid=txid, verbose=True)
        assert_equal(set([*tx]), expected_verbose_fields)
        assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
        assert_equal(tx[verbose_field], self.nodes[0].decoderawtransaction(tx["hex"]))
# Script entry point: run the wallet functional test.
if __name__ == '__main__':
    WalletTest().main()
|
{
"content_hash": "d29ffe4c3c45a09493fa3eac9c434874",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 193,
"avg_line_length": 50.74716981132075,
"alnum_prop": 0.6284949434860202,
"repo_name": "nikkitan/bitcoin",
"id": "ce04110240e6da032b6346492ac01e14214752a0",
"size": "27117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_basic.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "771098"
},
{
"name": "C++",
"bytes": "6472795"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "203213"
},
{
"name": "Makefile",
"bytes": "123995"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1626583"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "98382"
}
],
"symlink_target": ""
}
|
class CException(Exception):
    """Raised when a call into a C library returns a nonzero error code."""

    # Class-level defaults; instances overwrite both in __init__.
    message = ''
    err_no = -1

    def __init__(self, err_no, message=''):
        self.err_no = err_no
        self.message = message

    def __str__(self):
        # A caller-supplied message takes precedence over the generic text.
        if self.message != '':
            return '{}. (Error {})'.format(self.message, self.err_no)
        return ('An Unknown C exception occurred! Error: {}'
                .format(self.err_no))
def c_svd_update_feature(train_points, users, user_offsets, movies, residuals,
                         movie_averages, feature, num_features, learn_rate,
                         k_factor):
    """Train one SVD feature by delegating to the compiled C library ``svd.so``.

    Arrays are handed to C as raw pointers (numpy ``.ctypes.data``), so the C
    routine presumably updates them in place -- confirm against the C source;
    nothing is returned on success.

    Raises:
        CException: if the C routine returns a nonzero error code.
    """
    # Imports are function-local so merely importing this module does not
    # require ctypes or the project path constants.
    import ctypes
    from ctypes import c_void_p, c_int32, c_float
    import os
    from utils.data_paths import LIBRARY_DIR_PATH
    # Sizes are taken from the leading array dimension of each input.
    num_train_points = train_points.shape[0]
    num_users = users.shape[0]
    num_movies = movies.shape[0]
    library_file_name = 'svd.so'
    library_file_path = os.path.join(LIBRARY_DIR_PATH, library_file_name)
    svd_lib = ctypes.cdll.LoadLibrary(library_file_path)
    c_update_feature = svd_lib.c_update_feature
    # Argument order must match the C signature exactly (see inline notes).
    returned_value = c_update_feature(
        c_void_p(train_points.ctypes.data),    # (void*) train_points
        c_int32(num_train_points),             # (int) num_train_points
        c_void_p(users.ctypes.data),           # (void*) users
        c_void_p(user_offsets.ctypes.data),    # (void*) user_offsets
        c_int32(num_users),                    # (int) num_users
        c_void_p(movies.ctypes.data),          # (void*) movies
        c_void_p(movie_averages.ctypes.data),  # (void*) movie_averages
        c_int32(num_movies),                   # (int) num_movies
        c_void_p(residuals.ctypes.data),       # (void*) residuals
        c_float(learn_rate),                   # (float) learn_rate
        c_int32(feature),                      # (int) feature
        c_int32(num_features),                 # (int) num_features
        c_float(k_factor)                      # (float) k_factor
    )
    if returned_value != 0:
        raise CException(returned_value)
def c_svd_euclidean_train_epoch(train_points, users, user_offsets, movies,
                                movie_averages, num_features, learn_rate,
                                k_factor):
    """Run one training epoch via the compiled C library ``svd_euclidean.so``.

    Unlike :func:`c_svd_update_feature`, this variant takes no per-feature or
    residuals arguments. Arrays are handed to C as raw pointers (numpy
    ``.ctypes.data``), so the C routine presumably updates them in place --
    confirm against the C source; nothing is returned on success.

    Raises:
        CException: if the C routine returns a nonzero error code.
    """
    # Function-local imports keep module import lightweight.
    import ctypes
    from ctypes import c_void_p, c_int32, c_float
    import os
    from utils.data_paths import LIBRARY_DIR_PATH
    # Sizes are taken from the leading array dimension of each input.
    num_train_points = train_points.shape[0]
    num_users = users.shape[0]
    num_movies = movies.shape[0]
    library_file_name = 'svd_euclidean.so'
    library_file_path = os.path.join(LIBRARY_DIR_PATH, library_file_name)
    svd_euclidean_lib = ctypes.cdll.LoadLibrary(library_file_path)
    c_train_epoch = svd_euclidean_lib.c_train_epoch
    # Argument order must match the C signature exactly (see inline notes).
    returned_value = c_train_epoch(
        c_void_p(train_points.ctypes.data),    # (void*) train_points
        c_int32(num_train_points),             # (int) num_train_points
        c_void_p(users.ctypes.data),           # (void*) users
        c_void_p(user_offsets.ctypes.data),    # (void*) user_offsets
        c_int32(num_users),                    # (int) num_users
        c_void_p(movies.ctypes.data),          # (void*) movies
        c_void_p(movie_averages.ctypes.data),  # (void*) movie_averages
        c_int32(num_movies),                   # (int) num_movies
        c_float(learn_rate),                   # (float) learn_rate
        c_int32(num_features),                 # (int) num_features
        c_float(k_factor)                      # (float) k_factor
    )
    if returned_value != 0:
        raise CException(returned_value)
|
{
"content_hash": "2d2c238d8029bae6d8c88f169afb290f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 46.03846153846154,
"alnum_prop": 0.55583402951824,
"repo_name": "jvanbrug/netflix",
"id": "73eaa654fd5d80916001b71ff9e995c2f9885703",
"size": "3591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/c_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5381"
},
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "97404"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
# Request a new token from the Twilio REST API and print the
# username field it carries.
token = client.tokens.create()
print(token.username)
|
{
"content_hash": "20882f1f751c54c02bd8f674f6255eda",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 32.27272727272727,
"alnum_prop": 0.7661971830985915,
"repo_name": "TwilioDevEd/api-snippets",
"id": "1df0c780a0bd1e6796fadf8ac8e5ea3e55afbb7e",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stun-turn/list-post-example/list-post-example.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
def reduce_columns(question):
    """
    Coursera formats survey responses as columns per available responses
    for multiple choice questions. This function reduces the dimensionality
    of each question to a single column.

    Typically the input should look like:
        question = reduce_columns(survey[survey_questions['Which of the following descriptions best characterizes you?']])

    The output is a numpy array of (session_user_id, response) pairs. This
    makes it easy to convert to an indexed pandas Series. Rows with no cell
    equal to 1 are recorded as 'No Response.'; when several cells are 1, the
    last matching column name wins.
    """
    # Local import: the original module used np without importing numpy,
    # which raised NameError at call time.
    import numpy as np

    single_column = []
    for row in question.values:
        # row[0] will always be the session_user_id.
        response_index = [i for i, val in enumerate(row) if val == 1]
        if not response_index:
            single_column.append((row[0], 'No Response.'))
        else:
            # Keep the last selected response, matching the original behavior.
            single_column.append((row[0], question.columns[response_index[-1]]))
    return np.array(single_column)
|
{
"content_hash": "eac584afd3d43cd67b1ded432283b734",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 118,
"avg_line_length": 30.825,
"alnum_prop": 0.5904298459042985,
"repo_name": "mnky9800n/GT-Coursera-Data-Wrangler",
"id": "496d27b39f42e7c92dfeb07126b90d0ee14c7eec",
"size": "1275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/reduce_columns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2259"
}
],
"symlink_target": ""
}
|
# aiohttp package root: re-exports the public API of the submodules below.
__version__ = '0.14.2a0'
from . import hdrs # noqa
from .protocol import * # noqa
from .connector import * # noqa
from .client import * # noqa
from .errors import * # noqa
from .helpers import * # noqa
from .parsers import * # noqa
from .streams import * # noqa
from .multidict import * # noqa
# Aggregate the submodules' __all__ lists so "from aiohttp import *"
# exposes exactly what each submodule declares public, plus hdrs and
# the package version.
__all__ = (client.__all__ +
           errors.__all__ +
           helpers.__all__ +
           parsers.__all__ +
           protocol.__all__ +
           connector.__all__ +
           streams.__all__ +
           multidict.__all__ +
           ['hdrs', '__version__'])
|
{
"content_hash": "c7596d9b060c05cb316764144d68b5b0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 35,
"avg_line_length": 25.130434782608695,
"alnum_prop": 0.5173010380622838,
"repo_name": "saghul/aiohttp",
"id": "ff5dd4100e935c976968f92cbd7ee21d8c566e0d",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7854"
},
{
"name": "Python",
"bytes": "629196"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
}
|
"""
Device for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/integrations/zha/
"""
import asyncio
from datetime import timedelta
from enum import Enum
import logging
import time
import zigpy.exceptions
import zigpy.quirks
from zigpy.profiles import zha, zll
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_time_interval
from .channels import EventRelayChannel
from .const import (
ATTR_ARGS,
ATTR_ATTRIBUTE,
ATTR_AVAILABLE,
ATTR_CLUSTER_ID,
ATTR_COMMAND,
ATTR_COMMAND_TYPE,
ATTR_ENDPOINT_ID,
ATTR_IEEE,
ATTR_LAST_SEEN,
ATTR_LQI,
ATTR_MANUFACTURER,
ATTR_MANUFACTURER_CODE,
ATTR_MODEL,
ATTR_NAME,
ATTR_NWK,
ATTR_POWER_SOURCE,
ATTR_QUIRK_APPLIED,
ATTR_QUIRK_CLASS,
ATTR_RSSI,
ATTR_VALUE,
CHANNEL_BASIC,
CHANNEL_POWER_CONFIGURATION,
CHANNEL_ZDO,
CLUSTER_COMMAND_SERVER,
CLUSTER_COMMANDS_CLIENT,
CLUSTER_COMMANDS_SERVER,
CLUSTER_TYPE_IN,
CLUSTER_TYPE_OUT,
POWER_BATTERY_OR_UNKNOWN,
POWER_MAINS_POWERED,
SIGNAL_AVAILABLE,
UNKNOWN_MANUFACTURER,
UNKNOWN_MODEL,
)
from .helpers import LogMixin
_LOGGER = logging.getLogger(__name__)
# Seconds without a message from a device before it is considered stale.
_KEEP_ALIVE_INTERVAL = 7200
# How often the periodic availability check runs.
_UPDATE_ALIVE_INTERVAL = timedelta(seconds=60)
# Number of missed check-ins tolerated before marking a device unavailable.
_CHECKIN_GRACE_PERIODS = 2
class DeviceStatus(Enum):
    """Lifecycle status of a ZHA device."""

    # Device object has been constructed but its channels are not set up yet.
    CREATED = 1
    # Channel initialization has completed successfully.
    INITIALIZED = 2
class ZHADevice(LogMixin):
    """ZHA Zigbee device object.

    Wraps a zigpy device, tracks its availability based on last_seen
    timestamps, and manages the cluster channels used by the entities.
    """
    def __init__(self, hass, zigpy_device, zha_gateway):
        """Initialize the gateway."""
        self.hass = hass
        self._zigpy_device = zigpy_device
        self._zha_gateway = zha_gateway
        # cluster_channels: keyed by channel name; _relay_channels: keyed by
        # unique_id; _all_channels holds both kinds in registration order.
        self.cluster_channels = {}
        self._relay_channels = {}
        self._all_channels = []
        self._available = False
        self._available_signal = "{}_{}_{}".format(
            self.name, self.ieee, SIGNAL_AVAILABLE
        )
        # NOTE(review): starts at the grace limit (_CHECKIN_GRACE_PERIODS is
        # 2), so the first stale _check_available pass marks the device
        # unavailable without attempting a check-in first -- confirm intended.
        self._checkins_missed_count = 2
        # Re-initialize channels whenever the availability signal fires.
        self._unsub = async_dispatcher_connect(
            self.hass, self._available_signal, self.async_initialize
        )
        self.quirk_applied = isinstance(self._zigpy_device, zigpy.quirks.CustomDevice)
        self.quirk_class = "{}.{}".format(
            self._zigpy_device.__class__.__module__,
            self._zigpy_device.__class__.__name__,
        )
        # Periodic availability poll, fires every _UPDATE_ALIVE_INTERVAL.
        self._available_check = async_track_time_interval(
            self.hass, self._check_available, _UPDATE_ALIVE_INTERVAL
        )
        self.status = DeviceStatus.CREATED
    @property
    def name(self):
        """Return device name."""
        return f"{self.manufacturer} {self.model}"
    @property
    def ieee(self):
        """Return ieee address for device."""
        return self._zigpy_device.ieee
    @property
    def manufacturer(self):
        """Return manufacturer for device."""
        if self._zigpy_device.manufacturer is None:
            return UNKNOWN_MANUFACTURER
        return self._zigpy_device.manufacturer
    @property
    def model(self):
        """Return model for device."""
        if self._zigpy_device.model is None:
            return UNKNOWN_MODEL
        return self._zigpy_device.model
    @property
    def manufacturer_code(self):
        """Return the manufacturer code for the device."""
        if self._zigpy_device.node_desc.is_valid:
            return self._zigpy_device.node_desc.manufacturer_code
        return None
    @property
    def nwk(self):
        """Return nwk for device."""
        return self._zigpy_device.nwk
    @property
    def lqi(self):
        """Return lqi for device."""
        return self._zigpy_device.lqi
    @property
    def rssi(self):
        """Return rssi for device."""
        return self._zigpy_device.rssi
    @property
    def last_seen(self):
        """Return last_seen for device."""
        return self._zigpy_device.last_seen
    @property
    def is_mains_powered(self):
        """Return true if device is mains powered."""
        return self._zigpy_device.node_desc.is_mains_powered
    @property
    def power_source(self):
        """Return the power source for the device."""
        return (
            POWER_MAINS_POWERED if self.is_mains_powered else POWER_BATTERY_OR_UNKNOWN
        )
    @property
    def is_router(self):
        """Return true if this is a routing capable device."""
        return self._zigpy_device.node_desc.is_router
    @property
    def is_coordinator(self):
        """Return true if this device represents the coordinator."""
        return self._zigpy_device.node_desc.is_coordinator
    @property
    def is_end_device(self):
        """Return true if this device is an end device."""
        return self._zigpy_device.node_desc.is_end_device
    @property
    def gateway(self):
        """Return the gateway for this device."""
        return self._zha_gateway
    @property
    def all_channels(self):
        """Return cluster channels and relay channels for device."""
        return self._all_channels
    @property
    def device_automation_triggers(self):
        """Return the device automation triggers for this device."""
        if hasattr(self._zigpy_device, "device_automation_triggers"):
            return self._zigpy_device.device_automation_triggers
        return None
    @property
    def available_signal(self):
        """Signal to use to subscribe to device availability changes."""
        return self._available_signal
    @property
    def available(self):
        """Return True if sensor is available."""
        return self._available
    def set_available(self, available):
        """Set availability from restore and prevent signals."""
        self._available = available
    def _check_available(self, *_):
        """Periodic availability check driven by time since last_seen."""
        if self.last_seen is None:
            self.update_available(False)
        else:
            difference = time.time() - self.last_seen
            if difference > _KEEP_ALIVE_INTERVAL:
                # Stale: attempt a check-in while within the grace budget,
                # otherwise mark the device unavailable.
                if self._checkins_missed_count < _CHECKIN_GRACE_PERIODS:
                    self._checkins_missed_count += 1
                    # LUMI devices are excluded from the active check-in.
                    if (
                        CHANNEL_BASIC in self.cluster_channels
                        and self.manufacturer != "LUMI"
                    ):
                        self.debug(
                            "Attempting to checkin with device - missed checkins: %s",
                            self._checkins_missed_count,
                        )
                        # Uncached read forces traffic to the device, which
                        # refreshes last_seen if it responds.
                        self.hass.async_create_task(
                            self.cluster_channels[CHANNEL_BASIC].get_attribute_value(
                                ATTR_MANUFACTURER, from_cache=False
                            )
                        )
                else:
                    self.update_available(False)
            else:
                self.update_available(True)
                self._checkins_missed_count = 0
    def update_available(self, available):
        """Set sensor availability."""
        if self._available != available and available:
            # Update the state the first time the device comes online
            async_dispatcher_send(self.hass, self._available_signal, False)
        # Always notify entity listeners of the current availability.
        async_dispatcher_send(
            self.hass, "{}_{}".format(self._available_signal, "entity"), available
        )
        self._available = available
    @property
    def device_info(self):
        """Return a device description for device."""
        ieee = str(self.ieee)
        time_struct = time.localtime(self.last_seen)
        update_time = time.strftime("%Y-%m-%dT%H:%M:%S", time_struct)
        return {
            ATTR_IEEE: ieee,
            ATTR_NWK: self.nwk,
            ATTR_MANUFACTURER: self.manufacturer,
            ATTR_MODEL: self.model,
            ATTR_NAME: self.name or ieee,
            ATTR_QUIRK_APPLIED: self.quirk_applied,
            ATTR_QUIRK_CLASS: self.quirk_class,
            ATTR_MANUFACTURER_CODE: self.manufacturer_code,
            ATTR_POWER_SOURCE: self.power_source,
            ATTR_LQI: self.lqi,
            ATTR_RSSI: self.rssi,
            ATTR_LAST_SEEN: update_time,
            ATTR_AVAILABLE: self.available,
        }
    def add_cluster_channel(self, cluster_channel):
        """Add cluster channel to device."""
        # only keep 1 power configuration channel
        if (
            cluster_channel.name is CHANNEL_POWER_CONFIGURATION
            and CHANNEL_POWER_CONFIGURATION in self.cluster_channels
        ):
            return
        if isinstance(cluster_channel, EventRelayChannel):
            self._relay_channels[cluster_channel.unique_id] = cluster_channel
            self._all_channels.append(cluster_channel)
        else:
            self.cluster_channels[cluster_channel.name] = cluster_channel
            self._all_channels.append(cluster_channel)
    def get_channels_to_configure(self):
        """Get a deduped list of channels for configuration.
        This goes through all channels and gets a unique list of channels to
        configure. It first assembles a unique list of channels that are part
        of entities while stashing relay channels off to the side. It then
        takes the stashed relay channels and adds them to the list of channels
        that will be returned if there isn't a channel in the list for that
        cluster already. This is done to ensure each cluster is only configured
        once.
        """
        channel_keys = []
        channels = []
        relay_channels = self._relay_channels.values()
        def get_key(channel):
            # Key is (endpoint, cluster); ZDO channels have no cluster_id.
            channel_key = "ZDO"
            if hasattr(channel.cluster, "cluster_id"):
                channel_key = "{}_{}".format(
                    channel.cluster.endpoint.endpoint_id, channel.cluster.cluster_id
                )
            return channel_key
        # first we get all unique non event channels
        for channel in self.all_channels:
            c_key = get_key(channel)
            if c_key not in channel_keys and channel not in relay_channels:
                channel_keys.append(c_key)
                channels.append(channel)
        # now we get event channels that still need their cluster configured
        for channel in relay_channels:
            channel_key = get_key(channel)
            if channel_key not in channel_keys:
                channel_keys.append(channel_key)
                channels.append(channel)
        return channels
    async def async_configure(self):
        """Configure the device."""
        self.debug("started configuration")
        await self._execute_channel_tasks(
            self.get_channels_to_configure(), "async_configure"
        )
        self.debug("completed configuration")
        entry = self.gateway.zha_storage.async_create_or_update(self)
        self.debug("stored in registry: %s", entry)
    async def async_initialize(self, from_cache=False):
        """Initialize channels."""
        self.debug("started initialization")
        await self._execute_channel_tasks(
            self.all_channels, "async_initialize", from_cache
        )
        self.debug("power source: %s", self.power_source)
        self.status = DeviceStatus.INITIALIZED
        self.debug("completed initialization")
    async def _execute_channel_tasks(self, channels, task_name, *args):
        """Gather and execute a set of CHANNEL tasks."""
        channel_tasks = []
        # Limit concurrent channel operations against the device.
        semaphore = asyncio.Semaphore(3)
        zdo_task = None
        for channel in channels:
            if channel.name == CHANNEL_ZDO:
                if zdo_task is None:  # We only want to do this once
                    zdo_task = self._async_create_task(
                        semaphore, channel, task_name, *args
                    )
            else:
                channel_tasks.append(
                    self._async_create_task(semaphore, channel, task_name, *args)
                )
        # ZDO runs first and alone; the rest run concurrently afterwards.
        if zdo_task is not None:
            await zdo_task
        await asyncio.gather(*channel_tasks)
    async def _async_create_task(self, semaphore, channel, func_name, *args):
        """Configure a single channel on this device."""
        try:
            async with semaphore:
                await getattr(channel, func_name)(*args)
                channel.debug("channel: '%s' stage succeeded", func_name)
        except Exception as ex:  # pylint: disable=broad-except
            # A single failing channel must not abort the other channels.
            channel.warning("channel: '%s' stage failed ex: %s", func_name, ex)
    @callback
    def async_unsub_dispatcher(self):
        """Unsubscribe the dispatcher."""
        self._unsub()
    @callback
    def async_update_last_seen(self, last_seen):
        """Set last seen on the zigpy device."""
        self._zigpy_device.last_seen = last_seen
    @callback
    def async_get_clusters(self):
        """Get all clusters for this device."""
        # Endpoint 0 is ZDO and is excluded.
        return {
            ep_id: {
                CLUSTER_TYPE_IN: endpoint.in_clusters,
                CLUSTER_TYPE_OUT: endpoint.out_clusters,
            }
            for (ep_id, endpoint) in self._zigpy_device.endpoints.items()
            if ep_id != 0
        }
    @callback
    def async_get_std_clusters(self):
        """Get ZHA and ZLL clusters for this device."""
        return {
            ep_id: {
                CLUSTER_TYPE_IN: endpoint.in_clusters,
                CLUSTER_TYPE_OUT: endpoint.out_clusters,
            }
            for (ep_id, endpoint) in self._zigpy_device.endpoints.items()
            if ep_id != 0 and endpoint.profile_id in (zha.PROFILE_ID, zll.PROFILE_ID)
        }
    @callback
    def async_get_cluster(self, endpoint_id, cluster_id, cluster_type=CLUSTER_TYPE_IN):
        """Get zigbee cluster from this entity."""
        clusters = self.async_get_clusters()
        return clusters[endpoint_id][cluster_type][cluster_id]
    @callback
    def async_get_cluster_attributes(
        self, endpoint_id, cluster_id, cluster_type=CLUSTER_TYPE_IN
    ):
        """Get zigbee attributes for specified cluster."""
        cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
        if cluster is None:
            return None
        return cluster.attributes
    @callback
    def async_get_cluster_commands(
        self, endpoint_id, cluster_id, cluster_type=CLUSTER_TYPE_IN
    ):
        """Get zigbee commands for specified cluster."""
        cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
        if cluster is None:
            return None
        return {
            CLUSTER_COMMANDS_CLIENT: cluster.client_commands,
            CLUSTER_COMMANDS_SERVER: cluster.server_commands,
        }
    async def write_zigbee_attribute(
        self,
        endpoint_id,
        cluster_id,
        attribute,
        value,
        cluster_type=CLUSTER_TYPE_IN,
        manufacturer=None,
    ):
        """Write a value to a zigbee attribute for a cluster in this entity.

        Returns the cluster response, or None if the cluster is missing or
        delivery failed.
        """
        cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
        if cluster is None:
            return None
        try:
            response = await cluster.write_attributes(
                {attribute: value}, manufacturer=manufacturer
            )
            self.debug(
                "set: %s for attr: %s to cluster: %s for ept: %s - res: %s",
                value,
                attribute,
                cluster_id,
                endpoint_id,
                response,
            )
            return response
        except zigpy.exceptions.DeliveryError as exc:
            # Delivery failures are logged, not raised, to keep callers simple.
            self.debug(
                "failed to set attribute: %s %s %s %s %s",
                f"{ATTR_VALUE}: {value}",
                f"{ATTR_ATTRIBUTE}: {attribute}",
                f"{ATTR_CLUSTER_ID}: {cluster_id}",
                f"{ATTR_ENDPOINT_ID}: {endpoint_id}",
                exc,
            )
            return None
    async def issue_cluster_command(
        self,
        endpoint_id,
        cluster_id,
        command,
        command_type,
        *args,
        cluster_type=CLUSTER_TYPE_IN,
        manufacturer=None,
    ):
        """Issue a command against specified zigbee cluster on this entity."""
        cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
        if cluster is None:
            return None
        if command_type == CLUSTER_COMMAND_SERVER:
            response = await cluster.command(
                command, *args, manufacturer=manufacturer, expect_reply=True
            )
        else:
            response = await cluster.client_command(command, *args)
        # NOTE(review): the cluster_type value below is labeled with
        # ATTR_CLUSTER_ID (same label as cluster_id above) -- looks like a
        # copy-paste slip in the log label; confirm before changing.
        self.debug(
            "Issued cluster command: %s %s %s %s %s %s %s",
            f"{ATTR_CLUSTER_ID}: {cluster_id}",
            f"{ATTR_COMMAND}: {command}",
            f"{ATTR_COMMAND_TYPE}: {command_type}",
            f"{ATTR_ARGS}: {args}",
            f"{ATTR_CLUSTER_ID}: {cluster_type}",
            f"{ATTR_MANUFACTURER}: {manufacturer}",
            f"{ATTR_ENDPOINT_ID}: {endpoint_id}",
        )
        return response
    def log(self, level, msg, *args):
        """Log a message, prefixed with this device's nwk and model."""
        msg = "[%s](%s): " + msg
        args = (self.nwk, self.model) + args
        _LOGGER.log(level, msg, *args)
|
{
"content_hash": "f4f862d99d892468203c577ced4994aa",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 87,
"avg_line_length": 33.853801169590646,
"alnum_prop": 0.5836932112627397,
"repo_name": "qedi-r/home-assistant",
"id": "e5d1678ad6fa314cae030f7ffdaf68c5a039d381",
"size": "17367",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import shlex
import subprocess
import tempest.cli as cli
from tempest.openstack.common import log as logging
from tempest.tests import base
LOG = logging.getLogger(__name__)
class StressFrameworkTest(base.TestCase):
    """Basic test for the stress test framework.
    """
    def _cmd(self, cmd, param):
        """Executes specified command.

        Runs ``cmd param`` as a subprocess (no shell), raises
        cli.CommandFailed on a non-zero exit code, and returns the exit
        code otherwise.
        """
        # Build the full command line, keep a string copy for logging,
        # then split into an argv list for Popen.
        cmd = ' '.join([cmd, param])
        LOG.info("running: '%s'" % cmd)
        cmd_str = cmd
        cmd = shlex.split(cmd)
        result = ''
        result_err = ''
        try:
            stdout = subprocess.PIPE
            stderr = subprocess.PIPE
            proc = subprocess.Popen(
                cmd, stdout=stdout, stderr=stderr)
            result, result_err = proc.communicate()
            if proc.returncode != 0:
                LOG.debug('error of %s:\n%s' % (cmd_str, result_err))
                raise cli.CommandFailed(proc.returncode,
                                        cmd,
                                        result)
        finally:
            # Logged on both the success and the failure path.
            LOG.debug('output of %s:\n%s' % (cmd_str, result))
        return proc.returncode
    def test_help_function(self):
        # Invoking the stress runner with -h should exit cleanly (code 0).
        result = self._cmd("python", "-m tempest.cmd.run_stress -h")
        self.assertEqual(0, result)
|
{
"content_hash": "e983ecb252c33a1f3ca4560169cb58db",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 31.525,
"alnum_prop": 0.5408406026962728,
"repo_name": "vmahuli/tempest",
"id": "5a334c53902f2b942527155fc6b18dc0b09ca11d",
"size": "1896",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/tests/stress/test_stress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3087389"
},
{
"name": "Shell",
"bytes": "17930"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_options(option_type):
    """Define and return the tf.app.flags FLAGS for this project.

    Args:
        option_type: string, one of 'training', 'display' or 'visualize';
            selects which extra flag groups are registered in addition to
            the common ones.

    Returns:
        The tf.app.flags.FLAGS object with all flags registered.
    """
    # name
    tf.app.flags.DEFINE_string("training_name","allrob_v1","name of next training in log")
    # Common
    tf.app.flags.DEFINE_string("env_type", "gym", "environment type (lab or gym or maze)")
    tf.app.flags.DEFINE_string("env_name", "CartPole-v1", "environment name (for lab)")
    tf.app.flags.DEFINE_integer("env_max_steps", 400000, "max number of steps in environment")
    tf.app.flags.DEFINE_boolean("use_base", False, "whether to use base A3C for aux network")
    tf.app.flags.DEFINE_boolean("use_pixel_change", False, "whether to use pixel change")
    tf.app.flags.DEFINE_boolean("use_value_replay", False, "whether to use value function replay")
    tf.app.flags.DEFINE_boolean("use_reward_prediction", False, "whether to use reward prediction")
    tf.app.flags.DEFINE_boolean("use_temporal_coherence", True, "whether to use temporal coherence")
    tf.app.flags.DEFINE_boolean("use_proportionality", True, "whether to use proportionality")
    tf.app.flags.DEFINE_boolean("use_causality", True, "whether to use causality")
    tf.app.flags.DEFINE_boolean("use_repeatability", True, "whether to use repeatability")
    tf.app.flags.DEFINE_string("checkpoint_dir", "/tmp/StRADRL/checkpoints", "checkpoint directory")
    # For training
    if option_type == 'training':
        tf.app.flags.DEFINE_string("temp_dir", "/tmp/StRADRL/tensorboard/", "base directory for tensorboard")
        tf.app.flags.DEFINE_string("log_dir", "/tmp/StRADRL/log/", "base directory for logs")
        tf.app.flags.DEFINE_integer("max_time_step", 10**6, "max time steps")
        tf.app.flags.DEFINE_integer("save_interval_step", 10**4, "saving interval steps")
        # Fixed: this threshold is a float (40.0), not a boolean; it was
        # previously registered with DEFINE_boolean by mistake.
        tf.app.flags.DEFINE_float("grad_norm_clip", 40.0, "gradient norm clipping")
        # base
        tf.app.flags.DEFINE_float("initial_learning_rate", 1e-3, "learning rate")
        tf.app.flags.DEFINE_float("gamma", 0.99, "discount factor for rewards")
        tf.app.flags.DEFINE_float("entropy_beta", 0.01, "entropy regurarlization constant")
        tf.app.flags.DEFINE_float("value_lambda", 0.5, "value ratio for base loss")
        tf.app.flags.DEFINE_float("base_lambda", 0.97, "generalized adv. est. lamba for short-long sight")
        # auxiliary
        tf.app.flags.DEFINE_integer("parallel_size", 1, "parallel thread size")
        tf.app.flags.DEFINE_float("aux_initial_learning_rate", 1e-3, "learning rate")
        tf.app.flags.DEFINE_float("aux_lambda", 0.0, "generalized adv. est. lamba for short-long sight (aux)")
        tf.app.flags.DEFINE_float("gamma_pc", 0.9, "discount factor for pixel control")
        tf.app.flags.DEFINE_float("pixel_change_lambda", 0.0001, "pixel change lambda")  # 0.05, 0.01 ~ 0.1 for lab, 0.0001 ~ 0.01 for gym
        tf.app.flags.DEFINE_float("temporal_coherence_lambda", 1., "temporal coherence lambda")
        tf.app.flags.DEFINE_float("proportionality_lambda", 100., "proportionality lambda")
        tf.app.flags.DEFINE_float("causality_lambda", 1., "causality lambda")
        tf.app.flags.DEFINE_float("repeatability_lambda", 100., "repeatability lambda")
        tf.app.flags.DEFINE_integer("experience_history_size", 100000, "experience replay buffer size")
        # queuer
        tf.app.flags.DEFINE_integer("local_t_max", 20, "repeat step size")
        tf.app.flags.DEFINE_integer("queue_length", 5, "max number of batches (of length local_t_max) in queue")
        tf.app.flags.DEFINE_integer("env_runner_sync", 1, "number of env episodes before sync to global")
        tf.app.flags.DEFINE_float("action_freq", 0, "number of actions per second in env")
    # For display
    if option_type == 'display':
        tf.app.flags.DEFINE_string("frame_save_dir", "/tmp/StRADRL_frames", "frame save directory")
        tf.app.flags.DEFINE_boolean("recording", False, "whether to record movie")
        tf.app.flags.DEFINE_boolean("frame_saving", False, "whether to save frames")
    return tf.app.flags.FLAGS
|
{
"content_hash": "20eda44beb5e531b4fad71a783b16668",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 133,
"avg_line_length": 55.013513513513516,
"alnum_prop": 0.7054777695897814,
"repo_name": "TheTazza/StRADRL",
"id": "a702acde9726e733d34c4242843d96d3bb334416",
"size": "4095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/options7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173515"
},
{
"name": "Shell",
"bytes": "339"
}
],
"symlink_target": ""
}
|
"""Test ESPEI's database utilities
Tests in here are heavily parameterized and represent a large fraction of the
number of tests, but a small amount of coverage.
"""
from pycalphad import variables as v
import pytest
import symengine
from symengine import Piecewise, Symbol
import espei.refdata
from espei.database_utils import initialize_database, _get_ser_data
from .testing_data import SGTE91_PURE_ELEMENTS
@pytest.mark.parametrize("element_name", SGTE91_PURE_ELEMENTS)
def test_get_ser_data_SGTE91(element_name):
    """Test that all SGTE91 elements can be read from the primary SGTE91 dataset without error"""
    # Make a fake fallback dataset so we can confirm that it's pulling from the primary
    FAKE_FALLBACK = "FAKE_FALLBACK_"
    setattr(espei.refdata, FAKE_FALLBACK + "SER", {})
    try:
        data = _get_ser_data(element_name, "SGTE91", fallback_ref_state=FAKE_FALLBACK)
        assert len(data) > 0
        assert isinstance(data['phase'], str)
        assert isinstance(data['H298'], float)
        assert isinstance(data['S298'], float)
        assert isinstance(data['mass'], float)
    finally:
        # Teardown must run even when an assertion fails, otherwise the
        # patched attribute leaks into every other test in the module.
        delattr(espei.refdata, FAKE_FALLBACK + "SER")
@pytest.mark.parametrize("element_name", SGTE91_PURE_ELEMENTS)
def test_get_ser_data_falls_back_on_SGTE91(element_name):
    """Test that a reference dataset with no SER data falls back on SGTE91"""
    ser_data = _get_ser_data(element_name, "FAKE_REF_STATE")
    assert len(ser_data) > 0
    # Every element must carry a phase name plus numeric SER quantities.
    expected_types = {"phase": str, "H298": float, "S298": float, "mass": float}
    for key, expected_type in expected_types.items():
        assert isinstance(ser_data[key], expected_type)
def test_get_ser_data_is_successful_without_refdata():
    """Test that an element not in reference data or fallback data returns an empty dict"""
    # Unknown element should yield {} for both a real and a bogus ref state.
    for ref_state in ("SGTE91", "FAKE REF DATA"):
        assert _get_ser_data("FAKE ELEMENT", ref_state) == {}
def test_database_initialization_custom_refstate():
    """Test that a custom reference state with fictitious pure elements can be used to construct a Database"""
    refdata_stable = {
        "Q": Piecewise((symengine.oo, True)),
        "ZX": Piecewise((symengine.oo, True)),
    }
    refdata = {
        ("Q", "ALPHA"): Symbol("GHSERQQ"),
        ("Q", "BETA"): Symbol("GHSERQQ") + 10000.0,
        ("ZX", "BETA"): Symbol("GHSERZX"),
    }
    refdata_ser = {
        'Q': {'phase': 'ALPHA', 'mass': 8.0, 'H298': 80.0, 'S298': 0.80},
        'ZX': {'phase': 'BETA', 'mass': 52.0, 'H298': 520.0, 'S298': 5.20},
    }
    # Setup refdata
    CUSTOM_REFDATA_NAME = "CUSTOM"
    setattr(espei.refdata, CUSTOM_REFDATA_NAME + "Stable", refdata_stable)
    setattr(espei.refdata, CUSTOM_REFDATA_NAME, refdata)
    setattr(espei.refdata, CUSTOM_REFDATA_NAME + "SER", refdata_ser)
    try:
        # Test
        phase_models = {
            "components": ["Q", "ZX"],
            "phases": {
                "ALPHA": {
                    "sublattice_model": [["Q"]],
                    "sublattice_site_ratios": [1],
                },
                "BCC": {
                    "aliases": ["BETA"],
                    "sublattice_model": [["Q", "ZX"]],
                    "sublattice_site_ratios": [1.0],
                },
            }
        }
        dbf = initialize_database(phase_models, CUSTOM_REFDATA_NAME)
        assert set(dbf.phases.keys()) == {"ALPHA", "BCC"}
        assert dbf.elements == {"Q", "ZX"}
        assert dbf.species == {v.Species("Q"), v.Species("ZX")}
        assert 'GHSERQQ' in dbf.symbols
        assert 'GHSERZX' in dbf.symbols
        assert dbf.refstates["Q"]["phase"] == "ALPHA"
        assert dbf.refstates["ZX"]["phase"] == "BCC"
    finally:
        # Teardown refdata; must run even if an assertion above fails so
        # the patched module attributes cannot leak into other tests.
        delattr(espei.refdata, CUSTOM_REFDATA_NAME + "Stable")
        delattr(espei.refdata, CUSTOM_REFDATA_NAME)
        delattr(espei.refdata, CUSTOM_REFDATA_NAME + "SER")
def test_database_initialization_adds_GHSER_data():
    """Initializing a Database from SGTE91 should populate nonzero GHSER symbols."""
    phase_models = {
        "components": ["CR", "NI"],
        "phases": {
            "FCC_A1": {
                "sublattice_model": [["CR", "NI"]],
                "sublattice_site_ratios": [1],
            },
            "BCC": {
                "aliases": ["BCC_A2"],
                "sublattice_model": [["CR", "NI"]],
                "sublattice_site_ratios": [1.0],
            },
        }
    }
    dbf = initialize_database(phase_models, "SGTE91")
    # Both pure-element GHSER functions must be defined and nontrivial.
    for ghser_symbol in ("GHSERCR", "GHSERNI"):
        assert dbf.symbols[ghser_symbol] != symengine.S.Zero
|
{
"content_hash": "3275b95afd58fb069086a16c60fc7d2a",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 110,
"avg_line_length": 36.46218487394958,
"alnum_prop": 0.6084351233002996,
"repo_name": "PhasesResearchLab/ESPEI",
"id": "a46320575a8b745038009680e358c43a637be46e",
"size": "4339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_database_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "587917"
}
],
"symlink_target": ""
}
|
from tempfile import NamedTemporaryFile
from pprint import pformat
from itertools import product
from sklearn import tree
import pandas as pd
from collections import Counter
from six import StringIO
from drain import util, step
def explore(steps, reload=False):
    """Load the given steps and wrap them in a StepFrame for exploration."""
    loaded_steps = step.load(steps, reload=reload)
    return StepFrame(index=loaded_steps)
def expand(self, prefix=False, index=True, diff=True, existence=True):
    """
    This function is a member of StepFrame and StepSeries. It is used to
    expand the kwargs of the steps either into the index (index=True) or
    as columns (index=False). By default (diff=True) only the kwargs which
    differ among steps are expanded.
    Note that index objects in pandas must be hashable so any unhashable
    argument values are converted to string representations (using pprint)
    when index=True.
    If "inputs" is an argument those steps' kwargs are also expanded (and
    their inputs recursively). If there are multiple steps with the same
    argument names they are prefixed by their names or if those are not set
    then by their class names. To enable prefixing for all args set
    prefix=True.
    Sometimes the difference between pipelines is that a step exists or it
    doesn't. When diff=True and existence=True, instead of expanding all
    the kwargs for that step, we expand a single column whose name is the
    step name and whose value is a boolean indicating whether the step exists
    in the given tree.
    Args:
        prefix: whether to always use step name prefix for kwarg name.
            Default False, which uses prefixes when necessary, i.e. for
            keywords that are shared by multiple step names.
        index: If True expand args into index. Otherwise expand into
            columns
        diff: whether to only expand keywords whose values that are
            non-constant
        existence: whether to check for existence of a step in the tree
            instead of a full diff. Only applicable when diff=True. See
            note above.
    Returns: a DataFrame with the arguments of the steps expanded.
    """
    # collect kwargs resulting in a list of {name: kwargs} dicts
    dicts = [step._collect_kwargs(s, drop_duplicate_names=True) for s in self.index]
    # if any of the kwargs are themselves dicts, expand them
    dicts = [{k: util.dict_expand(v) for k, v in s.items()} for s in dicts]
    if diff:
        diff_dicts = [{} for d in dicts]  # the desired list of dicts
        names = util.union([set(d.keys()) for d in dicts])  # all names among these steps
        for name in names:
            if existence:
                ndicts = [d[name] for d in dicts if name in d.keys()]  # all dicts for this name
            else:
                ndicts = [d[name] if name in d.keys() else {} for d in dicts]
            ndiffs = util.dict_diff(ndicts)  # diffs for this name
            if sum(map(len, ndiffs)) == 0:  # if they're all the same
                # but not all had the key and existence=True
                if existence and len(ndicts) < len(self):
                    for m, d in zip(diff_dicts, dicts):
                        m[name] = {tuple(): name in d.keys()}
            else:  # if there was a diff
                diff_iter = iter(ndiffs)
                for m, d in zip(diff_dicts, dicts):
                    if name in d.keys() or not existence:
                        # Fixed: was diff_iter.next(), which only exists on
                        # Python 2; next() works on both (file uses six for
                        # py2/py3 compatibility).
                        m[name] = next(diff_iter)  # get the corresponding diff
        dicts = diff_dicts
    # restructure so name is in the key
    merged_dicts = []
    for dd in dicts:
        merged_dicts.append(util.dict_merge(*({tuple([name] + list(util.make_tuple(k))): v
                                               for k, v in d.items()} for name, d in dd.items())))
    # prefix_keys are the keys that will keep their prefix
    keys = [list((k[1:] for k in d.keys())) for d in merged_dicts]
    if not prefix:
        key_count = [Counter(kk) for kk in keys]
        prefix_keys = util.union({k for k in c if c[k] > 1} for c in key_count)
    else:
        prefix_keys = util.union((set(kk) for kk in keys))
    merged_dicts = [{str.join('_', map(str, k if k[1:] in prefix_keys else k[1:])): v
                     for k, v in d.items()} for d in merged_dicts]
    expanded = pd.DataFrame(merged_dicts, index=self.index)
    if index:
        columns = list(expanded.columns)
        try:
            if len(columns) > 0:
                expanded.set_index(columns, inplace=True)
            else:
                expanded.index = [None]*len(expanded)
        except TypeError:
            # unhashable values cannot go into an index; stringify them first
            _print_unhashable(expanded, columns)
            expanded.set_index(columns, inplace=True)
        df = self.__class__.__bases__[0](self, copy=True)
        df.index = expanded.index
    else:
        # NOTE(review): the concat result is immediately overwritten below,
        # so the original data is dropped when index=False -- looks
        # suspicious but behavior is preserved here; confirm intent.
        df = pd.concat((expanded, self), axis=1)
        # When index=False, the index is still a Step collection
        df = StepFrame(expanded)
    return df
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):
    """
    Apply function to each step object in the index
    Args:
        fn: function to apply. If a list then each function is applied
        pairwise: whether to apply the function to pairs of steps
        symmetric, diagonal, block: passed to apply_pairwise when pairwise=True
        kwargs: a keyword arguments to pass to each function. Arguments
            with list value are grid searched using util.dict_product.
    Returns: a StepFrame or StepSeries
    """
    # keys whose values are searched over (list-valued with >1 option)
    search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]
    functions = util.make_list(fn)
    # cartesian product of functions x kwarg combinations
    search = list(product(functions, util.dict_product(kwargs)))
    results = []
    for fn, kw in search:
        if not pairwise:
            r = self.index.to_series().apply(lambda step: fn(step, **kw))
        else:
            r = apply_pairwise(self, fn,
                               symmetric=symmetric, diagonal=diagonal, block=block,
                               **kw)
        # build the result's column name from the function name (only when
        # there are several functions) plus the searched kwarg values
        name = [] if len(functions) == 1 else [fn.__name__]
        name += util.dict_subset(kw, search_keys).values()
        if isinstance(r, pd.DataFrame):
            columns = pd.MultiIndex.from_tuples(
                [tuple(name + util.make_list(c)) for c in r.columns])
            r.columns = columns
        else:
            r.name = tuple(name)
        results.append(r)
    if len(results) > 1:
        result = pd.concat(results, axis=1)
        # get subset of parameters that were searched over
        column_names = [] if len(functions) == 1 else [None]
        column_names += search_keys
        column_names += [None]*(len(result.columns.names)-len(column_names))
        result.columns.names = column_names
        return StepFrame(result)
    else:
        result = results[0]
        if isinstance(result, pd.DataFrame):
            return StepFrame(result)
        else:
            result.name = functions[0].__name__
            return StepSeries(result)
def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs):
    """
    Helper function for pairwise apply.
    Args:
        self: a StepFrame/StepSeries whose index is an ordered collection of steps
        function: function to apply, first two positional arguments are steps
        symmetric: whether function is symmetric in the two steps
        diagonal: whether to apply on the diagonal
        block: apply only when the given columns match
        kwargs: keyword arguments to pass to the function
    Returns:
        DataFrame with index and columns equal to the steps argument
    """
    steps = self.index
    r = pd.DataFrame(index=steps, columns=steps)
    for i, s1 in enumerate(steps):
        # Fixed: range() is a lazy sequence on Python 3 and has no .remove();
        # materialize to a list before mutating.
        j = list(range(i+1 if symmetric else len(steps)))
        if not diagonal:
            j.remove(i)
        other = set(steps[j])
        if block is not None:
            # restrict pairs to rows that agree on the blocking columns
            df = self.reset_index()
            df = df.merge(df, on=block)
            other &= set(df[df.index_x == s1].index_y)
        for s2 in other:
            # .loc replaces the removed DataFrame.ix indexer (label-based here)
            r.loc[s1, s2] = function(s1, s2, **kwargs)
    return r
def _assert_step_collection(steps):
    """Validate that *steps* contains only unique drain Step objects."""
    for candidate in steps:
        if not isinstance(candidate, step.Step):
            raise ValueError("StepFrame index must consist of drain.step.Step objects")
    # duplicates collapse in a set, so a size mismatch means repeats exist
    if len(steps) != len(set(steps)):
        raise ValueError("StepFrame steps must be unique")
class StepFrame(pd.DataFrame):
    """A DataFrame whose index is a collection of unique drain Step objects.

    Display methods (__str__, to_html) expand the steps' kwargs first.
    """
    expand = expand
    dapply = dapply
    def __init__(self, *args, **kwargs):
        pd.DataFrame.__init__(self, *args, **kwargs)
        _assert_step_collection(self.index.values)
    @property
    def _constructor(self):
        # pandas hook: operations that return a frame keep the subclass
        return StepFrame
    @property
    def _constructor_sliced(self):
        # Fixed typo: was "_contructor_sliced", which pandas silently
        # ignored (it is not the hook name), falling back to the default.
        return pd.Series
    def __str__(self):
        return self.expand().__str__()
    def to_html(self, *args, **kwargs):
        return self.expand().to_html(*args, **kwargs)
    # resetting index makes it no longer a StepFrame
    def reset_index(self, *args, **kwargs):
        return pd.DataFrame(self).reset_index(*args, **kwargs)
class StepSeries(pd.Series):
    """A Series whose index is a collection of unique drain Step objects.

    Display methods (__str__, to_html) expand the steps' kwargs first.
    """
    expand = expand
    dapply = dapply
    def __init__(self, *args, **kwargs):
        pd.Series.__init__(self, *args, **kwargs)
        _assert_step_collection(self.index.values)
    @property
    def _constructor(self):
        # pandas hook: operations that return a series keep the subclass
        return StepSeries
    @property
    def _constructor_expanddim(self):
        # Fixed typo: was "_contructor_expanddim", which pandas silently
        # ignored (it is not the hook name), falling back to the default.
        return StepFrame
    def __str__(self):
        return self.expand().__str__()
    def to_html(self, *args, **kwargs):
        return self.expand().to_html(*args, **kwargs)
    def reset_index(self, *args, **kwargs):
        return pd.Series(self).reset_index(*args, **kwargs)
def _print_unhashable(df, columns=None):
"""
Replace unhashable values in a DataFrame with their string repr
Args:
df: DataFrame
columns: columns to replace, if necessary. Default None replaces all columns.
"""
for c in df.columns if columns is None else columns:
if df.dtypes[c] == object:
try:
df[c].apply(hash)
except TypeError:
df[c] = df[c].dropna().apply(pformat).ix[df.index]
return df
def show_tree(tree, feature_names, max_depth=None):
    """Render a fitted decision tree to an in-memory image via a temp file."""
    import wand.image
    tmp_name = NamedTemporaryFile(delete=False).name
    ascii_names = [c.encode('ascii') for c in feature_names]
    export_tree(tree, tmp_name, ascii_names, max_depth)
    return wand.image.Image(filename=tmp_name)
def export_tree(clf, filename, feature_names=None, max_depth=None):
    """Write a graphviz rendering of a fitted decision tree to a PDF file."""
    import pydot
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data,
                         feature_names=feature_names, max_depth=max_depth)
    # NOTE(review): pydot >= 1.2 returns a *list* of graphs from
    # graph_from_dot_data; this assumes the older single-graph return
    # value -- confirm the pinned pydot version.
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf(filename)
|
{
"content_hash": "b5ff04bfd0fa45c88ff650398f6c48b4",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 96,
"avg_line_length": 35.34754098360656,
"alnum_prop": 0.6160838512197384,
"repo_name": "potash/drain",
"id": "c54427a3400d2b655c2109be327495431bfad944",
"size": "10781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drain/exploration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2266"
},
{
"name": "Python",
"bytes": "162094"
},
{
"name": "Shell",
"bytes": "4058"
}
],
"symlink_target": ""
}
|
__author__ = 'Charles Hong'
__emailID__ = 'csh6cw'
from helper import greeting
from goodbye import farewell
from quack import quack
if __name__ == '__main__':
    # Demo entry point: exercise each helper module in turn.
    greeting('hello')
    quack('(quack)')
    quack('Surprise!')
    farewell('goodbye')
|
{
"content_hash": "09676bc468b144edd073f56c1a3a3c4a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 28,
"avg_line_length": 20.916666666666668,
"alnum_prop": 0.6374501992031872,
"repo_name": "charleshong/cs3240-labdemo",
"id": "1214e02f5d57ac620e5d70e07e2d1f8193a2adc1",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "694"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Second migration of the `system` app: adds the DeviceGroup model,
    # then extends the existing Device model with a status string and a
    # link to its group.
    dependencies = [
        ('system', '0001_initial'),
    ]
    operations = [
        # New grouping model; only one classification choice exists so far.
        migrations.CreateModel(
            name='DeviceGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('classification', models.CharField(choices=[('F5 BIG-IP', 'F5 BIG-IP')], default='F5 BIG-IP', max_length=200)),
            ],
        ),
        # Nullable so existing Device rows need no backfill.
        migrations.AddField(
            model_name='device',
            name='status',
            field=models.CharField(max_length=64, null=True),
        ),
        # PROTECT blocks deleting a DeviceGroup that still has devices.
        migrations.AddField(
            model_name='device',
            name='device_group',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='system.DeviceGroup'),
        ),
    ]
|
{
"content_hash": "10d83a9dc7bff991785268a296111d71",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 128,
"avg_line_length": 32.71875,
"alnum_prop": 0.5749761222540593,
"repo_name": "buzzsurfr/lbm",
"id": "751a5ca861727ed330e2549739b9a2c38d849e5d",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system/migrations/0002_auto_20160317_0909.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "516"
},
{
"name": "HTML",
"bytes": "24106"
},
{
"name": "JavaScript",
"bytes": "1275"
},
{
"name": "Python",
"bytes": "67683"
}
],
"symlink_target": ""
}
|
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest
# Generated smoke test: fit and explain the AdaBoostClassifier_1 model
# configuration on the iris dataset.
skltest.test_class_dataset_and_model("iris" , "AdaBoostClassifier_1")
|
{
"content_hash": "3576ebd972dbe7ad89938336ae846f20",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 37,
"alnum_prop": 0.7972972972972973,
"repo_name": "antoinecarme/sklearn_explain",
"id": "39728194ac402fbf8d043700b8dc7485ed61e397",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/skl_datasets/iris/skl_dataset_iris_AdaBoostClassifier_1_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110343"
}
],
"symlink_target": ""
}
|
"""Tool to interactively call sl4a methods.
SL4A (Scripting Layer for Android) is an RPC service exposing API calls on
Android.
Original version: https://github.com/damonkohler/sl4a
Fork in AOSP (can make direct system privileged calls):
https://android.googlesource.com/platform/external/sl4a/
Also allows access to Event Dispatcher, which allows waiting for asynchronous
actions. For more information see the Mobly codelab:
https://github.com/google/mobly#event-dispatcher
Usage:
$ sl4a_shell
>>> s.getBuildID()
u'N2F52'
"""
import argparse
import logging
from mobly.controllers.android_device_lib import jsonrpc_shell_base
class Sl4aShell(jsonrpc_shell_base.JsonRpcShellBase):
    """Interactive shell that exposes SL4A RPC handles for a device."""

    def _start_services(self, console_env):
        """Overrides superclass."""
        self._ad.load_sl4a()
        # Expose the SL4A client under a short and a long alias, plus
        # the event dispatcher for async callbacks.
        sl4a_client = self._ad.sl4a
        console_env['s'] = sl4a_client
        console_env['sl4a'] = sl4a_client
        console_env['ed'] = self._ad.ed

    def _get_banner(self, serial):
        """Builds the greeting shown when the interactive shell starts."""
        return ('Connected to %s.\n'
                'Call methods against:\n'
                '    ad (android_device.AndroidDevice)\n'
                '    sl4a or s (SL4A)\n'
                '    ed (EventDispatcher)' % serial)
if __name__ == '__main__':
    # Parse the optional device serial, then hand control to the
    # interactive REPL provided by JsonRpcShellBase.main.
    parser = argparse.ArgumentParser(description='Interactive client for sl4a.')
    parser.add_argument(
        '-s', '--serial',
        help=
        'Device serial to connect to (if more than one device is connected)')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    Sl4aShell().main(args.serial)
|
{
"content_hash": "36db2cc5256b4a7921408a2bb61a4e53",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 30.48076923076923,
"alnum_prop": 0.6561514195583596,
"repo_name": "l-meng/mobly",
"id": "1006ff8800aa860c2d122f84b80ac8ce4fcba422",
"size": "2188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/sl4a_shell.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "440648"
}
],
"symlink_target": ""
}
|
from .._abstract.abstract import BaseAGSServer
import json
from parameters import ClusterProtocol
########################################################################
class Clusters(BaseAGSServer):
    """
    This resource is a collection of all the clusters created within your
    site. The Create Cluster operation lets you define a new cluster
    configuration.
    Inputs:
       url - server cluster url
       securityHandler - AGSSecurityHandler
       proxy_url - optional proxy url
       proxy_port - optional proxy port
       initialize - boolean, false means do not load data, true means load
                    the class' information at creation.
    """
    # Cached JSON string of the server's /clusters resource.
    _json = None
    _url = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    #----------------------------------------------------------------------
    def __init__(self, url,
                 securityHandler, proxy_url=None,
                 proxy_port=None, initialize=False):
        """Constructor"""
        self._securityHandler = securityHandler
        # Normalize the URL so it always ends with the /clusters endpoint.
        if url.lower().endswith("/clusters"):
            self._url = url
        else:
            self._url = url + "/clusters"
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if initialize:
            self.__init()
    #----------------------------------------------------------------------
    def __init(self):
        """ populates server admin information """
        params = {
            "f" : "json",
            "token" : self._securityHandler.token
        }
        json_dict = self._do_get(url=self._url,
                                 param_dict=params,
                                 proxy_url=self._proxy_url,
                                 proxy_port=self._proxy_port)
        self._json = json.dumps(json_dict)
        # Map every JSON key onto the matching private attribute ("_" + key);
        # unknown keys are reported rather than silently dropped.
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_')]
        for k,v in json_dict.iteritems():  # NOTE: Python 2 only (iteritems, print stmt)
            if k in attributes:
                setattr(self, "_"+ k, json_dict[k])
            else:
                print k, " - attribute not implmented."
        del k
        del v
    #----------------------------------------------------------------------
    def createCluster(self, clusterName, machineNames="", tcpClusterPort=None):
        """
        Creating a new cluster involves defining a clustering protocol that
        will be shared by all server machines participating in the cluster.
        All server machines that are added to the cluster must be
        registered with the site. The clustering protocol and the initial
        list of server machines are optional. In this case, the server
        picks the default clustering protocol and selects the port numbers
        such that they do not conflict with other configured ports on the
        server machine. Once a cluster has been created you can add more
        machines (to increase the compute power) or remove them (to reduce
        the compute power) dynamically.
        Inputs:
           clusterName - The name of the cluster. This must be a unique
                         name within a site
           machineNames - An optional comma-separated list of server
                          machines to be added to this cluster.
           tcpClusterPort - A TCP port number that will be used by all the
                            server machines to communicate with each other
                            when using the TCP clustering protocol. This is
                            the default clustering protocol. If this
                            parameter is missing, a suitable default will
                            be used.
        """
        url = self._url + "/create"
        params = {
            "token" : self._securityHandler.token,
            "f" : "json",
            "clusterName" : clusterName,
            "machineNames" : machineNames,
            "tcpClusterPort" : tcpClusterPort
        }
        return self._do_post(url=url,
                             param_dict=params,
                             proxy_url=self._proxy_url,
                             proxy_port=self._proxy_port)
    #----------------------------------------------------------------------
    def getAvailableMachines(self):
        """
        This operation lists all the server machines that don't participate
        in any cluster and are available to be added to a cluster.
        The list would be empty if all registered server machines already
        participate in some cluster.
        """
        url = self._url + "/getAvailableMachines"
        params = {
            "f" : "json",
            "token" : self._securityHandler.token
        }
        return self._do_get(url=url,
                            param_dict=params,
                            proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port)
########################################################################
class Cluster(BaseAGSServer):
"""
A Cluster is a group of server machines that host a collection of GIS
services. Grouping server machines into a cluster allows you to treat
them as a single unit to which you can publish GIS services.A cluster
with more than one server machine provides a level of fault tolerance
to the services. At the same time, having more than one machine
increases the computing power of your cluster, hence increasing the
overall throughput.
A cluster is dynamic with respect to the list of server machines. New
server machines can be added to increase computing power without
affecting the already running GIS services. You can also remove
machines from a cluster and re-assign them to another cluster.
"""
_json = None
_proxy_url = None
_proxy_port = None
_url = None
_securityHandler = None
_clusterName = None
_clusterProtocol = None
_configuredState = None
_machineNames = None
_configurationState = None
_clusters = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler, proxy_url=None, proxy_port=None,
initialize=False):
"""Constructor"""
self._proxy_port = proxy_port
self._proxy_url = proxy_url
self._securityHandler = securityHandler
self._url = url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" populates server admin information """
params = {
"f" : "json",
"token" : self._securityHandler.token
}
json_dict = self._do_get(url=self._url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implmented."
del k
del v
#----------------------------------------------------------------------
def __str__(self):
"""Constructor"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def clusters(self):
"""returns the cluster object for each server"""
if self._clusters is None:
self.__init()
Cs = []
for c in self._clusters:
url = self._url + "/%s" % c['clusterName']
Cs.append(Cluster(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True))
self._clusters = Cs
return self._clusters
#----------------------------------------------------------------------
def refresh(self):
"""refreshes the object's properties"""
self.__init()
#----------------------------------------------------------------------
@property
def clusterName(self):
"""returns the cluster name"""
if self._clusterName is None:
self.__init()
return self._clusterName
#----------------------------------------------------------------------
@property
def clusterProtocol(self):
"""returns the cluster's protocol parameters"""
if self._clusterProtocol is None:
self.__init()
return self._clusterProtocol
#----------------------------------------------------------------------
@property
def configuredState(self):
"""returns the current state of the cluster"""
if self._configurationState is None:
self.__init()
return self._configuredState
#----------------------------------------------------------------------
@property
def machineNames(self):
"""returns a list of machines in cluster"""
if self._machineNames is None:
self.__init()
return self._machineNames
#----------------------------------------------------------------------
def start(self):
"""
Starts the cluster. Starting a cluster involves starting all the
server machines within the cluster and the GIS services that are
deployed to the cluster. This operation attempts to start all the
server machines. If one or more of them cannot be reached, this
operation reports an error.
"""
params = {
"f" : "json",
"token" : self._securityHandler.token
}
url = self._url + "/start"
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def stop(self):
"""
Stops a cluster. This also stops all the GIS services that are
hosted on the cluster. This operation attempts to stop all the
server machines within the cluster. If one or more machines cannot
be reached, then this operation reports an error.
"""
params = {
"f" : "json",
"token" : self._securityHandler.token
}
url = self._url + "/stop"
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def delete(self):
"""
Deletes the cluster configuration. All the server machines in the
cluster will be stopped and returned to the pool of registered
machines. The GIS services that were deployed on the cluster are
also stopped. Deleting a cluster does not delete your GIS services.
"""
params = {
"f" : "json",
"token" : self._securityHandler.token
}
url = self._url + "/delete"
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def servicesInCluster(self):
"""
This resource lists all the services that are currently deployed to
the cluster (of machines). A service deployed to a cluster runs on
every server machine that is participating in the cluster.
This resource was added at ArcGIS 10.1 Service Pack 1.
"""
params = {
"f" : "json",
"token" : self._securityHandler.token
}
url = self._url + "/services"
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def machinesInCluster(self):
"""
This resource lists all the server machines that are currently
participating in the cluster. Each server machine listing is
accompanied by its status indicating whether the server machine is
running or stopped.
The list of server machines participating in a cluster is dynamic
as machines can be added or removed.
"""
url = self._url + "/machines"
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def addMachinesToCluster(self, machineNames):
"""
Adds new server machines to the cluster. The server machines need
to be registered with the site prior to this operation. When a
server machine is added to the cluster, it pulls all the GIS
services that were deployed to cluster and prepares to run them.
Inputs:
machineNames - A comma-separated list of machine names. The
machines must be registered prior to completing this operation.
"""
url = self._url + "/machines/add"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"machineNames" : machineNames
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def removeMachinesFromCluster(self,
machineNames):
"""
Removes server machines from the cluster. The server machines are
returned back to the pool of registered server machines.
Inputs:
machineNames - A comma-separated list of machine names. The
machines must be registered prior to completing this operation.
"""
url = self._url + "/machines/remove"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"machineNames" : machineNames
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def editProtocol(self, clusterProtocolObj):
"""
Updates the Cluster Protocol. This will cause the cluster to be
restarted with updated protocol configuration.
"""
if isinstance(clusterProtocolObj, ClusterProtocol): pass
else:
raise AttributeError("Invalid Input, must be a ClusterProtocal Object")
url = self._url + "/editProtocol"
params = {
"f" : "json",
"token" : self._securityHandler.token,
"clusterProtocol" : str(clusterProtocolObj)
}
return self._do_post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
{
"content_hash": "41d4d7ab6207757c3b5d2710c153214e",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 83,
"avg_line_length": 42.79100529100529,
"alnum_prop": 0.49057187017001547,
"repo_name": "achapkowski/ArcREST",
"id": "60577b1ac5fe3a6fed1ca95e0dc19f54680f8b6e",
"size": "16175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcrest/manageags/_clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1234325"
}
],
"symlink_target": ""
}
|
"""State variables for analysis solutions such as powerflow.
"""
from CIM14.CDPSM.GIS_Connectivity.IEC61970.StateVariables.SvTapStep import SvTapStep
# Namespace URI identifying the StateVariables profile of the CIM 14 schema.
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#StateVariables"
# Prefix used when serializing elements from this namespace.
nsPrefix = "cimStateVariables"
|
{
"content_hash": "f920b4512b3faed6326ce72b1085fc4d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 105,
"avg_line_length": 36.25,
"alnum_prop": 0.7931034482758621,
"repo_name": "rwl/PyCIM",
"id": "b0a900115bb920ed5f5fdce22c994c34287a8f78",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/CDPSM/GIS_Connectivity/IEC61970/StateVariables/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
"""Test the Foscam config flow."""
from unittest.mock import patch
from libpyfoscam.foscam import (
ERROR_FOSCAM_AUTH,
ERROR_FOSCAM_CMD,
ERROR_FOSCAM_UNAVAILABLE,
ERROR_FOSCAM_UNKNOWN,
)
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.foscam import config_flow
from tests.common import MockConfigEntry
# Baseline camera configuration that the mocked FoscamCamera accepts.
VALID_CONFIG = {
    config_flow.CONF_HOST: "10.0.0.2",
    config_flow.CONF_PORT: 88,
    config_flow.CONF_USERNAME: "admin",
    config_flow.CONF_PASSWORD: "1234",
    config_flow.CONF_STREAM: "Main",
    config_flow.CONF_RTSP_PORT: 554,
}
# Username that authenticates but whose get_dev_info call fails (CMD error).
OPERATOR_CONFIG = {
    config_flow.CONF_USERNAME: "operator",
}
# Username that makes the mocked camera return an unknown-error code.
INVALID_RESPONSE_CONFIG = {
    config_flow.CONF_USERNAME: "interr",
}
# Identity reported by the mocked camera on a successful connection.
CAMERA_NAME = "Mocked Foscam Camera"
CAMERA_MAC = "C0:C1:D0:F4:B4:D4"
def setup_mock_foscam_camera(mock_foscam_camera):
    """Mock FoscamCamera simulating behaviour using a base valid config."""

    def configure_mock_on_init(host, port, user, passwd, verbose=False):
        # Return codes for the two API calls the config flow makes; 0 is
        # the success path here (overridden below on each failure case) --
        # presumably matching the libpyfoscam convention; TODO confirm.
        product_all_info_rc = 0
        dev_info_rc = 0
        dev_info_data = {}
        # Wrong host/port: camera unreachable for both calls.
        if (
            host != VALID_CONFIG[config_flow.CONF_HOST]
            or port != VALID_CONFIG[config_flow.CONF_PORT]
        ):
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_UNAVAILABLE
        # Unknown user or wrong password: authentication failure.
        elif (
            user
            not in [
                VALID_CONFIG[config_flow.CONF_USERNAME],
                OPERATOR_CONFIG[config_flow.CONF_USERNAME],
                INVALID_RESPONSE_CONFIG[config_flow.CONF_USERNAME],
            ]
            or passwd != VALID_CONFIG[config_flow.CONF_PASSWORD]
        ):
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_AUTH
        # Special user that triggers an unknown error from the camera.
        elif user == INVALID_RESPONSE_CONFIG[config_flow.CONF_USERNAME]:
            product_all_info_rc = dev_info_rc = ERROR_FOSCAM_UNKNOWN
        # Operator authenticates but is not allowed to fetch dev info.
        elif user == OPERATOR_CONFIG[config_flow.CONF_USERNAME]:
            dev_info_rc = ERROR_FOSCAM_CMD
        # Success: report the mocked camera's identity.
        else:
            dev_info_data["devName"] = CAMERA_NAME
            dev_info_data["mac"] = CAMERA_MAC
        mock_foscam_camera.get_product_all_info.return_value = (product_all_info_rc, {})
        mock_foscam_camera.get_dev_info.return_value = (dev_info_rc, dev_info_data)
        return mock_foscam_camera

    # Constructing the mocked FoscamCamera (re)configures its return values
    # based on the credentials it was constructed with.
    mock_foscam_camera.side_effect = configure_mock_on_init
async def test_user_valid(hass):
    """Test valid config from user input."""
    # Starting the user flow must first show an empty form.
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera, patch(
        "homeassistant.components.foscam.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        setup_mock_foscam_camera(mock_foscam_camera)
        # Submitting the baseline config should create an entry.
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            VALID_CONFIG,
        )
        await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == CAMERA_NAME
    assert result["data"] == VALID_CONFIG
    # Entry setup must have been triggered exactly once.
    assert len(mock_setup_entry.mock_calls) == 1
async def test_user_invalid_auth(hass):
    """Test we handle invalid auth from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        # An unrecognized username makes the mocked camera fail auth.
        invalid_user = VALID_CONFIG.copy()
        invalid_user[config_flow.CONF_USERNAME] = "invalid"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            invalid_user,
        )
        await hass.async_block_till_done()
    # The form is re-shown with the auth error attached.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "invalid_auth"}
async def test_user_cannot_connect(hass):
    """Test we handle cannot connect error from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        # A host other than the baseline one is treated as unreachable.
        invalid_host = VALID_CONFIG.copy()
        invalid_host[config_flow.CONF_HOST] = "127.0.0.1"
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            invalid_host,
        )
        await hass.async_block_till_done()
    # The form is re-shown with the connection error attached.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "cannot_connect"}
async def test_user_invalid_response(hass):
    """Test we handle invalid response error from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        # The "interr" user makes the mocked camera return an unknown
        # error code, which the flow reports as an invalid response.
        invalid_response = VALID_CONFIG.copy()
        invalid_response[config_flow.CONF_USERNAME] = INVALID_RESPONSE_CONFIG[
            config_flow.CONF_USERNAME
        ]
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            invalid_response,
        )
        await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "invalid_response"}
async def test_user_already_configured(hass):
    """Test we handle already configured from user input."""
    # Pre-register an entry with identical data so the flow must abort.
    entry = MockConfigEntry(
        domain=config_flow.DOMAIN,
        data=VALID_CONFIG,
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        setup_mock_foscam_camera(mock_foscam_camera)
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            VALID_CONFIG,
        )
        await hass.async_block_till_done()
    # Duplicate configuration aborts instead of creating a second entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_user_unknown_exception(hass):
    """Test we handle unknown exceptions from user input."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    with patch(
        "homeassistant.components.foscam.config_flow.FoscamCamera",
    ) as mock_foscam_camera:
        # Any unexpected exception from the camera library must surface
        # as the generic "unknown" form error, not crash the flow.
        mock_foscam_camera.side_effect = Exception("test")
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            VALID_CONFIG,
        )
        await hass.async_block_till_done()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "unknown"}
|
{
"content_hash": "8e053f4ccbf65a334fc484c91c77b456",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 88,
"avg_line_length": 32.02429149797571,
"alnum_prop": 0.6359039190897599,
"repo_name": "toddeye/home-assistant",
"id": "63c30c16babe562d7699f88b9abed88ac7270665",
"size": "7910",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/foscam/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import numpy as np
def read(filename):
    """Read a Rockstar-style stat file into a structured numpy array.

    The first line is a '#'-prefixed, space-separated header naming the
    columns; each data row holds two integer IDs followed by 36 floats.

    Args:
        filename: path to the stat file.

    Returns:
        A structured numpy array with one field per header name.
    """
    # Parse the column names from the commented header line; the context
    # manager closes the file even if readline raises.
    with open(filename) as f:
        names = f.readline().strip('\n#').split(' ')
    # Two integer ID columns followed by 36 float columns (plain list
    # replaces the original np.concatenate/str-split construction).
    dtype = ['int', 'int'] + ['float'] * 36
    # BUG FIX: genfromtxt's `skiprows` keyword was deprecated and removed
    # (numpy >= 1.10); `skip_header=1` skips the header line instead.
    stat = np.genfromtxt(filename, dtype=dtype, names=names, skip_header=1)
    return stat
|
{
"content_hash": "f3860ecb75cdc79ec0a519c0beb56860",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5979381443298969,
"repo_name": "salomanders/NbodyPythonTools",
"id": "3109a74a4d1673d44bfafd97c7126d2ec1caf769",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nbdpt/rockstar/readout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126801"
}
],
"symlink_target": ""
}
|
"""Different policy losses."""
import gin
from seed_rl.agents.policy_gradient.modules import constraints
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class AdvantagePreprocessor(tf.Module):
  """Advantages preprocessor."""

  def __init__(self, normalize=False, only_positive=False, only_top_half=False,
               offset=None):
    """Creates the advantage preprocessor.

    Args:
      normalize: Whether to normalize advantages to have mean 0 and std 1 in
        each batch.
      only_positive: Whether to only take positive advantages.
      only_top_half: Whether to only take top half of advantages in each batch.
      offset: A value added to advantages (after normalization).
    """
    self.normalize = normalize
    self.only_positive = only_positive
    self.only_top_half = only_top_half
    self.offset = offset

  def __call__(self, advantages):
    """Processes the advantages.

    Args:
      advantages: A tensor with advantages.

    Returns:
      Processed advantages and a tf.float32 tensor of the same shape
      with 0s and 1s indicating which advantages should be used.
    """
    mask = tf.ones_like(advantages)
    if self.normalize:
      # Standardize over the whole batch; epsilon guards against zero std.
      advantages -= tf.reduce_mean(advantages)
      advantages /= tf.math.reduce_std(advantages) + 1e-8
    if self.only_top_half:
      # The threshold is the smallest of the top half of the flattened
      # batch; entries below it are masked out.
      flat = tf.reshape(advantages, [-1])
      median = tf.math.reduce_min(tf.math.top_k(flat, k=flat.shape[0] // 2,
                                                sorted=False)[0])
      mask *= tf.cast(advantages >= median, tf.float32)
    if self.only_positive:
      mask *= tf.cast(advantages > 0., tf.float32)
    if self.offset is not None:
      advantages += self.offset
    # Zero out masked entries so downstream losses ignore them.
    return mask * advantages, mask
@gin.configurable
class GeneralizedAdvantagePolicyLoss(tf.Module, logging_module.LoggingModule):
  """Generalized advantage-based policy loss.

  Covers typical cases like PG, PPO, V-trace, AWR and V-MPO.
  """

  def __init__(self,
               advantage_preprocessor=None,
               use_importance_weights=False,
               max_importance_weight=None,
               ppo_epsilon=None,
               max_advantage=None,
               advantage_transformation=None,
               temperature=None):
    """Creates the loss.

    Args:
      advantage_preprocessor: An object (of AdvantagePreprocessor class) used
        to process the advantages.
      use_importance_weights: Whether to use importance sampling weights.
      max_importance_weight: Bigger importance weights are clipped.
      ppo_epsilon: If not None, than PPO-style pessimistic clipping is used.
      max_advantage: Bigger advantages are clipped. Clipping happens
        between scaling and applying the transformation.
      advantage_transformation: A function applied to advantages.
      temperature: If not None, than MPO/AWR-style advantages exponentiation
        is performed. This argument should be a Coefficient object which
        provides the temperature for the exponentiation.
    """
    self.advantage_preprocessor = (advantage_preprocessor or
                                   AdvantagePreprocessor())
    self.use_importance_weights = use_importance_weights
    self.max_importance_weight = max_importance_weight
    self.max_advantage = max_advantage
    self.advantage_transformation = advantage_transformation
    self.ppo_epsilon = ppo_epsilon
    self.temperature = temperature

  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs, actions, target_logits,
               behaviour_logits, parametric_action_distribution=None):
    """Computes the scalar policy loss.

    Args:
      advantages: Advantage estimates for the taken actions.
      target_action_log_probs: log pi(a|s) under the target policy.
      behaviour_action_log_probs: log mu(a|s) under the behaviour policy.
      actions: Unused by this loss; accepted for interface compatibility.
      target_logits: Unused by this loss; accepted for interface compatibility.
      behaviour_logits: Unused by this loss; accepted for interface
        compatibility.
      parametric_action_distribution: Unused by this loss; accepted for
        interface compatibility.

    Returns:
      A scalar loss tensor (mean over the batch, plus an optional
      temperature-adjustment term).
    """
    self.log('GeneralizedAdvantagePolicyLoss/advantages', advantages)
    self.log('GeneralizedAdvantagePolicyLoss/abs_advantages',
             tf.abs(advantages))
    self.log('GeneralizedAdvantagePolicyLoss/log_pi', target_action_log_probs)
    self.log('GeneralizedAdvantagePolicyLoss/log_mu',
             behaviour_action_log_probs)
    # Normalize/mask/offset advantages; mask marks the entries to keep.
    advantages, mask = self.advantage_preprocessor(advantages)
    # advantage transformation (e.g. AWR/V-MPO)
    if self.advantage_transformation is not None:
      assert self.temperature is not None
      self.log('GeneralizedAdvantagePolicyLoss/temperature', self.temperature())
      # Scale by the (non-differentiated) temperature before transforming.
      advantages = advantages / tf.stop_gradient(self.temperature())
      if self.max_advantage is not None:
        advantages = tf.minimum(advantages, self.max_advantage)
        self.log('GeneralizedAdvantagePolicyLoss/p_clipped_advantage',
                 tf.cast(advantages == self.max_advantage, tf.float32))
      # Kept for the temperature-adjustment KL term below.
      advantages_before_transformation = advantages
      advantages = mask * self.advantage_transformation(advantages)
      self.log('GeneralizedAdvantagePolicyLoss/transformed_advantages',
               advantages)
    else:
      if self.max_advantage is not None:
        advantages = tf.minimum(advantages, self.max_advantage)
      advantages *= mask
    self.log('GeneralizedAdvantagePolicyLoss/processed_advantages', advantages)
    # +/-1e9 sentinels exclude masked-out entries from the min/max stats.
    max_adv = tf.reduce_max(mask * advantages + (1. - mask) * -1e9)
    min_adv = tf.reduce_min(mask * advantages + (1. - mask) * 1e9)
    self.log('GeneralizedAdvantagePolicyLoss/processed_advantages_min', min_adv)
    self.log('GeneralizedAdvantagePolicyLoss/processed_advantages_max', max_adv)
    self.log('GeneralizedAdvantagePolicyLoss/processed_advantages_range',
             max_adv - min_adv)
    # PG loss
    loss = -target_action_log_probs * tf.stop_gradient(advantages)
    # importance sampling weights
    log_rho = target_action_log_probs - behaviour_action_log_probs
    log_rho = tf.stop_gradient(log_rho)
    if self.ppo_epsilon is not None:
      # This is written differently than the standard PPO loss but should give
      # the same gradient.
      clip_pos_mask = ((advantages > 0) &
                       (log_rho > tf.math.log(1 + self.ppo_epsilon)))
      clip_neg_mask = ((advantages < 0) &
                       (log_rho < -tf.math.log(1 + self.ppo_epsilon)))
      loss_mask = tf.cast(~(clip_pos_mask | clip_neg_mask), tf.float32)
      loss *= loss_mask
      log_rho *= loss_mask  # to avoid overflow in exp
    if self.max_importance_weight is not None:
      log_rho = tf.minimum(log_rho, tf.math.log(self.max_importance_weight))
      self.log('GeneralizedAdvantagePolicyLoss/p_clipped_iw',
               tf.cast(log_rho == tf.math.log(self.max_importance_weight),
                       tf.float32))
    self.log('GeneralizedAdvantagePolicyLoss/log_rho', log_rho)
    if self.use_importance_weights:
      loss *= tf.exp(log_rho)
    loss = tf.reduce_mean(loss)
    if self.advantage_transformation is not None:  # temperature adjustment
      # This is KL between nonparametric target distribution and behavioral one.
      # Eq. (4) in V-MPO paper.
      advantages = advantages_before_transformation
      advantages *= mask
      advantages -= (1. - mask) * 1e3  # will be 0 after exp
      kl = tf.math.reduce_logsumexp(advantages) - tf.math.log(
          tf.reduce_sum(mask) + 1e-3)
      self.log('GeneralizedAdvantagePolicyLoss/mpo_kl', kl)
      loss += self.temperature.adjustment_loss(kl)
    return loss
@gin.configurable
def pg():
  """Plain policy-gradient loss: no importance weighting, no clipping."""
  return GeneralizedAdvantagePolicyLoss()
@gin.configurable
def vtrace(max_importance_weight=1.):
  """V-trace-style loss: importance-weighted PG with clipped weights."""
  return GeneralizedAdvantagePolicyLoss(
      use_importance_weights=True,
      max_importance_weight=max_importance_weight)
@gin.configurable
def ppo(epsilon, normalize_advantages=False, advantage_offset=None):
  """PPO loss: importance weighting with pessimistic epsilon-clipping."""
  return GeneralizedAdvantagePolicyLoss(
      use_importance_weights=True,
      ppo_epsilon=epsilon,
      advantage_preprocessor=AdvantagePreprocessor(
          normalize=normalize_advantages,
          offset=advantage_offset))
@gin.configurable
def awr(beta, w_max):
  """AWR loss: exponentiated advantages with fixed temperature `beta`."""
  return GeneralizedAdvantagePolicyLoss(
      advantage_transformation=tf.exp,
      temperature=constraints.FixedCoefficient(beta),
      # Clipping advantages at log(w_max) caps exp(adv) at w_max.
      max_advantage=tf.math.log(w_max))
@gin.configurable
def bc_logp():
  """Behavioural cloning via log-prob: advantages replaced with constant 1.

  With a constant advantage transformation the generalized loss reduces to
  plain -log pi(a|s) on the behaviour actions.
  """
  # Consistency fix: every other loss factory in this module (pg, vtrace,
  # ppo, awr, vmpo, repeat_positive_advantages) is gin-configurable; this
  # one was missing the decorator. Direct callers are unaffected.
  return GeneralizedAdvantagePolicyLoss(
      advantage_transformation=lambda x: tf.constant(
          1, dtype=x.dtype, shape=x.shape),
      temperature=constraints.FixedCoefficient(1))
@gin.configurable
def softmax_all_dims(t):
  """Softmax normalized jointly over all axes of `t`.

  Flattens the tensor, applies a single softmax across every element, and
  restores the input shape.

  Args:
    t: A tensor of arbitrary shape.

  Returns:
    A tensor with the same shape as `t` whose elements sum to 1.
  """
  flat = tf.reshape(t, [-1])
  # tf.shape(t) (instead of the static t.shape) also supports inputs whose
  # shape is only known at graph-execution time.
  return tf.reshape(tf.nn.softmax(flat), tf.shape(t))
@gin.configurable
def vmpo(e_n):
  """V-MPO policy loss (top-half advantages, learned temperature).

  Backward KL regularizer needs to be added separately to get full V-MPO.

  Args:
    e_n: Threshold for the Lagrange-inequality temperature coefficient.
  """
  return GeneralizedAdvantagePolicyLoss(
      advantage_transformation=softmax_all_dims,
      advantage_preprocessor=AdvantagePreprocessor(only_top_half=True),
      temperature=constraints.LagrangeInequalityCoefficient(
          threshold=e_n,
          adjustment_speed=10,
          init_variables=False))
@gin.configurable
def repeat_positive_advantages():
  """Supervised learning on actions with positive advantages.

  Both AWR and V-MPO have this behaviour in the limit (beta->0 or e_n->0).
  """
  return awr(beta=1e-6, w_max=1.)
|
{
"content_hash": "04df54deb8fc18aac7631a32b29d3311",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 80,
"avg_line_length": 39.07327586206897,
"alnum_prop": 0.685273028130171,
"repo_name": "google-research/seed_rl",
"id": "9a71641fe1247abb28aa320cb56884374fa7616d",
"size": "9660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/policy_gradient/modules/policy_losses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "41131"
},
{
"name": "Jupyter Notebook",
"bytes": "72883"
},
{
"name": "Python",
"bytes": "614110"
},
{
"name": "Shell",
"bytes": "31284"
},
{
"name": "Starlark",
"bytes": "932"
}
],
"symlink_target": ""
}
|
import unittest
import os
import time
import sys
import random
import math
import functools
import contextlib
import tempfile
import numpy as np
from PIL import Image, ImageEnhance
import paddle
import paddle.fluid as fluid
from paddle.dataset.common import download
from paddle.fluid.contrib.slim.quantization import PostTrainingQuantization
# Static-graph mode for the fluid / PostTrainingQuantization APIs used below.
paddle.enable_static()
# Fix seeds so shuffling/sampling is reproducible across runs.
random.seed(0)
np.random.seed(0)
DATA_DIM = 224  # Side length (pixels) of the center-cropped network input.
THREAD = 1  # Worker threads for the xmap reader.
BUF_SIZE = 102400  # Buffer size for the xmap reader.
DATA_DIR = 'data/ILSVRC2012'  # Relative data path expected by the readers.
# ImageNet per-channel normalization constants (RGB, CHW layout).
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
def resize_short(img, target_size):
    """Resizes `img` so its shorter side equals `target_size`, keeping aspect."""
    scale = target_size / float(min(img.size))
    new_size = (
        int(round(img.size[0] * scale)),
        int(round(img.size[1] * scale)),
    )
    return img.resize(new_size, Image.LANCZOS)
def crop_image(img, target_size, center):
    """Crops a square region of side `target_size` from a PIL image.

    Args:
        img: PIL image (anything exposing `.size` and `.crop`).
        target_size: Side length (pixels) of the square crop.
        center: If True, crop from the image center; otherwise pick a
            uniformly random position.

    Returns:
        The cropped image.
    """
    width, height = img.size
    size = target_size
    if center:
        # Integer division: the Python-2 style `/ 2` produced float crop
        # coordinates for the box passed to `img.crop`.
        w_start = (width - size) // 2
        h_start = (height - size) // 2
    else:
        w_start = np.random.randint(0, width - size + 1)
        h_start = np.random.randint(0, height - size + 1)
    w_end = w_start + size
    h_end = h_start + size
    return img.crop((w_start, h_start, w_end, h_end))
def process_image(sample, mode, color_jitter, rotate):
    """Loads and preprocesses one validation image.

    Args:
        sample: Tuple of (image path, integer label).
        mode: Unused here; kept for the shared reader interface.
        color_jitter: Unused here; kept for the shared reader interface.
        rotate: Unused here; kept for the shared reader interface.

    Returns:
        Tuple of (normalized float32 CHW image array, label).
    """
    img_path = sample[0]
    img = Image.open(img_path)
    # Resize so the short side is 256, then center-crop to DATA_DIM.
    img = resize_short(img, target_size=256)
    img = crop_image(img, target_size=DATA_DIM, center=True)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    # HWC uint8 -> CHW float32 in [0, 1], then ImageNet normalization.
    img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
    img -= img_mean
    img /= img_std
    return img, sample[1]
def _reader_creator(
    file_list,
    mode,
    shuffle=False,
    color_jitter=False,
    rotate=False,
    data_dir=DATA_DIR,
):
    """Builds a multi-threaded image reader over a list file.

    Args:
        file_list: Path of a text file with '<relative image path> <label>'
            per line.
        mode: Forwarded to process_image.
        shuffle: If True, shuffles the file order once at startup.
        color_jitter: Forwarded to process_image.
        rotate: Forwarded to process_image.
        data_dir: Root directory that image paths are relative to.

    Returns:
        A paddle reader yielding (image array, label) tuples.
    """

    def reader():
        # Yields (absolute image path, int label); missing files are skipped.
        with open(file_list) as flist:
            full_lines = [line.strip() for line in flist]
            if shuffle:
                np.random.shuffle(full_lines)
            lines = full_lines
            for line in lines:
                img_path, label = line.split()
                img_path = os.path.join(data_dir, img_path)
                if not os.path.exists(img_path):
                    continue
                yield img_path, int(label)

    mapper = functools.partial(
        process_image, mode=mode, color_jitter=color_jitter, rotate=rotate
    )
    return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
def val(data_dir=DATA_DIR):
    """Builds the validation-set reader from `<data_dir>/val_list.txt`."""
    return _reader_creator(
        os.path.join(data_dir, 'val_list.txt'),
        'val',
        shuffle=False,
        data_dir=data_dir,
    )
class TestPostTrainingQuantization(unittest.TestCase):
    """Base harness: downloads data/models, compares FP32 vs INT8 accuracy."""

    def setUp(self):
        """Prepares dataset caches, batch settings and a temp output dir."""
        self.int8_download = 'int8/download'
        self.cache_folder = os.path.expanduser(
            '~/.cache/paddle/dataset/' + self.int8_download
        )
        self.data_cache_folder = ''
        data_urls = []
        data_md5s = []
        # DATASET=full selects the real ImageNet validation set; otherwise a
        # small calibration sample is downloaded.
        if os.environ.get('DATASET') == 'full':
            data_urls.append(
                'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partaa'
            )
            data_md5s.append('60f6525b0e1d127f345641d75d41f0a8')
            data_urls.append(
                'https://paddle-inference-dist.bj.bcebos.com/int8/ILSVRC2012_img_val.tar.gz.partab'
            )
            data_md5s.append('1e9f15f64e015e58d6f9ec3210ed18b5')
            self.data_cache_folder = self.download_data(
                data_urls, data_md5s, "full_data", False
            )
        else:
            data_urls.append(
                'http://paddle-inference-dist.bj.bcebos.com/int8/calibration_test_data.tar.gz'
            )
            data_md5s.append('1b6c1c434172cca1bf9ba1e4d7a3157d')
            self.data_cache_folder = self.download_data(
                data_urls, data_md5s, "small_data", False
            )
        # reader/decorator.py requires the relative path to the data folder
        if not os.path.exists("./data/ILSVRC2012"):
            cmd = 'rm -rf {0} && ln -s {1} {0}'.format(
                "data", self.data_cache_folder
            )
            os.system(cmd)
        self.batch_size = 1 if os.environ.get('DATASET') == 'full' else 50
        self.infer_iterations = (
            50000 if os.environ.get('DATASET') == 'full' else 2
        )
        # Quantized model goes into a per-test temp dir, removed in tearDown.
        self.root_path = tempfile.TemporaryDirectory()
        self.int8_model = os.path.join(
            self.root_path.name, "post_training_quantization"
        )

    def tearDown(self):
        """Removes the temporary directory holding the quantized model."""
        self.root_path.cleanup()

    def cache_unzipping(self, target_folder, zip_path):
        """Extracts `zip_path` into `target_folder` unless it already exists."""
        if not os.path.exists(target_folder):
            cmd = 'mkdir {0} && tar xf {1} -C {0}'.format(
                target_folder, zip_path
            )
            os.system(cmd)

    def download_data(self, data_urls, data_md5s, folder_name, is_model=True):
        """Downloads (and, for the full set, concatenates) archives, then unpacks.

        Args:
            data_urls: List of archive (part) URLs.
            data_md5s: MD5 checksums matching `data_urls`.
            folder_name: Cache subfolder to extract into.
            is_model: If True, always download the first URL (model archives).

        Returns:
            Path of the extracted cache folder.
        """
        data_cache_folder = os.path.join(self.cache_folder, folder_name)
        zip_path = ''
        if os.environ.get('DATASET') == 'full':
            file_names = []
            for i in range(0, len(data_urls)):
                download(data_urls[i], self.int8_download, data_md5s[i])
                file_names.append(data_urls[i].split('/')[-1])
            zip_path = os.path.join(
                self.cache_folder, 'full_imagenet_val.tar.gz'
            )
            if not os.path.exists(zip_path):
                # The full validation set ships in parts; concatenate them.
                cat_command = 'cat'
                for file_name in file_names:
                    cat_command += ' ' + os.path.join(
                        self.cache_folder, file_name
                    )
                cat_command += ' > ' + zip_path
                os.system(cat_command)
        if os.environ.get('DATASET') != 'full' or is_model:
            download(data_urls[0], self.int8_download, data_md5s[0])
            file_name = data_urls[0].split('/')[-1]
            zip_path = os.path.join(self.cache_folder, file_name)
        print('Data is downloaded at {0}'.format(zip_path))
        self.cache_unzipping(data_cache_folder, zip_path)
        return data_cache_folder

    def download_model(self):
        # Hook for subclasses; the base class has no model of its own.
        pass

    def run_program(self, model_path, batch_size, infer_iterations):
        """Runs inference; returns (throughput, latency, top-1 accuracy)."""
        image_shape = [3, 224, 224]
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        [
            infer_program,
            feed_dict,
            fetch_targets,
        ] = fluid.io.load_inference_model(model_path, exe)
        val_reader = paddle.batch(val(), batch_size)
        iterations = infer_iterations
        test_info = []
        cnt = 0
        periods = []
        for batch_id, data in enumerate(val_reader()):
            image = np.array([x[0].reshape(image_shape) for x in data]).astype(
                "float32"
            )
            label = np.array([x[1] for x in data]).astype("int64")
            label = label.reshape([-1, 1])
            t1 = time.time()
            _, acc1, _ = exe.run(
                infer_program,
                feed={feed_dict[0]: image, feed_dict[1]: label},
                fetch_list=fetch_targets,
            )
            t2 = time.time()
            period = t2 - t1
            periods.append(period)
            # Weight batch accuracy by batch size for a correct global mean.
            test_info.append(np.mean(acc1) * len(data))
            cnt += len(data)
            if (batch_id + 1) % 100 == 0:
                print("{0} images,".format(batch_id + 1))
                sys.stdout.flush()
            if (batch_id + 1) == iterations:
                break
        throughput = cnt / np.sum(periods)
        latency = np.average(periods)
        acc1 = np.sum(test_info) / cnt
        return (throughput, latency, acc1)

    def generate_quantized_model(
        self,
        model_path,
        quantizable_op_type,
        batch_size,
        algo="KL",
        round_type="round",
        is_full_quantize=False,
        is_use_cache_file=False,
        is_optimize_model=False,
        batch_nums=10,
        onnx_format=False,
    ):
        """Runs PostTrainingQuantization and saves the INT8 model."""
        try:
            # NOTE(review): os.system does not raise on failure, so this
            # except branch is unlikely to ever trigger — confirm intent.
            os.system("mkdir " + self.int8_model)
        except Exception as e:
            print(
                "Failed to create {} due to {}".format(self.int8_model, str(e))
            )
            sys.exit(-1)
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        scope = fluid.global_scope()
        val_reader = val()
        ptq = PostTrainingQuantization(
            executor=exe,
            sample_generator=val_reader,
            model_dir=model_path,
            batch_size=batch_size,
            batch_nums=batch_nums,
            algo=algo,
            quantizable_op_type=quantizable_op_type,
            round_type=round_type,
            is_full_quantize=is_full_quantize,
            optimize_model=is_optimize_model,
            onnx_format=onnx_format,
            is_use_cache_file=is_use_cache_file,
        )
        ptq.quantize()
        ptq.save_quantized_model(self.int8_model)

    def run_test(
        self,
        model,
        algo,
        round_type,
        data_urls,
        data_md5s,
        quantizable_op_type,
        is_full_quantize,
        is_use_cache_file,
        is_optimize_model,
        diff_threshold,
        onnx_format=False,
        batch_nums=10,
    ):
        """End-to-end check: FP32 accuracy minus INT8 accuracy < threshold."""
        infer_iterations = self.infer_iterations
        batch_size = self.batch_size
        model_cache_folder = self.download_data(data_urls, data_md5s, model)
        print(
            "Start FP32 inference for {0} on {1} images ...".format(
                model, infer_iterations * batch_size
            )
        )
        (fp32_throughput, fp32_latency, fp32_acc1) = self.run_program(
            os.path.join(model_cache_folder, "model"),
            batch_size,
            infer_iterations,
        )
        print(
            "Start INT8 post training quantization for {0} on {1} images ...".format(
                model, batch_nums * batch_size
            )
        )
        self.generate_quantized_model(
            os.path.join(model_cache_folder, "model"),
            quantizable_op_type,
            batch_size,
            algo,
            round_type,
            is_full_quantize,
            is_use_cache_file,
            is_optimize_model,
            batch_nums,
            onnx_format,
        )
        print(
            "Start INT8 inference for {0} on {1} images ...".format(
                model, infer_iterations * batch_size
            )
        )
        (int8_throughput, int8_latency, int8_acc1) = self.run_program(
            self.int8_model, batch_size, infer_iterations
        )
        print("---Post training quantization of {} method---".format(algo))
        print(
            "FP32 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.".format(
                model, batch_size, fp32_throughput, fp32_latency, fp32_acc1
            )
        )
        print(
            "INT8 {0}: batch_size {1}, throughput {2} images/second, latency {3} second, accuracy {4}.\n".format(
                model, batch_size, int8_throughput, int8_latency, int8_acc1
            )
        )
        sys.stdout.flush()
        delta_value = fp32_acc1 - int8_acc1
        self.assertLess(delta_value, diff_threshold)
class TestPostTrainingKLForMobilenetv1(TestPostTrainingQuantization):
    def test_post_training_kl_mobilenetv1(self):
        """Quantizes MobileNet-V1 with the 'KL' algorithm and checks accuracy."""
        model = "MobileNet-V1"
        algo = "KL"
        round_type = "round"
        data_urls = [
            'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
        ]
        data_md5s = ['13892b0716d26443a8cdea15b3c6438b']
        quantizable_op_type = [
            "conv2d",
            "depthwise_conv2d",
            "mul",
            "pool2d",
        ]
        is_full_quantize = False
        is_use_cache_file = False
        is_optimize_model = True
        diff_threshold = 0.025
        batch_nums = 3
        self.run_test(
            model,
            algo,
            round_type,
            data_urls,
            data_md5s,
            quantizable_op_type,
            is_full_quantize,
            is_use_cache_file,
            is_optimize_model,
            diff_threshold,
            # Bug fix: batch_nums was assigned above but never forwarded, so
            # the default (10) was silently used instead of 3 (the 'hist'
            # sibling test forwards it as intended).
            batch_nums=batch_nums,
        )
class TestPostTrainingavgForMobilenetv1(TestPostTrainingQuantization):
    def test_post_training_avg_mobilenetv1(self):
        """Quantizes MobileNet-V1 with the 'avg' algorithm and checks accuracy."""
        self.run_test(
            model="MobileNet-V1",
            algo="avg",
            round_type="round",
            data_urls=[
                'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
            ],
            data_md5s=['13892b0716d26443a8cdea15b3c6438b'],
            quantizable_op_type=[
                "conv2d",
                "depthwise_conv2d",
                "mul",
            ],
            is_full_quantize=False,
            is_use_cache_file=False,
            is_optimize_model=True,
            diff_threshold=0.025,
        )
class TestPostTraininghistForMobilenetv1(TestPostTrainingQuantization):
    def test_post_training_hist_mobilenetv1(self):
        """Quantizes MobileNet-V1 with the 'hist' algorithm and checks accuracy."""
        model = "MobileNet-V1"
        algo = "hist"
        round_type = "round"
        data_urls = [
            'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
        ]
        data_md5s = ['13892b0716d26443a8cdea15b3c6438b']
        quantizable_op_type = [
            "conv2d",
            "depthwise_conv2d",
            "mul",
        ]
        is_full_quantize = False
        is_use_cache_file = False
        is_optimize_model = True
        diff_threshold = 0.03
        batch_nums = 3  # Fewer calibration batches keep the test fast.
        self.run_test(
            model,
            algo,
            round_type,
            data_urls,
            data_md5s,
            quantizable_op_type,
            is_full_quantize,
            is_use_cache_file,
            is_optimize_model,
            diff_threshold,
            batch_nums=batch_nums,
        )
class TestPostTrainingAbsMaxForMobilenetv1(TestPostTrainingQuantization):
    def test_post_training_abs_max_mobilenetv1(self):
        """Quantizes MobileNet-V1 with the 'abs_max' algorithm and checks accuracy."""
        self.run_test(
            model="MobileNet-V1",
            algo="abs_max",
            round_type="round",
            data_urls=[
                'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
            ],
            data_md5s=['13892b0716d26443a8cdea15b3c6438b'],
            quantizable_op_type=[
                "conv2d",
                "mul",
            ],
            is_full_quantize=False,
            is_use_cache_file=False,
            is_optimize_model=False,
            # The accuracy diff of post-training quantization (abs_max) maybe bigger
            diff_threshold=0.05,
        )
class TestPostTrainingAvgONNXFormatForMobilenetv1(TestPostTrainingQuantization):
    def test_post_training_onnx_format_mobilenetv1(self):
        """Quantizes MobileNet-V1 with ONNX-format output enabled.

        NOTE(review): the class name says 'Avg' but `algo` is 'emd' —
        confirm which algorithm this case is meant to exercise.
        """
        model = "MobileNet-V1"
        algo = "emd"
        round_type = "round"
        data_urls = [
            'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz'
        ]
        data_md5s = ['13892b0716d26443a8cdea15b3c6438b']
        quantizable_op_type = [
            "conv2d",
            "depthwise_conv2d",
            "mul",
        ]
        is_full_quantize = False
        is_use_cache_file = False
        is_optimize_model = True
        onnx_format = True
        diff_threshold = 0.05
        batch_nums = 3  # Fewer calibration batches keep the test fast.
        self.run_test(
            model,
            algo,
            round_type,
            data_urls,
            data_md5s,
            quantizable_op_type,
            is_full_quantize,
            is_use_cache_file,
            is_optimize_model,
            diff_threshold,
            onnx_format=onnx_format,
            batch_nums=batch_nums,
        )
# Script entry point: run every test case in this module.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "d6951429d4bf3f6c45e53f123eb810d2",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 113,
"avg_line_length": 31.103053435114504,
"alnum_prop": 0.5403730519082096,
"repo_name": "luotao1/Paddle",
"id": "37daeab1186299f23da8e90101ad12926ebde7f9",
"size": "16910",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/contrib/slim/tests/test_post_training_quantization_mobilenetv1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
"""Numpy utility functions."""
# Could penalize minimum volume for RBFs.
# Could penalize heavily if another quadric has higher weight within this quad's
# radius.
# Plot fraction in which 1 quadrics has 90% or more of the total weight.
# If total weight is below epsilon, don't show (or show which those are). Set
# this using a colab notebook, which will need to work first...
# Replace exp with some better falloff function!
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.pyplot as plt
# This import pulls in necessary backend dependencies for matplotlib:
# pylint:disable=unused-import
from mpl_toolkits.mplot3d import Axes3D
# pylint:enable=unused-import
import numpy as np
from skimage import measure
from skimage.transform import resize
import tensorflow as tf
def batch_np(arr, batch_size):
  """Tiles `arr` along a new leading batch axis, `batch_size` times."""
  expanded = arr[np.newaxis, ...]
  reps = (batch_size,) + (1,) * arr.ndim
  return np.tile(expanded, reps)
def make_coordinate_grid(height, width, is_screen_space, is_homogeneous):
  """Returns an array containing the coordinate grid values for an image.

  Outputs a numpy array to avoid adding unnecessary operations to the graph.

  Args:
    height: int containing the height of the output image.
    width: int containing the width of the output image.
    is_screen_space: bool. If true, then the coordinates are measured in pixels.
      If false, they are in the range (0-1).
    is_homogeneous: bool. If true, then a 1 is appended to the end of each
      coordinate.

  Returns:
    coords: numpy array of shape [height, width, 2] or [height, width, 3],
      depending on whether is_homogeneous is true. The value at location
      [i, j, :] is the (x,y) or (x,y,1) coordinate value at that location.
  """
  # The tf.name_scope wrapper was removed: everything here is pure numpy,
  # so no TensorFlow ops are ever created (matching make_coordinate_grid_3d).
  x_coords = np.linspace(0.5, width - 0.5, width)
  y_coords = np.linspace(0.5, height - 0.5, height)
  if not is_screen_space:
    x_coords /= width
    y_coords /= height
  grid_x, grid_y = np.meshgrid(
      x_coords, y_coords, sparse=False, indexing='xy')
  if is_homogeneous:
    homogeneous_coords = np.ones_like(grid_x)
    return np.stack([grid_x, grid_y, homogeneous_coords], axis=2)
  return np.stack([grid_x, grid_y], axis=2)
def make_coordinate_grid_3d(length, height, width, is_screen_space,
                            is_homogeneous):
  """Returns an array containing the coordinate grid values for a volume.

  Outputs a numpy array to avoid adding unnecessary operations to the graph.

  Args:
    length: int containing the length of the output volume.
    height: int containing the height of the output volume.
    width: int containing the width of the output volume.
    is_screen_space: bool. If true, coordinates are measured in pixels.
      If false, they are in the range (0-1).
    is_homogeneous: bool. If true, a 1 is appended to each coordinate.

  Returns:
    coords: numpy array of shape [length, height, width, 3] or
      [length, height, width, 4], depending on whether is_homogeneous is true.
      The value at location [i, j, k, :] is the (x,y,z) or (x,y,z,1)
      coordinate value at that location.
  """
  extents = (width, height, length)
  axes = [np.linspace(0.5, extent - 0.5, extent) for extent in extents]
  if not is_screen_space:
    # Normalize each axis into (0, 1).
    axes = [axis / extent for axis, extent in zip(axes, extents)]
  channels = list(np.meshgrid(*axes, sparse=False, indexing='ij'))
  if is_homogeneous:
    channels.append(np.ones_like(channels[0]))
  stacked = np.stack(channels, axis=3)
  # meshgrid('ij') produces (w, h, l) ordering; swap to (l, h, w) for
  # TensorFlow compatibility.
  return np.swapaxes(stacked, 0, 2)
def filter_valid(mask, vals):
  """Filters an array to only its valid values.

  Args:
    mask: Boolean numpy array with shape [...] or [..., 1].
    vals: Numpy array with shape [...] or [..., 1]. Trailing 1 doesn't have to
      match.

  Returns:
    Numpy array with the same rank as vals but without elements that have a
    False value in the mask.
  """
  trailing_channel = mask.shape[-1] == 1
  if trailing_channel:
    # Drop the size-1 channel axis so the mask indexes the leading dims.
    assert len(mask.shape) == len(vals.shape)
    mask = np.squeeze(mask, axis=-1)
  else:
    assert len(mask.shape) == len(vals.shape) - 1
  return vals[mask, :]
def zero_by_mask(mask, vals, replace_with=0.0):
  """Sets the invalid part of vals to the value of replace_with.

  Args:
    mask: Boolean array that matches vals in shape, except for squeezable dims
      and the final dimension (the 'channel' dimension).
    vals: Numpy array with shape [..., channel_count].
    replace_with: Value to put in invalid locations, if not 0.0. Dtype should be
      compatible with that of vals.

  Returns:
    Numpy array with shape [..., channel_count] with replace_with in invalid
    locations; the input array is left untouched.
  """
  out = vals.copy()
  spatial_mask = np.reshape(mask, out.shape[:-1])
  out[np.logical_not(spatial_mask), :] = replace_with
  return out
def make_mask(im, thresh=0.0):
  """Computes a numpy boolean mask from an array of (nonnegative) floats."""
  # Inputs are expected to be nonnegative; fail loudly otherwise.
  assert np.min(im) >= 0.0
  return im > thresh
def make_pixel_mask(im):
  """Computes a (height, width) mask that is true when any channel is true.

  Args:
    im: Numpy array with shape (height, width, channel_count).

  Returns:
    Boolean numpy array with shape (height, width).
  """
  # Fix: the `np.bool` alias was deprecated in NumPy 1.20 and removed in
  # 1.24; the builtin `bool` is what the alias resolved to.
  channels_valid = im.astype(bool)
  mask = np.any(channels_valid, axis=2)
  assert len(mask.shape) == 2
  return mask
def thresh_and_radius_to_distance(radius, thresh):
  """Computes the distance at which an rbf reaches a value.

  Given a radius in world units and a threshold, returns the distance in
  world units at which an rbf with that radius decays to `thresh`.
  """
  neg_log_thresh = -np.log(thresh)
  return np.sqrt(2.0 * radius * neg_log_thresh)
def plot_rbfs_at_thresh(centers, radii, thresh=0.5):
  """Generates images visualizing the rbfs at a threshold.

  Args:
    centers: numpy array with shape (batch_size, rbf_count, 3).
    radii: numpy array with shape (batch_size, rbf_count, 3 or 1).
    thresh: The threshold at which to show RBFs.

  Returns:
    Visualization images, one per batch element.
  """
  batch_size, rbf_count = centers.shape[0:2]
  outputs = []
  # Broadcast isotropic radii to per-axis radii:
  if radii.shape[2] == 1:
    radii = np.tile(radii, [1, 1, 3])
  # Set of all spherical angles:
  u = np.linspace(0, 2 * np.pi, 100)
  v = np.linspace(0, np.pi, 100)
  unit_sphere_x = np.outer(np.cos(u), np.sin(v))
  unit_sphere_y = np.outer(np.sin(u), np.sin(v))
  unit_sphere_z = np.outer(np.ones_like(u), np.cos(v))
  for bi in range(batch_size):
    fig = plt.figure(figsize=4 * plt.figaspect(1))  # Square figure
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111, projection='3d')
    for rbf_idx in range(rbf_count):
      # Radii corresponding to the coefficients:
      rx = thresh_and_radius_to_distance(radii[bi, rbf_idx, 0], thresh)
      ry = thresh_and_radius_to_distance(radii[bi, rbf_idx, 1], thresh)
      rz = thresh_and_radius_to_distance(radii[bi, rbf_idx, 2], thresh)
      center_x = centers[bi, rbf_idx, 0]
      center_y = centers[bi, rbf_idx, 1]
      center_z = centers[bi, rbf_idx, 2]
      # Cartesian coordinates that correspond to the spherical angles:
      # (this is the equation of an ellipsoid):
      x = rx * unit_sphere_x + center_x
      y = ry * unit_sphere_y + center_y
      z = rz * unit_sphere_z + center_z
      # Plot:
      ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b')
    # Adjustment of the axes, so that they all have the same span:
    for axis in 'xyz':
      getattr(ax, 'set_%slim' % axis)((-0.5, 0.5))
    canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    height, width = int(height), int(width)
    # Fix: np.fromstring on binary data is deprecated; frombuffer is the
    # supported replacement. .copy() keeps the returned array writable,
    # matching the old behavior.
    image = np.frombuffer(
        canvas.tostring_rgb(), dtype='uint8').reshape(height, width, 3).copy()
    outputs.append(image)
  plt.close('all')
  return np.stack(outputs)
def plot_rbfs(centers, radii, scale=10.0):
  """Generates images visualizing the rbfs.

  Args:
    centers: numpy array with shape (batch_size, rbf_count, 3).
    radii: numpy array with shape (batch_size, rbf_count, 3 or 1).
    scale: The multiplication factor in radii to visualize.

  Returns:
    np array with the visualization images.
  """
  batch_size, rbf_count = centers.shape[0:2]
  outputs = []
  # Broadcast isotropic radii to per-axis radii:
  if radii.shape[2] == 1:
    radii = np.tile(radii, [1, 1, 3])
  # Set of all spherical angles:
  u = np.linspace(0, 2 * np.pi, 100)
  v = np.linspace(0, np.pi, 100)
  unit_sphere_x = np.outer(np.cos(u), np.sin(v))
  unit_sphere_y = np.outer(np.sin(u), np.sin(v))
  unit_sphere_z = np.outer(np.ones_like(u), np.cos(v))
  for bi in range(batch_size):
    fig = plt.figure(figsize=4 * plt.figaspect(1))  # Square figure
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111, projection='3d')
    for rbf_idx in range(rbf_count):
      # Radii corresponding to the coefficients:
      rx = radii[bi, rbf_idx, 0] * scale
      ry = radii[bi, rbf_idx, 1] * scale
      rz = radii[bi, rbf_idx, 2] * scale
      center_x = centers[bi, rbf_idx, 0]
      center_y = centers[bi, rbf_idx, 1]
      center_z = centers[bi, rbf_idx, 2]
      # Cartesian coordinates that correspond to the spherical angles:
      # (this is the equation of an ellipsoid):
      x = rx * unit_sphere_x + center_x
      y = ry * unit_sphere_y + center_y
      z = rz * unit_sphere_z + center_z
      # Plot:
      ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b')
    # Adjustment of the axes, so that they all have the same span:
    for axis in 'xyz':
      getattr(ax, 'set_%slim' % axis)((-0.5, 0.5))
    canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    height, width = int(height), int(width)
    # Fix: np.fromstring on binary data is deprecated; frombuffer is the
    # supported replacement. .copy() keeps the returned array writable,
    # matching the old behavior.
    image = np.frombuffer(
        canvas.tostring_rgb(), dtype='uint8').reshape(height, width, 3).copy()
    outputs.append(image)
  plt.close('all')
  return np.stack(outputs)
def cube_and_render(volume, thresh):
  """Extract a mesh and render an image.

  Args:
    volume: Numpy array holding a cube-shaped scalar volume (squeezable
      singleton dims are removed first).
    thresh: Isolevel passed to marching cubes.

  Returns:
    Tuple (image, world_space_xyz, faces). If meshing or rendering fails, a
    white placeholder image and zero-filled geometry are returned.
  """
  volume = np.squeeze(volume)
  length, height, width = volume.shape
  resolution = length
  # This function doesn't support non-cube volumes:
  assert resolution == height and resolution == width
  try:
    # NOTE(review): marching_cubes_lewiner was renamed to
    # measure.marching_cubes in newer scikit-image releases — confirm the
    # pinned version before upgrading.
    vertices, faces, _, _ = measure.marching_cubes_lewiner(volume, thresh)
    x, y, z = [np.array(x) for x in zip(*vertices)]
    xyzw = np.stack([x, y, z, np.ones_like(x)], axis=1)
    # Center the volume around the origin:
    xyzw += np.array(
        [[-resolution / 2.0, -resolution / 2.0, -resolution / 2.0, 0.]])
    # This assumes the world is right handed with y up; matplotlib's renderer
    # has z up and is left handed:
    # Reflect across z, rotate about x, and rescale to [-0.5, 0.5].
    xyzw *= np.array(
        [[1.0 / resolution, 1.0 / resolution, -1.0 / resolution, 1]])
    y_up_to_z_up = np.array([[1., 0., 0., 0.], [0., 0., -1., 0.],
                             [0., 1., 0., 0.], [0., 0., 0., 1.]])
    xyzw = np.matmul(y_up_to_z_up, xyzw.T).T
    world_space_xyz = np.copy(xyzw[:, :3])
    # TODO(kgenova): Apply any transformation you want to the mesh in the same
    # way as above (i.e. the inverse of the camera extrinsics). If that's hard
    # to express in this space it could go before the transformations above.
    # This is an example that just rotates the object off axis a bit.
    rotation_around_z = np.array([[-0.9396926, -0.3420202, 0., 0.],
                                  [0.3420202, -0.9396926, 0., 0.],
                                  [0., 0., 1.0, 0.], [0., 0., 0., 1.]])
    xyzw = np.matmul(rotation_around_z, xyzw.T).T
    # Back to matplotlib format:
    x, y, z = [np.squeeze(x) for x in np.split(xyzw[:, :3], 3, axis=1)]
    fig = plt.figure(figsize=(8, 8))
    dpi = fig.get_dpi()
    fig.set_size_inches(1220.0 / float(dpi), 1220.0 / float(dpi))
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_trisurf(
        x,
        y,
        z,
        triangles=faces,
        linewidth=0.0,
        shade=True,
        cmap='viridis',
        antialiased=False)
    ax.set_xlim(-0.35, 0.35)
    ax.set_ylim(-0.35, 0.35)
    ax.set_zlim(-0.45, 0.25)
    ax.set_axis_off()
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    height, width = int(height), int(width)
    # Fix: np.fromstring on binary data is deprecated; frombuffer is the
    # supported replacement. .copy() keeps the array writable as before.
    image = np.frombuffer(
        canvas.tostring_rgb(), dtype='uint8').reshape(height, width, 3).copy()
  except (ValueError, RuntimeError):
    # Meshing can fail (e.g. no isosurface crossing at thresh); fall back to
    # a blank white image and dummy geometry. Both exception types were
    # previously handled by two identical handlers.
    image = np.zeros([128, 128, 3], dtype=np.uint8) + 255
    world_space_xyz = np.zeros([100, 3])
    faces = np.zeros([300, 3])
  plt.close('all')
  return image, world_space_xyz, np.copy(faces)
def sample_surface(quadrics, centers, radii, length, height, width,
                   renormalize):
  """Deprecated: Samples the SIF value at the surface locations.

  Args:
    quadrics: numpy array with shape (quadric_count, 4, 4).
    centers: numpy array with shape (quadric_count, 3).
    radii: numpy array whose first 3 columns per quadric are per-axis radii.
    length: int depth of the sample volume.
    height: int height of the sample volume.
    width: int width of the sample volume.
    renormalize: bool. If True, divides by the total RBF weight at each
      sample point.

  Returns:
    numpy array with shape (length, height, width, 1).
  """
  quadric_count = quadrics.shape[0]
  # Homogeneous sample coordinates, shifted to be centered at the origin:
  homogeneous_coords = make_coordinate_grid_3d(
      length, height, width, is_screen_space=False, is_homogeneous=True)
  homogeneous_coords = np.reshape(homogeneous_coords,
                                  [length, height, width, 4])
  homogeneous_coords[:, :, :, :3] -= 0.5
  flat_coords = np.reshape(homogeneous_coords, [length * height * width, 4])
  surface_volume = np.zeros([length, height, width, 1], dtype=np.float32)
  max_bf_weights = np.zeros([length, height, width, 1], dtype=np.float32)
  total_bf_weights = np.zeros([length, height, width, 1], dtype=np.float32)
  for qi in range(quadric_count):
    quadric = quadrics[qi, :, :]
    center = centers[qi, :]
    # This is the one to uncomment when updating the renderer.
    radius = radii[qi, :3]
    # Algebraic distance x^T Q x of the center-offset coordinates:
    offset_coords = flat_coords.copy()
    offset_coords[:, :3] -= np.reshape(center, [1, 3])
    half_distance = np.matmul(quadric, offset_coords.T).T
    algebraic_distance = np.sum(offset_coords * half_distance, axis=1)
    # Per-axis Gaussian falloff weights (min with 1e-6 guards the division):
    squared_diff = (offset_coords[:, :3] * offset_coords[:, :3])
    scale = np.reciprocal(np.minimum(-2 * radius, 1e-6))
    bf_weights = np.exp(np.sum(scale * squared_diff, axis=1))
    volume_addition = np.reshape(algebraic_distance * bf_weights,
                                 [length, height, width, 1])
    max_bf_weights = np.maximum(
        np.reshape(bf_weights, [length, height, width, 1]), max_bf_weights)
    total_bf_weights += np.reshape(bf_weights, [length, height, width, 1])
    surface_volume += volume_addition
  if renormalize:
    surface_volume /= total_bf_weights
  # Sample points where no RBF has meaningful support are set to 1.0.
  surface_volume[max_bf_weights < 0.0001] = 1.0
  return surface_volume
def visualize_prediction(quadrics,
                         centers,
                         radii,
                         renormalize,
                         thresh=0.0,
                         input_volumes=None):
  """Creates a [batch_size, height, width, 3/4] image visualizing the output.

  Renders each batch element's SIF volume (sampled on a 64^3 grid unless
  precomputed volumes are supplied), resizes the render to 256x256, and
  appends a constant alpha channel.
  """
  # TODO(kgenova) All of this needs to go or be rewritten to work with the
  # diffren sampler.
  batch_size = quadrics.shape[0]
  rendered_images = []
  used_volumes = []
  for batch_idx in range(batch_size):
    if input_volumes is None:
      volume = sample_surface(
          quadrics[batch_idx, :, :, :],
          centers[batch_idx, :, :],
          radii[batch_idx, :, :],
          length=64,
          height=64,
          width=64,
          renormalize=renormalize)
    else:
      volume = input_volumes[batch_idx, ...]
    image, _, _ = cube_and_render(volume, thresh)
    image = resize(image, (256, 256))
    # Add a constant alpha channel so downstream code can treat the render
    # as RGBA.
    image = np.pad(
        image, [[0, 0], [0, 0], [0, 1]], mode='constant', constant_values=1.0)
    rendered_images.append(image)
    used_volumes.append(volume)
  return np.stack(rendered_images), np.stack(used_volumes)
|
{
"content_hash": "b53ca19df8e046a846615c1fa44211b9",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 80,
"avg_line_length": 36.80278422273782,
"alnum_prop": 0.6332744924977934,
"repo_name": "google/ldif",
"id": "1ccf547bab1af07520b77677d9bb7724287ac99f",
"size": "16456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldif/util/np_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "71245"
},
{
"name": "Cuda",
"bytes": "39584"
},
{
"name": "Jupyter Notebook",
"bytes": "10649"
},
{
"name": "Makefile",
"bytes": "855"
},
{
"name": "Python",
"bytes": "691453"
},
{
"name": "Shell",
"bytes": "15106"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional `title` field to Partner."""
    dependencies = [
        ('main', '0056_auto_20171030_1446'),
    ]
    operations = [
        migrations.AddField(
            model_name='partner',
            name='title',
            # blank/null so existing Partner rows need no value on upgrade.
            field=models.CharField(blank=True, max_length=300, null=True),
        ),
    ]
|
{
"content_hash": "f16462188850676209fe156b22aef71b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.055555555555557,
"alnum_prop": 0.5944584382871536,
"repo_name": "OKThess/website",
"id": "7cf4ea82ce6550def9eba116a7dec3eec4d5cb23",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/migrations/0057_partner_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22963"
},
{
"name": "Dockerfile",
"bytes": "314"
},
{
"name": "HCL",
"bytes": "7627"
},
{
"name": "HTML",
"bytes": "173826"
},
{
"name": "Python",
"bytes": "106099"
},
{
"name": "Shell",
"bytes": "266"
}
],
"symlink_target": ""
}
|
import itertools
from abc import ABCMeta, abstractmethod
from qiime2.core.util import tuplize, ImmutableBase
def maximal_antichain(*types):
    """Return the maximal elements of *types*, in first-seen order.

    Elements subsumed by (``<=``) another element are dropped, so the result
    is an antichain under the partial order.

    Bug fix: previously an element dominated by an *already collected*
    maximal element (``t <= e``) was still inserted, so e.g.
    ``maximal_antichain(big, small)`` returned both elements. Dominated
    elements are now skipped.
    """
    maximal_elements = {}  # dict as ordered set: easy to delete, retains order
    for t in types:
        dominated = False
        for e in list(maximal_elements):
            if e <= t:
                # e is subsumed by t. Delete first! Duplicate keys would
                # disappear otherwise.
                del maximal_elements[e]
            elif t <= e:
                # t is subsumed by an existing maximal element; skip it.
                dominated = True
                break
        if not dominated:
            maximal_elements[t] = None
    return tuple(maximal_elements)
def minimal_antichain(*types):
    """Return the minimal elements of *types*, in first-seen order.

    Mirror image of ``maximal_antichain``: elements that subsume (``>=``)
    another element are dropped, so the result is an antichain.

    Bug fix: previously an element dominating an *already collected*
    minimal element (``e <= t``) was still inserted, so e.g.
    ``minimal_antichain(small, big)`` returned both elements. Dominating
    elements are now skipped.
    """
    minimal_elements = {}  # dict as ordered set: easy to delete, retains order
    for t in types:
        dominated = False
        for e in list(minimal_elements):
            if t <= e:
                # e is subsumed by the smaller t. Delete first! Duplicate
                # keys would disappear otherwise.
                del minimal_elements[e]
            elif e <= t:
                # An existing minimal element is already below t; skip t.
                dominated = True
                break
        if not dominated:
            minimal_elements[t] = None
    return tuple(minimal_elements)
class _ExpBase(ImmutableBase, metaclass=ABCMeta):
    """Abstract base class for all type expressions.

    Every expression wraps a ``template`` (or ``None``) which supplies the
    expression's name and kind, plus any extra attributes the template
    exposes through its ``public_proxy``.
    """
    def __init__(self, template):
        # Super basic smoke-test
        assert template is None or template.is_template
        self.template = template
    def __getattr__(self, name):
        # Forward unknown attributes to the template, but only names it
        # explicitly exposes via `public_proxy`.
        if ('template' in self.__dict__
                and self.template is not None
                and name in self.template.public_proxy):
            return getattr(self.template, name)
        raise AttributeError("%r object has no attribute %r"
                             % (type(self), name))
    # Prevent infinite recursion when pickling due to __getattr__
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, state):
        self.__dict__ = state
    @property
    def name(self):
        # Delegated to the template.
        return self.template.get_name_expr(self)
    @property
    def kind(self):
        # Delegated to the template; expressions of different kinds do not
        # generally interact (see _AlgebraicExpBase.__and__).
        return self.template.get_kind_expr(self)
    @abstractmethod
    def __eq__(self, other):
        raise NotImplementedError
    def __ne__(self, other):
        return not self == other
    @abstractmethod
    def __le__(self, other):
        raise NotImplementedError
    @abstractmethod
    def __ge__(self, other):
        raise NotImplementedError
    @abstractmethod
    def __or__(self, other):
        raise NotImplementedError
    def __ror__(self, other):
        # Union is commutative.
        return self | other
    @abstractmethod
    def __and__(self, other):
        raise NotImplementedError
    def __rand__(self, other):
        # Intersection is commutative.
        return self & other
    @abstractmethod
    def equals(self, other):
        raise NotImplementedError
    def is_concrete(self):
        # Overridden where an expression can denote a single real type.
        return False
    def iter_symbols(self):
        yield self.name
class IncompleteExp(_ExpBase):
    """A type expression whose field arguments have not been supplied yet.

    Example: ``List`` by itself, before writing ``List[Foo]``. Nearly every
    operation raises ``TypeError`` until fields are applied via
    ``__getitem__``, which produces a complete ``TypeExp``.
    """
    def __init__(self, template):
        super().__init__(template)
        # Only templates that actually declare fields may be incomplete.
        if (self.template is None
                or not list(self.template.get_field_names_expr(self))):
            raise ValueError("Template %r has no fields, should not be used"
                             " with a IncompleteExp." % (template,))
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.name == other.name
                and tuple(self.template.get_field_names_expr(self))
                == tuple(other.template.get_field_names_expr(self)))
    def __hash__(self):
        return (hash(type(self))
                ^ hash(self.name)
                ^ hash(tuple(self.template.get_field_names_expr(self))))
    def __repr__(self):
        # Field names are shown as placeholders, e.g. List[{elements}].
        fields = ', '.join(
            '{%s}' % f for f in self.template.get_field_names_expr(self))
        return self.name + ('[%s]' % fields)
    def __le__(self, other):
        raise TypeError("Cannot compare subtype, %r is missing arguments"
                        " for its fields." % (self,))
    def __ge__(self, other):
        raise TypeError("Cannot compare supertype, %r is missing arguments"
                        " for its fields." % (self,))
    def __contains__(self, value):
        raise TypeError("Cannot check membership of %r, %r is missing"
                        " arguments for its fields." % (value, self))
    def __mod__(self, predicate):
        raise TypeError("Cannot apply predicate %r, %r is missing arguments"
                        " for its fields." % (predicate, self))
    def __or__(self, other):
        raise TypeError("Cannot union with %r, %r is missing arguments"
                        " for its fields." % (other, self))
    def __and__(self, other):
        raise TypeError("Cannot intersect with %r, %r is missing arguments"
                        " for its fields." % (other, self))
    def __getitem__(self, fields):
        # Applying fields completes the expression into a TypeExp.
        fields = tuplize(fields)
        for field in fields:
            if not isinstance(field, _AlgebraicExpBase):
                raise TypeError("Field %r is not complete type expression."
                                % (field,))
        self.template.validate_fields_expr(self, fields)
        return TypeExp(self.template, fields=fields)
    def equals(self, other):
        # Structural equality only; subtype comparison is unavailable here.
        return self == other
class _AlgebraicExpBase(_ExpBase):
    """Shared subtyping/union/intersection algebra for complete expressions.

    Subtype checks are negotiated in two steps: ``self._is_subtype_(other)``
    is consulted first; ``NotImplemented`` defers the question to
    ``other._is_supertype_(self)``; if both defer, the answer is False.
    """
    def __le__(self, other):
        first = self._is_subtype_(other)
        if first is not NotImplemented:
            return first
        second = other._is_supertype_(self)
        if second is not NotImplemented:
            return second
        return False
    def __ge__(self, other):
        first = self._is_supertype_(other)
        if first is not NotImplemented:
            return first
        second = other._is_subtype_(self)
        if second is not NotImplemented:
            return second
        return False
    def __or__(self, other):
        # Unions are only permitted between members of the same union
        # grouping (bottom is the union identity and always allowed).
        if not ((self.is_bottom() or other.is_bottom())
                or (self.get_union_membership() == other.get_union_membership()
                    and self.get_union_membership() is not None)):
            raise TypeError("Cannot union %r and %r" % (self, other))
        # If one side subsumes the other, the union is just that side.
        if self >= other:
            return self
        if self <= other:
            return other
        union = UnionExp((*self.unpack_union(), *other.unpack_union()))
        return union.normalize()
    def __and__(self, other):
        # Kinds must match unless one side is top (the intersection
        # identity).
        if (not self.can_intersect() or not other.can_intersect()
                or (self.kind != other.kind
                    and not (self.is_top() or other.is_top()))):
            raise TypeError("Cannot intersect %r and %r" % (self, other))
        # inverse of __or__
        if self >= other:
            return other
        if self <= other:
            return self
        # Distribute over union
        if isinstance(self, UnionExp) or isinstance(other, UnionExp):
            m = []
            for s, o in itertools.product(self.unpack_union(),
                                          other.unpack_union()):
                m.append(s & o)
            return UnionExp(m).normalize()
        elements = list(itertools.chain(self.unpack_intersection(),
                                        other.unpack_intersection()))
        if len(elements) > 1:
            # Give the expression a chance to collapse, as many intersections
            # are contradictions
            collapse = elements[0]._collapse_intersection_(elements[1])
            if collapse is not NotImplemented:
                for e in elements[2:]:
                    collapse = collapse._collapse_intersection_(e)
                return collapse
        # Back to the regularly scheduled inverse of __or__
        members = minimal_antichain(*self.unpack_intersection(),
                                    *other.unpack_intersection())
        return IntersectionExp(members)
    def _collapse_intersection_(self, other):
        # Subclasses may merge two intersected expressions into one (or into
        # bottom); NotImplemented means "cannot collapse".
        return NotImplemented
    def equals(self, other):
        # Algebraic equality is mutual subtyping, not structural identity
        # (contrast with __eq__).
        return self <= other <= self
    def is_concrete(self):
        return len(list(self.unpack_union())) == 1
    def get_union_membership(self):
        if self.template is not None:
            return self.template.get_union_membership_expr(self)
        return True
    def can_intersect(self):
        return True
    # These methods are to be overridden by UnionExp
    def is_bottom(self):
        return False
    def unpack_union(self):
        # A non-union expression is a union of itself.
        yield self
    # These methods are to be overridden by IntersectionExp
    def is_top(self):
        return False
    def unpack_intersection(self):
        # A non-intersection expression is an intersection of itself.
        yield self
class TypeExp(_AlgebraicExpBase):
    """A complete type expression: a named template with applied fields and
    an optional predicate, e.g. ``Foo[Bar] % SomePredicate``.
    """
    def __init__(self, template, fields=(), predicate=None):
        super().__init__(template)
        # A top predicate constrains nothing; store None instead so that
        # predicated and unpredicated forms compare/hash consistently.
        if predicate is not None and predicate.is_top():
            predicate = None
        self.fields = tuple(fields)
        self.predicate = predicate
        super()._freeze_()
    @property
    def full_predicate(self):
        # Uniform view of the predicate: None becomes the top predicate
        # (an empty IntersectionExp).
        if self.predicate is None:
            return IntersectionExp()
        return self.predicate
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.kind == other.kind
                and self.name == other.name
                and self.fields == other.fields
                and self.full_predicate == other.full_predicate)
    def __hash__(self):
        return (hash(type(self))
                ^ hash(self.kind) ^ hash(self.name)
                ^ hash(self.fields) ^ hash(self.predicate))
    def __repr__(self):
        result = self.name
        if self.fields:
            result += '[%s]' % ', '.join(repr(f) for f in self.fields)
        if self.predicate:
            predicate = repr(self.predicate)
            if self.predicate.template is None:  # is _IdentityExpBase
                predicate = '(%s)' % predicate
            result += ' % ' + predicate
        return result
    def __getitem__(self, fields):
        # Fields can only be applied once (via IncompleteExp.__getitem__).
        raise TypeError("Cannot apply fields (%r) to %r,"
                        " fields already present." % (fields, self))
    def __contains__(self, value):
        # Membership requires both the template's element check and the
        # predicate's.
        return (self.template.is_element_expr(self, value)
                and value in self.full_predicate)
    def __iter__(self):
        # Expand union-valued fields into every concrete combination.
        yield from {self.duplicate(fields=fields)
                    for fields in itertools.product(*self.fields)}
    def iter_symbols(self):
        yield self.name
        for field in self.fields:
            yield from field.iter_symbols()
    def _is_subtype_(self, other):
        if other.template is None:
            return NotImplemented
        if not self.template.is_symbol_subtype_expr(self, other):
            return False
        for f1, f2 in itertools.zip_longest(self.fields, other.fields,
                                            # more fields = more specific
                                            fillvalue=IntersectionExp()):
            if not (f1 <= f2):
                return False
        if not (self.full_predicate <= other.full_predicate):
            return False
        return True
    def _is_supertype_(self, other):
        return NotImplemented
    def __mod__(self, predicate):
        # `expr % predicate` attaches a predicate; only one is allowed.
        if self.predicate:
            raise TypeError("%r already has a predicate, will not add %r"
                            % (self, predicate))
        if predicate is None or predicate.is_top():
            return self
        return self.duplicate(predicate=predicate)
    def __rmod__(self, other):
        raise TypeError("Predicate (%r) must be applied to the right-hand side"
                        " of a type expression." % (other,))
    def duplicate(self, fields=(), predicate=None):
        # Copy with replacements; () and None mean "keep current value".
        if fields == ():
            fields = self.fields
        else:
            self.template.validate_fields_expr(self, fields)
        if predicate is None:
            predicate = self.predicate
        elif predicate.is_top():
            predicate = None
        elif predicate.template is not None:
            self.template.validate_predicate_expr(self, predicate)
        return self.__class__(self.template, fields=fields,
                              predicate=predicate)
    def _collapse_intersection_(self, other):
        # An empty UnionExp is the bottom type: it signals a contradictory
        # (uninhabited) intersection.
        if self.name != other.name:
            return UnionExp()
        new_fields = tuple(
            s & o for s, o in itertools.zip_longest(self.fields, other.fields,
                                                    # same as a type mismatch
                                                    fillvalue=UnionExp()))
        if any(f.is_bottom() for f in new_fields):
            return UnionExp()
        new_predicate = self.full_predicate & other.full_predicate
        if new_predicate.is_bottom():
            return UnionExp()
        return self.duplicate(fields=new_fields, predicate=new_predicate)
    def is_concrete(self):
        return self._bool_attr_method('is_concrete')
    def _bool_attr_method(self, method_name):
        # True only if `method_name` is true for every field and for the
        # predicate.
        def method(s): return getattr(s, method_name)()
        if any(not method(f) for f in self.fields):
            return False
        if not method(self.full_predicate):
            return False
        return True
    def to_ast(self):
        ast = {
            "type": "expression",
            "builtin": True,
            "name": self.name,
            "predicate": self.predicate.to_ast() if self.predicate else None,
            "fields": [field.to_ast() for field in self.fields]
        }
        # Templates may override/extend the default AST.
        self.template.update_ast_expr(self, ast)
        return ast
class PredicateExp(_AlgebraicExpBase):
    """A complete predicate expression; attached to a TypeExp via ``%``."""
    def __init__(self, template):
        super().__init__(template)
        super()._freeze_()
    def __eq__(self, other):
        # NOTE(review): assumes `other` has a `template` attribute; a
        # non-expression comparand raises AttributeError instead of
        # returning NotImplemented -- confirm callers never compare against
        # foreign objects.
        return self.template == other.template
    def __hash__(self):
        return hash(self.template)
    def __contains__(self, value):
        return self.template.is_element_expr(self, value)
    def __repr__(self):
        return repr(self.template)
    def _is_subtype_(self, other):
        if (other.template is not None
                and self.template.is_symbol_subtype_expr(self, other)):
            return True
        return NotImplemented
    def _is_supertype_(self, other):
        if (other.template is not None
                and self.template.is_symbol_supertype_expr(self, other)):
            return True
        return NotImplemented
    def _collapse_intersection_(self, other):
        # Ask each template (in both orders) to merge the pair; None means
        # contradiction (bottom), NotImplemented means "cannot collapse".
        first = self.template.collapse_intersection(other.template)
        if first is None:
            return UnionExp()
        elif first is not NotImplemented:
            return self.__class__(first)
        second = other.template.collapse_intersection(self.template)
        if second is None:
            return UnionExp()
        elif second is not NotImplemented:
            return self.__class__(second)
        return NotImplemented
    def to_ast(self):
        ast = {
            "type": "predicate",
            "name": self.name,
        }
        self.template.update_ast_expr(self, ast)
        return ast
class _IdentityExpBase(_AlgebraicExpBase):
    """
    Base class for IntersectionExp and UnionExp.
    If there are no members, then they are Top or Bottom types respectively
    and represent identity values (like 1 for mul and 0 for add) for the
    type algebra.
    There is no template object for these expressions. That property will
    always be `None`.
    """
    _operator = ' ? '  # subclasses override for __repr__ (' | ' / ' & ')
    def __init__(self, members=()):
        super().__init__(template=None)
        self.members = tuple(members)
        super()._freeze_()
    @property
    def kind(self):
        # An identity element (no members) has no kind of its own.
        if not self.members:
            return "identity"
        return self.members[0].kind
    @property
    def name(self):
        # Identity expressions are anonymous.
        return ""
    def __eq__(self, other):
        # Order-insensitive structural equality over the members.
        return (type(self) is type(other)
                and set(self.members) == set(other.members))
    def __hash__(self):
        return hash(type(self)) ^ hash(frozenset(self.members))
    def __repr__(self):
        if not self.members:
            return self.__class__.__name__ + "()"
        return self._operator.join(repr(m) for m in self.members)
    def __iter__(self):
        # Iterate the concrete expansions of every union member.
        for m in self.unpack_union():
            yield from m
    def iter_symbols(self):
        for m in self.unpack_union():
            yield from m.iter_symbols()
    def get_union_membership(self):
        # Defers to the first member; implicitly None when empty.
        if self.members:
            return self.members[0].get_union_membership()
class UnionExp(_IdentityExpBase):
    """A union (logical OR) of type expressions.

    With no members it is the Bottom type: a subtype of everything, and the
    identity element for union.
    """
    _operator = ' | '  # used by _IdentityExpBase.__repr__
    def __contains__(self, value):
        # A value belongs to a union if it belongs to any member.
        return any(value in s for s in self.members)
    def _is_subtype_(self, other):
        if (isinstance(other, self.__class__)
                and type(other) is not self.__class__):  # other is subclass
            return NotImplemented
        # if other isn't a union, becomes all(s <= other for s in self.members)
        return all(any(s <= o for o in other.unpack_union())
                   for s in self.unpack_union())
    def _is_supertype_(self, other):
        return all(any(s >= o for s in self.unpack_union())
                   for o in other.unpack_union())
    def is_bottom(self):
        # An empty union is the bottom type.
        return not self.members
    def unpack_union(self):
        yield from self.members
    def to_ast(self):
        return {
            "type": "union",
            "members": [m.to_ast() for m in self.members]
        }
    def normalize(self):
        """Return an equivalent, simplified expression.

        Members sharing the same underlying type (differing only by
        predicate) are merged into a single member with a union predicate;
        subsumed members are then dropped (maximal antichain). Returns the
        sole member directly when only one remains.

        Bug fix: the grouped-merge loop previously unpacked its key into a
        misspelled name (``canidate``) while the body read ``candidate``,
        which still referred to the last candidate from the grouping pass
        above -- so with more than one group the wrong base type could be
        duplicated. The loop variable is now spelled correctly.
        """
        elements = self.members
        groups = {}
        for e in elements:
            if type(e) is TypeExp:
                # Group by the predicate-free version of the expression.
                candidate = e.duplicate(predicate=IntersectionExp())
                if candidate in groups:
                    groups[candidate].append(e)
                else:
                    groups[candidate] = [e]
            else:
                # groups should be empty already, but don't even attempt
                # collapsing if it's a union of type expressions and "other"
                groups = {}
                break
        if groups:
            elements = []
            for candidate, group in groups.items():
                if len(group) == 1:
                    elements.append(group[0])
                else:
                    predicate = UnionExp([t.full_predicate for t in group])
                    predicate = predicate.normalize()
                    elements.append(candidate.duplicate(predicate=predicate))
        if len(elements) < 20:
            # The antichain computation is quadratic; skip it for very
            # large unions.
            members = maximal_antichain(*elements)
        else:
            members = elements
        if len(members) == 1:
            return members[0]
        return UnionExp(members)
class IntersectionExp(_IdentityExpBase):
    """An intersection (logical AND) of expressions.

    With no members it is the Top type/predicate: it constrains nothing,
    everything is a subtype of it, and it is the identity for intersection.
    """
    _operator = ' & '  # used by _IdentityExpBase.__repr__
    def __contains__(self, value):
        # Vacuously true for the empty (top) intersection.
        return all(value in s for s in self.members)
    def _is_subtype_(self, other):
        if isinstance(other, UnionExp):
            # Union will treat `self` as an atomic type, comparing
            # its elements against `self`. This in turn will recurse back to
            # `self` allowing it to check if it is a subtype of the union
            # elements. That check will ultimately compare the elements of
            # `self` against a single element of the union.
            return NotImplemented
        return all(any(s <= o for s in self.unpack_intersection())
                   for o in other.unpack_intersection())
    def _is_supertype_(self, other):
        if isinstance(other, UnionExp):
            return NotImplemented
        return all(any(s >= o for o in other.unpack_intersection())
                   for s in self.unpack_intersection())
    def is_top(self):
        # An empty intersection is the top type.
        return not self.members
    def unpack_intersection(self):
        yield from self.members
    def to_ast(self):
        return {
            "type": "intersection",
            "members": [m.to_ast() for m in self.members]
        }
|
{
"content_hash": "455ae485dc542795f4406ff8eefcdb6c",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 79,
"avg_line_length": 31.096875,
"alnum_prop": 0.55712993668978,
"repo_name": "thermokarst/qiime2",
"id": "2102a933d29dd52008c03a5d094cc36fd194d090",
"size": "20252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime2/core/type/grammar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "Python",
"bytes": "895865"
},
{
"name": "Shell",
"bytes": "217"
},
{
"name": "TeX",
"bytes": "5480"
}
],
"symlink_target": ""
}
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Xml;
using PrettyGood.Util;
namespace PrettyGood.LastFm
{
    /// <summary>
    /// One artist entry parsed from a Last.fm XML response.
    /// </summary>
    public class FoundArtist
    {
        internal FoundArtist(XmlElement el)
        {
            // "name" and "url" are read as required; "mbid" may be absent.
            this.name = Xml.GetTextOfSubElement(el, "name");
            this.mbid = Xml.GetTextOfSubElementOrNull(el, "mbid");
            this.url = Xml.GetTextOfSubElement(el, "url");
        }

        /// <summary>
        /// Heuristic: an artist with no MusicBrainz id, or whose url is a
        /// "+noredirect" link, is probably not a canonical entry.
        /// </summary>
        public bool seemsGoofy
        {
            get
            {
                return string.IsNullOrEmpty(mbid) || url.Contains("+noredirect");
            }
        }

        public readonly string name;
        public readonly string mbid;
        public readonly string url;
    }
}
|
{
"content_hash": "2827ecc312a899de8798b6b514583a9f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 24.939393939393938,
"alnum_prop": 0.5759416767922235,
"repo_name": "madeso/prettygood",
"id": "8b1058c5f3d19ab6ab737e0c62a01808f56c5280",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dotnet/LastFm/FoundArtist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "384285"
}
],
"symlink_target": ""
}
|
import unittest
import matchcompiler
class MatchCompilerTest(unittest.TestCase):
    """Unit tests for matchcompiler.MatchCompiler pattern rewriting.

    Fix: the local variable previously named ``input`` shadowed the builtin
    of the same name; it is renamed ``src`` throughout (behavior unchanged).
    """
    def setUp(self):
        # verify_mode=False: only the source rewriting is under test.
        self.mc = matchcompiler.MatchCompiler(verify_mode=False)
    def test_parseMatch(self):
        self.assertEqual(self.mc.parseMatch(' Token::Match(tok, ";") ', 2), [
            'Token::Match(tok, ";")', 'tok', ' ";"'])
        self.assertEqual(self.mc.parseMatch(' Token::Match(tok,', 2), None)
        # multiline Token::Match is not supported yet
        self.assertEqual(self.mc.parseMatch(' Token::Match(Token::findsimplematch(tok,")"), ";")', 2), [
            'Token::Match(Token::findsimplematch(tok,")"), ";")', 'Token::findsimplematch(tok,")")', ' ";"'])  # inner function call
    def test_replaceTokenMatch(self):
        src = 'if (Token::Match(tok, "foobar")) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match1(tok)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        src = 'if (Token::Match(tok->next()->next(), "foobar %type% %num%")) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match2(tok->next()->next())) {')
        self.assertEqual(2, len(self.mc._matchStrs))
        src = 'if (Token::Match(tok, "foo\"special\"bar %num%")) {'
        output = self.mc._replaceTokenMatch(src)
        # FIXME: Currently detected as non-static pattern
        self.assertEqual(
            output, 'if (Token::Match(tok, "foo"special"bar %num%")) {')
        # self.assertEqual(3, len(self.mc._matchStrs))
    def test_replaceTokenMatchWithVarId(self):
        src = 'if (Token::Match(tok, "foobar %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match1(tok, 123)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        src = 'if (Token::Match(tok->next()->next(), "%varid% foobar", tok->varId())) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(
            output, 'if (match2(tok->next()->next(), tok->varId())) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        src = 'if (Token::Match(tok, "foo\"special\"bar %type% %varid%", my_varid_cache)) {'
        output = self.mc._replaceTokenMatch(src)
        # FIXME: Currently detected as non-static pattern
        self.assertEqual(
            output, 'if (Token::Match(tok, "foo"special"bar %type% %varid%", my_varid_cache)) {')
        # self.assertEqual(1, len(self.mc._matchStrs))
        # test caching: reuse existing matchX()
        src = 'if (Token::Match(tok, "foobar %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match1(tok, 123)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        # two in one line
        src = 'if (Token::Match(tok, "foobar2 %varid%", 123) || Token::Match(tok, "%type% %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match3(tok, 123) || match4(tok, 123)) {')
        self.assertEqual(3, len(self.mc._matchStrs))
    def test_replaceTokenSimpleMatch(self):
        src = 'if (Token::simpleMatch(tok, "foobar")) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match1(tok)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        src = 'if (Token::simpleMatch(tok->next()->next(), "foobar")) {'
        output = self.mc._replaceTokenMatch(src)
        self.assertEqual(output, 'if (match1(tok->next()->next())) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        src = 'if (Token::simpleMatch(tok, "foo\"special\"bar")) {'
        output = self.mc._replaceTokenMatch(src)
        # FIXME: Currently detected as non-static pattern
        self.assertEqual(
            output, 'if (Token::simpleMatch(tok, "foo\"special\"bar")) {')
        self.assertEqual(1, len(self.mc._matchStrs))
    def test_replaceTokenFindSimpleMatch(self):
        src = 'if (Token::findsimplematch(tok, "foobar")) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(output, 'if (findmatch1(tok)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        src = 'if (Token::findsimplematch(tok->next()->next(), "foobar", tok->link())) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(
            output, 'if (findmatch2(tok->next()->next(), tok->link())) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        src = 'if (Token::findsimplematch(tok, "foo\"special\"bar")) {'
        output = self.mc._replaceTokenFindMatch(src)
        # FIXME: Currently detected as non-static pattern
        self.assertEqual(
            output, 'if (Token::findsimplematch(tok, "foo\"special\"bar")) {')
        self.assertEqual(1, len(self.mc._matchStrs))
    def test_replaceTokenFindMatch(self):
        src = 'if (Token::findmatch(tok, "foobar")) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(output, 'if (findmatch1(tok)) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        # findmatch with varid
        src = 'if (Token::findmatch(tok, "foobar %varid%", tok->varId())) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(output, 'if (findmatch2(tok, tok->varId())) {')
        self.assertEqual(1, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        # findmatch with end token
        src = 'if (Token::findmatch(tok->next()->next(), "foobar %type%", tok->link())) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(
            output, 'if (findmatch3(tok->next()->next(), tok->link())) {')
        self.assertEqual(2, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
        # findmatch with end token and varid
        src = 'if (Token::findmatch(tok->next()->next(), "foobar %type% %varid%", tok->link(), 123)) {'
        output = self.mc._replaceTokenFindMatch(src)
        self.assertEqual(
            output, 'if (findmatch4(tok->next()->next(), tok->link(), 123)) {')
        self.assertEqual(2, len(self.mc._matchStrs))
        self.assertEqual(1, self.mc._matchStrs['foobar'])
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "262434dda2055459ee55ffb5ad8651c2",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 145,
"avg_line_length": 48.25714285714286,
"alnum_prop": 0.5994671403197158,
"repo_name": "chillbu/cblib",
"id": "a2ce1b68796d1a57d6307fd78302b516750c706d",
"size": "7527",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/tools/cppcheck-1.63/tools/test_matchcompiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "7339"
},
{
"name": "C",
"bytes": "39950"
},
{
"name": "C++",
"bytes": "6035089"
},
{
"name": "CSS",
"bytes": "5427"
},
{
"name": "JavaScript",
"bytes": "5946"
},
{
"name": "Makefile",
"bytes": "47508"
},
{
"name": "PHP",
"bytes": "636217"
},
{
"name": "Python",
"bytes": "342529"
},
{
"name": "Shell",
"bytes": "10294"
},
{
"name": "VimL",
"bytes": "9934"
}
],
"symlink_target": ""
}
|
from lxml import etree
from nova.api.openstack.compute.contrib import floating_ip_pools
from nova import context
from nova import network
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_floating_ip_pools(self, context):
    """Stub for the network API; always returns two fixed pools."""
    pool_names = ('nova', 'other')
    return [{'name': pool_name} for pool_name in pool_names]
class FloatingIpPoolTest(test.NoDBTestCase):
    """Tests for the os-floating-ip-pools API extension (no DB required)."""
    def setUp(self):
        super(FloatingIpPoolTest, self).setUp()
        # Stub the network API so no real network service is contacted.
        self.stubs.Set(network.api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)
        self.context = context.RequestContext('fake', 'fake')
        self.controller = floating_ip_pools.FloatingIPPoolsController()
    def test_translate_floating_ip_pools_view(self):
        # The view helper should wrap the pools under 'floating_ip_pools'
        # and preserve each pool's name and order.
        pools = fake_get_floating_ip_pools(None, self.context)
        view = floating_ip_pools._translate_floating_ip_pools_view(pools)
        self.assertTrue('floating_ip_pools' in view)
        self.assertEqual(view['floating_ip_pools'][0]['name'],
                         pools[0]['name'])
        self.assertEqual(view['floating_ip_pools'][1]['name'],
                         pools[1]['name'])
    def test_floating_ips_pools_list(self):
        # GET index should return exactly the stubbed pools.
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ip-pools')
        res_dict = self.controller.index(req)
        pools = fake_get_floating_ip_pools(None, self.context)
        response = {'floating_ip_pools': pools}
        self.assertEqual(res_dict, response)
class FloatingIpPoolSerializerTest(test.NoDBTestCase):
    """Tests XML serialization of the floating-ip-pool index view."""
    def test_index_serializer(self):
        serializer = floating_ip_pools.FloatingIPPoolsTemplate()
        text = serializer.serialize(dict(
            floating_ip_pools=[
                dict(name='nova'),
                dict(name='other')
            ]))
        tree = etree.fromstring(text)
        # Expect a <floating_ip_pools> root with one <floating_ip_pool>
        # child per pool, the pool name carried as an attribute.
        self.assertEqual('floating_ip_pools', tree.tag)
        self.assertEqual(2, len(tree))
        self.assertEqual('floating_ip_pool', tree[0].tag)
        self.assertEqual('floating_ip_pool', tree[1].tag)
        self.assertEqual('nova', tree[0].get('name'))
        self.assertEqual('other', tree[1].get('name'))
|
{
"content_hash": "a91a99d48fd887031115a6429936a4f0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 37.08620689655172,
"alnum_prop": 0.6294746629474663,
"repo_name": "pombredanne/MOG",
"id": "a12d41e6edee985f27042e2005756bc52381c930",
"size": "2811",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from zerver.models import UserProfile, UserActivity, UserActivityInterval, Message
from django.utils.timezone import utc
from typing import Any, Dict, List, Sequence, Set
from datetime import datetime, timedelta
def seconds_usage_between(user_profile: UserProfile, begin: datetime, end: datetime) -> timedelta:
    """Return the total time *user_profile* was active between *begin* and *end*.

    Each stored activity interval is clipped to the [begin, end] window and
    the clipped durations are summed.
    """
    overlapping = UserActivityInterval.objects.filter(user_profile=user_profile,
                                                      end__gte=begin,
                                                      start__lte=end)
    total = timedelta(0)
    for interval in overlapping:
        clipped_start = max(begin, interval.start)
        clipped_end = min(end, interval.end)
        total += clipped_end - clipped_start
    return total
|
{
"content_hash": "4afe43fc18a446e36ad3e8980d654eae",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 98,
"avg_line_length": 42.26315789473684,
"alnum_prop": 0.651307596513076,
"repo_name": "mahim97/zulip",
"id": "aacb272924f6020e4a2b3c7970731601e97fd7e8",
"size": "828",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "zerver/lib/statistics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
}
|
"""
With Transcrypt we run in a JavaScript environment, so Python's runtime
introspection is not available; we make do with decorators to register
tests instead.
"""
from pybble.pebblejs import console
tests = []
def test(wrapped):
tests.append(wrapped)
def wrapper():
return wrapped
return wrapper()
# Registered via @test so the module-level runner below executes it.
@test
def test_console():
    # Smoke test: just prove the pebblejs console bridge is callable.
    console.log('Hello from Console Test, let us begin.')
# Execute every test registered by the @test decorator, in order.
for run_test in tests:
    run_test()
|
{
"content_hash": "a1b85ef66301a40e2874a5df6d821990",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 17.153846153846153,
"alnum_prop": 0.7017937219730942,
"repo_name": "hiway/pybble",
"id": "91c3a5641a9c65f94505031a7dcbfd7ba78f6105",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4647"
}
],
"symlink_target": ""
}
|
from spamhandling import check_if_spam, check_if_spam_json
from datahandling import add_blacklisted_user, add_whitelisted_user
from parsing import get_user_from_url
import pytest
import os
# Sample posts used as additional spam-handling test input, one per line.
# (The previous `test_data_inputs = []` initializer was dead code: the
# `with` block always rebinds the name or the open() raises.)
with open("test/data_test_spamhandling.txt", "r") as f:
    test_data_inputs = f.readlines()
@pytest.mark.parametrize("title, body, username, site, match", [
    ('18669786819 gmail customer service number 1866978-6819 gmail support number', '', '', '', True),
    ('Is there any http://www.hindawi.com/ template for Cloud-Oriented Data Center Networking?', '', '', '', True),
    ('', '', 'bagprada', '', True),
    ('12 Month Loans quick @ http://www.quick12monthpaydayloans.co.uk/Elimination of collateral pledging', '', '', '', True),
    ('support for yahoo mail 18669786819 @call for helpline number', '', '', '', True),
    ('yahoo email tech support 1 866 978 6819 Yahoo Customer Phone Number ,Shortest Wait', '', '', '', True),
    ('What is the value of MD5 checksums if the MD5 hash itself could potentially also have been manipulated?', '', '', '', False),
    ('Probability: 6 Dice are rolled. Which is more likely, that you get exactly one 6, or that you get 6 different numbers?', '', '', '', False),
    ('The Challenge of Controlling a Powerful AI', '', 'Serban Tanasa', '', False),
    ('Reproducing image of a spiral using TikZ', '', 'Kristoffer Ryhl', '', False),
    ('What is the proper way to say "queryer"', '', 'jedwards', '', False),
    ('What\'s a real-world example of "overfitting"?', '', 'user3851283', '', False),
    ('How to avoid objects when traveling at greater than .75 light speed. or How Not to Go SPLAT?', '', 'bowlturner', '', False),
    ('Is it unfair to regrade prior work after detecting cheating?', '', 'Village', '', False),
    ('Inner workings of muscles', '', '', 'fitness.stackexchange.com', False),
    ('Cannot access http://stackoverflow.com/ with proxy enabled', '', '', 'superuser.com', False),
    ('kkkkkkkkkkkkkkkkkkkkkkkkkkkk', '<p>bbbbbbbbbbbbbbbbbbbbbb</p>', '', 'stackoverflow.com', True),
    ('Enhance SD Male Enhancement Supplements', '', '', '', True)
])
def test_check_if_spam(title, body, username, site, match):
    """Verify that check_if_spam classifies each canned post as expected."""
    # We can't check blacklists/whitelists in tests, so these are set to their default values
    user_url = ""
    post_id = 0
    # If we want to test answers separately, this should be changed
    is_answer = False
    is_spam, reason, _ = check_if_spam(title, body, username, user_url, site, post_id, is_answer, False, 1, 0)
    # Fixed: the original Python 2 `print title` statement is a SyntaxError
    # under Python 3; use the print() function instead.
    print(title)
    assert match == is_spam
@pytest.mark.parametrize("data, match", [(test_data_inputs[0], False)])
def test_check_if_spam_json(data, match):
    """Verify JSON-payload spam detection against the first fixture line."""
    is_spam, reason, _ = check_if_spam_json(data)
    assert is_spam == match
@pytest.mark.skipif(os.path.isfile("blacklistedUsers.txt"),
                    reason="shouldn't overwrite file")
def test_blacklisted_user():
    """A post by a blacklisted user must always be flagged as spam."""
    user_url = 'http://stackoverflow.com/users/1/jeff-atwood'
    user = get_user_from_url(user_url)
    # add_blacklisted_user persists to blacklistedUsers.txt on disk, hence
    # the skipif guard above and the cleanup below.
    add_blacklisted_user(user, "", "")
    is_spam, reason, _ = check_if_spam("", "", "", user_url, "stackoverflow.com", "1", False, False, 1, 0)
    assert is_spam is True
    # cleanup
    os.remove("blacklistedUsers.txt")
@pytest.mark.skipif(os.path.isfile("whitelistedUsers.txt"),
                    reason="shouldn't overwrite file")
def test_whitelisted_user():
    """Whitelisting suppresses user-based flags but not content-based ones."""
    user_url = 'http://stackoverflow.com/users/2/geoff-dalgas'
    user = get_user_from_url(user_url)
    # add_whitelisted_user persists to whitelistedUsers.txt; see the skipif
    # guard above and the cleanup at the bottom.
    add_whitelisted_user(user)
    user_url2 = 'http://stackoverflow.com/users/0/test'
    user2 = get_user_from_url(user_url2)
    add_whitelisted_user(user2)
    # Blacklisted username alone is forgiven for a whitelisted user.
    is_spam, reason, _ = check_if_spam("", "", "bagprada", user_url, "stackoverflow.com", "1", False, False, 1, 0)
    assert is_spam is False
    # Spammy title still trips detection even for a whitelisted user...
    is_spam, reason, _ = check_if_spam("baba ji", "", "", user_url, "stackoverflow.com", "2", False, False, 1, 0)
    assert is_spam is True
    # ...including in combination with a blacklisted username.
    is_spam, reason, _ = check_if_spam("baba ji", "", "bagprada", user_url, "stackoverflow.com", "3", False, False, 1, 0)
    assert is_spam is True
    # Spammy username content is forgiven for the second whitelisted user.
    is_spam, reason, _ = check_if_spam("test", "", "baba ji - muscle building", user_url2, "stackoverflow.com", "0", False, False, 1, 0)
    assert is_spam is False
    # cleanup
    os.remove("whitelistedUsers.txt")
|
{
"content_hash": "5b9132314bbba2d2fba5a3eadedf9eb5",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 146,
"avg_line_length": 53.074074074074076,
"alnum_prop": 0.6515468713654339,
"repo_name": "ArtOfCode-/SmokeDetector",
"id": "bbb1e03c3551a8486bdfde797679d6cc4fd18945",
"size": "4299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_spamhandling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "211451"
},
{
"name": "Shell",
"bytes": "1310"
}
],
"symlink_target": ""
}
|
""" Encapsulates Functionality for Generating Code Snippets for a Task Description. """
from config import LANGUAGE
from code_snippet_providers.github_code_snippet_provider import GithubCodeSnippetProvider
class CodeSnippetGenerator(object):
    """ Generates code snippets relevant to a task description.

    :attr _code_snippets: Accumulated snippet strings from all providers.
    :type _code_snippets: list
    :attr _code_snippet_providers: Instantiated snippet providers queried for snippets.
    :type _code_snippet_providers: list
    """

    CODE_SNIPPET_PROVIDERS = [
        GithubCodeSnippetProvider,
    ]

    def __init__(self, task_description):
        """ Initializes the generator with one instance per provider class.

        :param task_description: A description of the task to complete.
        :type task_description: str
        """
        self._code_snippets = []
        self._code_snippet_providers = [
            provider_cls(task_description, LANGUAGE)
            for provider_cls in CodeSnippetGenerator.CODE_SNIPPET_PROVIDERS
        ]

    def generate_code_snippets(self):
        """ Collects snippets from every provider into the accumulated list.

        :return: A list of code snippets represented as strings.
        :rtype: list
        """
        for provider in self._code_snippet_providers:
            self._code_snippets.extend(provider.get_code_snippets())
        return self._code_snippets
|
{
"content_hash": "16823149730627814552d070e49ecbdf",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 104,
"avg_line_length": 40.36842105263158,
"alnum_prop": 0.6988265971316818,
"repo_name": "berjc/code-complete",
"id": "170cea495426d8b918309d206e682943cbe75cd3",
"size": "1559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_complete/code_snippet_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "48359"
}
],
"symlink_target": ""
}
|
import mock
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants as plugins_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.objects import base as base_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.services.qos import qos_plugin
from neutron.tests.unit.services.qos import base
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
class TestQosPlugin(base.BaseQosTestCase):
    def setUp(self):
        """Load a real QoS service plugin with all persistence mocked out.

        Every object-layer DB call is patched so the tests exercise only the
        plugin logic and its driver-manager notifications; canned policy and
        rule payloads are prepared for the individual tests.
        """
        super(TestQosPlugin, self).setUp()
        self.setup_coreplugin(load_plugins=False)
        # Stub the object-layer CRUD so no database is ever touched.
        mock.patch('neutron.objects.db.api.create_object').start()
        mock.patch('neutron.objects.db.api.update_object').start()
        mock.patch('neutron.objects.db.api.delete_object').start()
        mock.patch('neutron.objects.db.api.get_object').start()
        _mock_qos_load_attr = mock.patch(
            'neutron.objects.qos.policy.QosPolicy.obj_load_attr')
        self.mock_qos_load_attr = _mock_qos_load_attr.start()
        # We don't use real models as per mocks above. We also need to mock-out
        # methods that work with real data types
        mock.patch(
            'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
        ).start()
        mock.patch.object(policy_object.QosPolicy, 'unset_default').start()
        mock.patch.object(policy_object.QosPolicy, 'set_default').start()
        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
        cfg.CONF.set_override("service_plugins", ["qos"])
        manager.init()
        self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
        # Replace the driver manager so tests can assert on its .call()s.
        self.qos_plugin.driver_manager = mock.Mock()
        self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
                                   '.ResourcesPushRpcApi.push').start()
        self.ctxt = context.Context('fake_user', 'fake_tenant')
        mock.patch.object(self.ctxt.session, 'refresh').start()
        mock.patch.object(self.ctxt.session, 'expunge').start()
        # Canned request payloads reused throughout the test class.
        self.policy_data = {
            'policy': {'id': uuidutils.generate_uuid(),
                       'project_id': uuidutils.generate_uuid(),
                       'name': 'test-policy',
                       'description': 'Test policy description',
                       'shared': True,
                       'is_default': False}}
        self.rule_data = {
            'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                     'max_kbps': 100,
                                     'max_burst_kbps': 150},
            'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
                                  'dscp_mark': 16},
            'minimum_bandwidth_rule': {
                'id': uuidutils.generate_uuid(),
                'min_kbps': 10}}
        # Pre-built objects matching the payloads above.
        self.policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        self.rule = rule_object.QosBandwidthLimitRule(
            self.ctxt, **self.rule_data['bandwidth_limit_rule'])
        self.dscp_rule = rule_object.QosDscpMarkingRule(
            self.ctxt, **self.rule_data['dscp_marking_rule'])
        self.min_rule = rule_object.QosMinimumBandwidthRule(
            self.ctxt, **self.rule_data['minimum_bandwidth_rule'])
def _validate_driver_params(self, method_name):
self.assertTrue(self.qos_plugin.driver_manager.call.called)
self.assertEqual(self.qos_plugin.driver_manager.call.call_args[0][0],
method_name)
self.assertIsInstance(
self.qos_plugin.driver_manager.call.call_args[0][2],
policy_object.QosPolicy
)
    def test_get_ports_with_policy(self):
        """_get_ports_with_policy returns ports bound directly to the policy
        plus network ports without a policy of their own."""
        network_ports = [
            mock.MagicMock(qos_policy_id=None),
            mock.MagicMock(qos_policy_id=uuidutils.generate_uuid()),
            mock.MagicMock(qos_policy_id=None)
        ]
        ports = [
            mock.MagicMock(qos_policy_id=self.policy.id),
        ]
        # Network ports with their own policy must be excluded.
        expected_network_ports = [
            port for port in network_ports if port.qos_policy_id is None]
        expected_ports = ports + expected_network_ports
        # get_objects is called twice: first for network ports, then for
        # ports bound to the policy — hence the side_effect pair.
        with mock.patch(
            'neutron.objects.ports.Port.get_objects',
            side_effect=[network_ports, ports]
        ), mock.patch.object(
            self.policy, "get_bound_networks"
        ), mock.patch.object(
            self.policy, "get_bound_ports"
        ):
            policy_ports = self.qos_plugin._get_ports_with_policy(
                self.ctxt, self.policy)
            self.assertEqual(
                len(expected_ports), len(policy_ports))
            for port in expected_ports:
                self.assertIn(port, policy_ports)
    def _test_validate_create_port_callback(self, policy_id=None,
                                            network_policy_id=None):
        """Drive the precommit_create port callback and assert the policy is
        validated only when the port or its network carries one.

        :param policy_id: QoS policy id set directly on the port, if any.
        :param network_policy_id: QoS policy id on the port's network, if any.
        """
        port_id = uuidutils.generate_uuid()
        kwargs = {
            "context": self.ctxt,
            "port": {"id": port_id}
        }
        port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
        network_mock = mock.MagicMock(
            id=uuidutils.generate_uuid(), qos_policy_id=network_policy_id)
        policy_mock = mock.MagicMock(id=policy_id)
        # The port-level policy takes precedence over the network's.
        expected_policy_id = policy_id or network_policy_id
        with mock.patch(
            'neutron.objects.ports.Port.get_object',
            return_value=port_mock
        ), mock.patch(
            'neutron.objects.network.Network.get_object',
            return_value=network_mock
        ), mock.patch(
            'neutron.objects.qos.policy.QosPolicy.get_object',
            return_value=policy_mock
        ) as get_policy, mock.patch.object(
            self.qos_plugin, "validate_policy_for_port"
        ) as validate_policy_for_port:
            self.qos_plugin._validate_create_port_callback(
                "PORT", "precommit_create", "test_plugin", **kwargs)
            if policy_id or network_policy_id:
                get_policy.assert_called_once_with(self.ctxt,
                                                   id=expected_policy_id)
                validate_policy_for_port.assert_called_once_with(policy_mock,
                                                                 port_mock)
            else:
                # No policy anywhere: validation must be skipped entirely.
                get_policy.assert_not_called()
                validate_policy_for_port.assert_not_called()
    def test_validate_create_port_callback_policy_on_port(self):
        """Policy only on the port itself."""
        self._test_validate_create_port_callback(
            policy_id=uuidutils.generate_uuid())

    def test_validate_create_port_callback_policy_on_port_and_network(self):
        """Policies on both port and network: the port's wins."""
        self._test_validate_create_port_callback(
            policy_id=uuidutils.generate_uuid(),
            network_policy_id=uuidutils.generate_uuid())

    def test_validate_create_port_callback_policy_on_network(self):
        """Policy only on the network."""
        self._test_validate_create_port_callback(
            network_policy_id=uuidutils.generate_uuid())

    def test_validate_create_port_callback_no_policy(self):
        """No policy on port or network: validation skipped."""
        self._test_validate_create_port_callback()
    def _test_validate_update_port_callback(self, policy_id=None,
                                            original_policy_id=None):
        """Drive the precommit_update port callback and assert validation
        runs only when the policy id actually changed to a non-None value.

        :param policy_id: policy id on the updated port, if any.
        :param original_policy_id: policy id on the pre-update port, if any.
        """
        port_id = uuidutils.generate_uuid()
        kwargs = {
            "context": self.ctxt,
            "port": {
                "id": port_id,
                qos_consts.QOS_POLICY_ID: policy_id
            },
            "original_port": {
                "id": port_id,
                qos_consts.QOS_POLICY_ID: original_policy_id
            }
        }
        port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
        policy_mock = mock.MagicMock(id=policy_id)
        with mock.patch(
            'neutron.objects.ports.Port.get_object',
            return_value=port_mock
        ) as get_port, mock.patch(
            'neutron.objects.qos.policy.QosPolicy.get_object',
            return_value=policy_mock
        ) as get_policy, mock.patch.object(
            self.qos_plugin, "validate_policy_for_port"
        ) as validate_policy_for_port:
            self.qos_plugin._validate_update_port_callback(
                "PORT", "precommit_update", "test_plugin", **kwargs)
            if policy_id is None or policy_id == original_policy_id:
                # Removed or unchanged policy: nothing to validate.
                get_port.assert_not_called()
                get_policy.assert_not_called()
                validate_policy_for_port.assert_not_called()
            else:
                get_port.assert_called_once_with(self.ctxt, id=port_id)
                get_policy.assert_called_once_with(self.ctxt, id=policy_id)
                validate_policy_for_port.assert_called_once_with(policy_mock,
                                                                 port_mock)
    def test_validate_update_port_callback_policy_changed(self):
        """New policy assigned on update: validation runs."""
        self._test_validate_update_port_callback(
            policy_id=uuidutils.generate_uuid())

    def test_validate_update_port_callback_policy_not_changed(self):
        """Same policy before and after: validation skipped."""
        policy_id = uuidutils.generate_uuid()
        self._test_validate_update_port_callback(
            policy_id=policy_id, original_policy_id=policy_id)

    def test_validate_update_port_callback_policy_removed(self):
        """Policy detached on update: validation skipped."""
        self._test_validate_update_port_callback(
            policy_id=None, original_policy_id=uuidutils.generate_uuid())
    def _test_validate_update_network_callback(self, policy_id=None,
                                               original_policy_id=None):
        """Drive the precommit_update network callback and assert the policy
        is validated only against ports that lack their own policy.

        :param policy_id: policy id on the updated network, if any.
        :param original_policy_id: policy id on the pre-update network, if any.
        """
        network_id = uuidutils.generate_uuid()
        kwargs = {
            "context": self.ctxt,
            "network": {
                "id": network_id,
                qos_consts.QOS_POLICY_ID: policy_id
            },
            "original_network": {
                "id": network_id,
                qos_consts.QOS_POLICY_ID: original_policy_id
            }
        }
        # A port with its own policy must be excluded from validation.
        port_mock_with_own_policy = mock.MagicMock(
            id=uuidutils.generate_uuid(),
            qos_policy_id=uuidutils.generate_uuid())
        port_mock_without_own_policy = mock.MagicMock(
            id=uuidutils.generate_uuid(), qos_policy_id=None)
        ports = [port_mock_with_own_policy, port_mock_without_own_policy]
        policy_mock = mock.MagicMock(id=policy_id)
        with mock.patch(
            'neutron.objects.ports.Port.get_objects',
            return_value=ports
        ) as get_ports, mock.patch(
            'neutron.objects.qos.policy.QosPolicy.get_object',
            return_value=policy_mock
        ) as get_policy, mock.patch.object(
            self.qos_plugin, "validate_policy_for_ports"
        ) as validate_policy_for_ports:
            self.qos_plugin._validate_update_network_callback(
                "NETWORK", "precommit_update", "test_plugin", **kwargs)
            if policy_id is None or policy_id == original_policy_id:
                # Removed or unchanged policy: nothing to validate.
                get_policy.assert_not_called()
                get_ports.assert_not_called()
                validate_policy_for_ports.assert_not_called()
            else:
                get_policy.assert_called_once_with(self.ctxt, id=policy_id)
                get_ports.assert_called_once_with(self.ctxt,
                                                  network_id=network_id)
                validate_policy_for_ports.assert_called_once_with(
                    policy_mock, [port_mock_without_own_policy])
    def test_validate_update_network_callback_policy_changed(self):
        """New policy assigned on network update: validation runs."""
        self._test_validate_update_network_callback(
            policy_id=uuidutils.generate_uuid())

    def test_validate_update_network_callback_policy_not_changed(self):
        """Same policy before and after: validation skipped."""
        policy_id = uuidutils.generate_uuid()
        self._test_validate_update_network_callback(
            policy_id=policy_id, original_policy_id=policy_id)

    def test_validate_update_network_callback_policy_removed(self):
        """Policy detached on network update: validation skipped."""
        self._test_validate_update_network_callback(
            policy_id=None, original_policy_id=uuidutils.generate_uuid())
    def test_validate_policy_for_port_rule_not_valid(self):
        """An unsupported rule makes validate_policy_for_port raise."""
        port = {'id': uuidutils.generate_uuid()}
        with mock.patch.object(
            self.qos_plugin.driver_manager, "validate_rule_for_port",
            return_value=False
        ):
            self.policy.rules = [self.rule]
            self.assertRaises(
                n_exc.QosRuleNotSupported,
                self.qos_plugin.validate_policy_for_port,
                self.policy, port)

    def test_validate_policy_for_port_all_rules_valid(self):
        """All rules supported: validate_policy_for_port must not raise."""
        port = {'id': uuidutils.generate_uuid()}
        with mock.patch.object(
            self.qos_plugin.driver_manager, "validate_rule_for_port",
            return_value=True
        ):
            self.policy.rules = [self.rule]
            try:
                self.qos_plugin.validate_policy_for_port(self.policy, port)
            except n_exc.QosRuleNotSupported:
                self.fail("QosRuleNotSupported exception unexpectedly raised")
    @mock.patch(
        'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
        '.create_rbac_policy')
    @mock.patch('neutron.objects.qos.policy.QosPolicy')
    def test_add_policy(self, mock_qos_policy, mock_create_rbac_policy):
        """create_policy persists the object, then notifies the driver in
        precommit/postcommit order."""
        # Attach both mocks to one manager so call ordering can be asserted
        # across them via mock_calls.
        mock_manager = mock.Mock()
        mock_manager.attach_mock(mock_qos_policy, 'QosPolicy')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
        mock_manager.reset_mock()
        self.qos_plugin.create_policy(self.ctxt, self.policy_data)
        policy_mock_call = mock.call.QosPolicy().create()
        create_precommit_mock_call = mock.call.driver.call(
            'create_policy_precommit', self.ctxt, mock.ANY)
        create_mock_call = mock.call.driver.call(
            'create_policy', self.ctxt, mock.ANY)
        # Expected order: DB create -> precommit -> postcommit.
        self.assertTrue(
            mock_manager.mock_calls.index(policy_mock_call) <
            mock_manager.mock_calls.index(create_precommit_mock_call) <
            mock_manager.mock_calls.index(create_mock_call))
    # NOTE(review): this method takes *mocks but carries no mock.patch
    # decorators in this chunk — likely a leftover from a decorated
    # sibling; confirm whether decorators were dropped.
    def test_add_policy_with_extra_tenant_keyword(self, *mocks):
        """A redundant 'tenant_id' key in the request must be stripped
        before the QosPolicy object is constructed."""
        policy_id = uuidutils.generate_uuid()
        project_id = uuidutils.generate_uuid()
        # Request payload containing both project_id and legacy tenant_id.
        tenant_policy = {
            'policy': {'id': policy_id,
                       'project_id': project_id,
                       'tenant_id': project_id,
                       'name': 'test-policy',
                       'description': 'Test policy description',
                       'shared': True,
                       'is_default': False}}
        # Expected constructor kwargs: tenant_id removed.
        policy_details = {'id': policy_id,
                          'project_id': project_id,
                          'name': 'test-policy',
                          'description': 'Test policy description',
                          'shared': True,
                          'is_default': False}
        with mock.patch('neutron.objects.qos.policy.QosPolicy') as QosMocked:
            self.qos_plugin.create_policy(self.ctxt, tenant_policy)
        QosMocked.assert_called_once_with(self.ctxt, **policy_details)
    @mock.patch.object(policy_object.QosPolicy, "get_object")
    @mock.patch(
        'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
        '.create_rbac_policy')
    @mock.patch.object(policy_object.QosPolicy, 'update')
    def test_update_policy(self, mock_qos_policy_update,
                           mock_create_rbac_policy, mock_qos_policy_get):
        """update_policy persists the update, then notifies the driver in
        precommit/postcommit order."""
        mock_qos_policy_get.return_value = self.policy
        # Single manager to assert cross-mock call ordering.
        mock_manager = mock.Mock()
        mock_manager.attach_mock(mock_qos_policy_update, 'update')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
        mock_manager.reset_mock()
        fields = base_object.get_updatable_fields(
            policy_object.QosPolicy, self.policy_data['policy'])
        self.qos_plugin.update_policy(
            self.ctxt, self.policy.id, {'policy': fields})
        self._validate_driver_params('update_policy')
        policy_update_mock_call = mock.call.update()
        update_precommit_mock_call = mock.call.driver.call(
            'update_policy_precommit', self.ctxt, mock.ANY)
        update_mock_call = mock.call.driver.call(
            'update_policy', self.ctxt, mock.ANY)
        # Expected order: DB update -> precommit -> postcommit.
        self.assertTrue(
            mock_manager.mock_calls.index(policy_update_mock_call) <
            mock_manager.mock_calls.index(update_precommit_mock_call) <
            mock_manager.mock_calls.index(update_mock_call))
    @mock.patch('neutron.objects.db.api.get_object', return_value=None)
    @mock.patch.object(policy_object.QosPolicy, 'delete')
    def test_delete_policy(self, mock_qos_policy_delete, mock_api_get_policy):
        """delete_policy removes the object, then notifies the driver in
        precommit/postcommit order."""
        mock_manager = mock.Mock()
        mock_manager.attach_mock(mock_qos_policy_delete, 'delete')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
        mock_manager.reset_mock()
        self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
        self._validate_driver_params('delete_policy')
        policy_delete_mock_call = mock.call.delete()
        delete_precommit_mock_call = mock.call.driver.call(
            'delete_policy_precommit', self.ctxt, mock.ANY)
        delete_mock_call = mock.call.driver.call(
            'delete_policy', self.ctxt, mock.ANY)
        # Expected order: DB delete -> precommit -> postcommit.
        self.assertTrue(
            mock_manager.mock_calls.index(policy_delete_mock_call) <
            mock_manager.mock_calls.index(delete_precommit_mock_call) <
            mock_manager.mock_calls.index(delete_mock_call))
    def test_create_policy_rule(self):
        """Creating a bandwidth-limit rule notifies the driver with
        'update_policy'."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy), mock.patch(
            'neutron.objects.qos.qos_policy_validator'
            '.check_bandwidth_rule_conflict', return_value=None):
            self.qos_plugin.create_policy_bandwidth_limit_rule(
                self.ctxt, self.policy.id, self.rule_data)
            self._validate_driver_params('update_policy')
    def test_create_policy_rule_check_rule_min_less_than_max(self):
        """A min-bandwidth rule below the existing bandwidth limit is
        accepted."""
        _policy = self._get_policy()
        setattr(_policy, "rules", [self.rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy) as mock_qos_get_obj:
            self.qos_plugin.create_policy_minimum_bandwidth_rule(
                self.ctxt, _policy.id, self.rule_data)
            self._validate_driver_params('update_policy')
            self.mock_qos_load_attr.assert_called_once_with('rules')
            mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)

    def test_create_policy_rule_check_rule_max_more_than_min(self):
        """A bandwidth-limit rule above the existing min-bandwidth rule is
        accepted."""
        _policy = self._get_policy()
        setattr(_policy, "rules", [self.min_rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy) as mock_qos_get_obj:
            self.qos_plugin.create_policy_bandwidth_limit_rule(
                self.ctxt, _policy.id, self.rule_data)
            self._validate_driver_params('update_policy')
            self.mock_qos_load_attr.assert_called_once_with('rules')
            mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
    def test_create_policy_rule_check_rule_bwlimit_less_than_minbw(self):
        """A bandwidth limit below an existing min-bandwidth rule must be
        rejected with QoSRuleParameterConflict."""
        _policy = self._get_policy()
        # 1 kbps limit vs the fixture's 10 kbps minimum.
        self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
        setattr(_policy, "rules", [self.min_rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy) as mock_qos_get_obj:
            self.assertRaises(n_exc.QoSRuleParameterConflict,
                              self.qos_plugin.create_policy_bandwidth_limit_rule,
                              self.ctxt, self.policy.id, self.rule_data)
            mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)

    def test_create_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
        """A min-bandwidth rule above an existing bandwidth limit must be
        rejected with QoSRuleParameterConflict."""
        _policy = self._get_policy()
        # 1 Gbps minimum vs the fixture's 100 kbps limit.
        self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000000
        setattr(_policy, "rules", [self.rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy) as mock_qos_get_obj:
            self.assertRaises(n_exc.QoSRuleParameterConflict,
                              self.qos_plugin.create_policy_minimum_bandwidth_rule,
                              self.ctxt, self.policy.id, self.rule_data)
            mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
    @mock.patch.object(rule_object.QosBandwidthLimitRule, 'update')
    def test_update_policy_rule(self, mock_qos_rule_update):
        """Updating a rule persists it, then notifies the driver in
        precommit/postcommit order."""
        mock_manager = mock.Mock()
        mock_manager.attach_mock(mock_qos_rule_update, 'update')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
        mock_manager.reset_mock()
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            setattr(_policy, "rules", [self.rule])
            self.qos_plugin.update_policy_bandwidth_limit_rule(
                self.ctxt, self.rule.id, self.policy.id, self.rule_data)
            self._validate_driver_params('update_policy')
            rule_update_mock_call = mock.call.update()
            update_precommit_mock_call = mock.call.driver.call(
                'update_policy_precommit', self.ctxt, mock.ANY)
            update_mock_call = mock.call.driver.call(
                'update_policy', self.ctxt, mock.ANY)
            # Expected order: rule update -> precommit -> postcommit.
            self.assertTrue(
                mock_manager.mock_calls.index(rule_update_mock_call) <
                mock_manager.mock_calls.index(update_precommit_mock_call) <
                mock_manager.mock_calls.index(update_mock_call))
    def test_update_policy_rule_check_rule_min_less_than_max(self):
        """Updating either rule succeeds while min-bandwidth stays below the
        bandwidth limit."""
        _policy = self._get_policy()
        setattr(_policy, "rules", [self.rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.qos_plugin.update_policy_bandwidth_limit_rule(
                self.ctxt, self.rule.id, self.policy.id, self.rule_data)
            self.mock_qos_load_attr.assert_called_once_with('rules')
            self._validate_driver_params('update_policy')

        rules = [self.rule, self.min_rule]
        setattr(_policy, "rules", rules)
        # Reset so the second assert_called_once_with below is meaningful.
        self.mock_qos_load_attr.reset_mock()
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.qos_plugin.update_policy_minimum_bandwidth_rule(
                self.ctxt, self.min_rule.id,
                self.policy.id, self.rule_data)
            self.mock_qos_load_attr.assert_called_once_with('rules')
            self._validate_driver_params('update_policy')
    def test_update_policy_rule_check_rule_bwlimit_less_than_minbw(self):
        """Raising the min-bandwidth above the bandwidth limit on update
        must be rejected with QoSRuleParameterConflict."""
        _policy = self._get_policy()
        setattr(_policy, "rules", [self.rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.qos_plugin.update_policy_bandwidth_limit_rule(
                self.ctxt, self.rule.id, self.policy.id, self.rule_data)
            self.mock_qos_load_attr.assert_called_once_with('rules')
            self._validate_driver_params('update_policy')
        # 1000 kbps minimum vs the fixture's 100 kbps limit.
        self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.assertRaises(
                n_exc.QoSRuleParameterConflict,
                self.qos_plugin.update_policy_minimum_bandwidth_rule,
                self.ctxt, self.min_rule.id,
                self.policy.id, self.rule_data)

    def test_update_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
        """Lowering the bandwidth limit below the min-bandwidth on update
        must be rejected with QoSRuleParameterConflict."""
        _policy = self._get_policy()
        setattr(_policy, "rules", [self.min_rule])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.qos_plugin.update_policy_minimum_bandwidth_rule(
                self.ctxt, self.min_rule.id, self.policy.id, self.rule_data)
            self.mock_qos_load_attr.assert_called_once_with('rules')
            self._validate_driver_params('update_policy')
        # 1 kbps limit vs the fixture's 10 kbps minimum.
        self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            self.assertRaises(
                n_exc.QoSRuleParameterConflict,
                self.qos_plugin.update_policy_bandwidth_limit_rule,
                self.ctxt, self.rule.id,
                self.policy.id, self.rule_data)
def _get_policy(self):
return policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
    def test_update_policy_rule_bad_policy(self):
        """Updating a rule not attached to the policy raises
        QosRuleNotFound."""
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            # Empty rule list: self.rule is not part of this policy.
            setattr(_policy, "rules", [])
            self.assertRaises(
                n_exc.QosRuleNotFound,
                self.qos_plugin.update_policy_bandwidth_limit_rule,
                self.ctxt, self.rule.id, self.policy.id,
                self.rule_data)
    @mock.patch.object(rule_object.QosBandwidthLimitRule, 'delete')
    def test_delete_policy_rule(self, mock_qos_rule_delete):
        """Deleting a rule removes it, then notifies the driver (as an
        'update_policy') in precommit/postcommit order."""
        mock_manager = mock.Mock()
        mock_manager.attach_mock(mock_qos_rule_delete, 'delete')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
        mock_manager.reset_mock()
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            setattr(_policy, "rules", [self.rule])
            self.qos_plugin.delete_policy_bandwidth_limit_rule(
                        self.ctxt, self.rule.id, _policy.id)
            self._validate_driver_params('update_policy')
            rule_delete_mock_call = mock.call.delete()
            update_precommit_mock_call = mock.call.driver.call(
                'update_policy_precommit', self.ctxt, mock.ANY)
            update_mock_call = mock.call.driver.call(
                'update_policy', self.ctxt, mock.ANY)
            # Expected order: rule delete -> precommit -> postcommit.
            self.assertTrue(
                mock_manager.mock_calls.index(rule_delete_mock_call) <
                mock_manager.mock_calls.index(update_precommit_mock_call) <
                mock_manager.mock_calls.index(update_mock_call))
    def test_delete_policy_rule_bad_policy(self):
        """Deleting a rule not attached to the policy raises
        QosRuleNotFound."""
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            # Empty rule list: self.rule is not part of this policy.
            setattr(_policy, "rules", [])
            self.assertRaises(
                n_exc.QosRuleNotFound,
                self.qos_plugin.delete_policy_bandwidth_limit_rule,
                self.ctxt, self.rule.id, _policy.id)
    def test_get_policy_bandwidth_limit_rule(self):
        """Fetching a single bandwidth-limit rule queries by rule id."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy):
            with mock.patch('neutron.objects.qos.rule.'
                            'QosBandwidthLimitRule.'
                            'get_object') as get_object_mock:
                self.qos_plugin.get_policy_bandwidth_limit_rule(
                    self.ctxt, self.rule.id, self.policy.id)
                get_object_mock.assert_called_once_with(self.ctxt,
                                                        id=self.rule.id)

    def test_get_policy_bandwidth_limit_rules_for_policy(self):
        """Listing rules filters by the owning policy id."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy):
            with mock.patch('neutron.objects.qos.rule.'
                            'QosBandwidthLimitRule.'
                            'get_objects') as get_objects_mock:
                self.qos_plugin.get_policy_bandwidth_limit_rules(
                    self.ctxt, self.policy.id)
                get_objects_mock.assert_called_once_with(
                    self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)

    def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
        """Extra API filters are forwarded to the object-layer query."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy):
            with mock.patch('neutron.objects.qos.rule.'
                            'QosBandwidthLimitRule.'
                            'get_objects') as get_objects_mock:

                filters = {'filter': 'filter_id'}
                self.qos_plugin.get_policy_bandwidth_limit_rules(
                    self.ctxt, self.policy.id, filters=filters)
                get_objects_mock.assert_called_once_with(
                    self.ctxt, _pager=mock.ANY,
                    qos_policy_id=self.policy.id,
                    filter='filter_id')
    def test_get_policy_for_nonexistent_policy(self):
        """Fetching an unknown policy raises QosPolicyNotFound."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=None):
            self.assertRaises(
                n_exc.QosPolicyNotFound,
                self.qos_plugin.get_policy,
                self.ctxt, self.policy.id)

    def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
        """Fetching a rule of an unknown policy raises QosPolicyNotFound."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=None):
            self.assertRaises(
                n_exc.QosPolicyNotFound,
                self.qos_plugin.get_policy_bandwidth_limit_rule,
                self.ctxt, self.rule.id, self.policy.id)

    def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
        """Listing rules of an unknown policy raises QosPolicyNotFound."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=None):
            self.assertRaises(
                n_exc.QosPolicyNotFound,
                self.qos_plugin.get_policy_bandwidth_limit_rules,
                self.ctxt, self.policy.id)
    def test_create_policy_dscp_marking_rule(self):
        """Creating a DSCP rule notifies the driver with 'update_policy'."""
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            setattr(_policy, "rules", [self.dscp_rule])
            self.qos_plugin.create_policy_dscp_marking_rule(
                self.ctxt, self.policy.id, self.rule_data)
            self._validate_driver_params('update_policy')

    def test_update_policy_dscp_marking_rule(self):
        """Updating a DSCP rule notifies the driver with 'update_policy'."""
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            setattr(_policy, "rules", [self.dscp_rule])
            self.qos_plugin.update_policy_dscp_marking_rule(
                self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data)
            self._validate_driver_params('update_policy')

    def test_delete_policy_dscp_marking_rule(self):
        """Deleting a DSCP rule notifies the driver with 'update_policy'."""
        _policy = policy_object.QosPolicy(
            self.ctxt, **self.policy_data['policy'])
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=_policy):
            setattr(_policy, "rules", [self.dscp_rule])
            self.qos_plugin.delete_policy_dscp_marking_rule(
                self.ctxt, self.dscp_rule.id, self.policy.id)
            self._validate_driver_params('update_policy')
    def test_get_policy_dscp_marking_rules(self):
        """Listing DSCP rules filters by the owning policy id."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy):
            with mock.patch('neutron.objects.qos.rule.'
                            'QosDscpMarkingRule.'
                            'get_objects') as get_objects_mock:
                self.qos_plugin.get_policy_dscp_marking_rules(
                    self.ctxt, self.policy.id)
                get_objects_mock.assert_called_once_with(
                    self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)

    def test_get_policy_dscp_marking_rules_for_policy_with_filters(self):
        """Extra API filters are forwarded to the object-layer query."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=self.policy):
            with mock.patch('neutron.objects.qos.rule.'
                            'QosDscpMarkingRule.'
                            'get_objects') as get_objects_mock:
                filters = {'filter': 'filter_id'}
                self.qos_plugin.get_policy_dscp_marking_rules(
                    self.ctxt, self.policy.id, filters=filters)
                get_objects_mock.assert_called_once_with(
                    self.ctxt, qos_policy_id=self.policy.id,
                    _pager=mock.ANY, filter='filter_id')

    def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self):
        """Fetching a DSCP rule of an unknown policy raises
        QosPolicyNotFound."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=None):
            self.assertRaises(
                n_exc.QosPolicyNotFound,
                self.qos_plugin.get_policy_dscp_marking_rule,
                self.ctxt, self.dscp_rule.id, self.policy.id)

    def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self):
        """Listing DSCP rules of an unknown policy raises
        QosPolicyNotFound."""
        with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                        return_value=None):
            self.assertRaises(
                n_exc.QosPolicyNotFound,
                self.qos_plugin.get_policy_dscp_marking_rules,
                self.ctxt, self.policy.id)
def test_get_policy_minimum_bandwidth_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_minimum_bandwidth_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rules,
self.ctxt, self.policy.id)
def test_create_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
def test_delete_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_verify_bad_method_call(self):
self.assertRaises(AttributeError, getattr, self.qos_plugin,
'create_policy_bandwidth_limit_rules')
def test_get_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kbps',
'parameter_type': constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def test_get_rule_types(self):
rule_types_mock = mock.PropertyMock(
return_value=qos_consts.VALID_RULE_TYPES)
filters = {'type': 'type_id'}
with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types',
new_callable=rule_types_mock):
types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters)
self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
sorted(type_['type'] for type_ in types))
    @mock.patch('neutron.objects.ports.Port')
    @mock.patch('neutron.objects.qos.policy.QosPolicy')
    def test_rule_notification_and_driver_ordering(self, qos_policy_mock,
                                                   port_mock):
        """For create/update/delete of a policy rule, the DB-side rule
        operation must be recorded before the driver is notified."""
        rule_cls_mock = mock.Mock()
        rule_cls_mock.rule_type = 'fake'

        # Positional arguments for each *_policy_rule plugin method.
        rule_actions = {'create': [self.ctxt, rule_cls_mock,
                                   self.policy.id, {'fake_rule': {}}],
                        'update': [self.ctxt, rule_cls_mock,
                                   self.rule.id,
                                   self.policy.id, {'fake_rule': {}}],
                        'delete': [self.ctxt, rule_cls_mock,
                                   self.rule.id, self.policy.id]}

        # Attach every mock to one manager so mock_calls records a single
        # global ordering across all of them.
        mock_manager = mock.Mock()
        mock_manager.attach_mock(qos_policy_mock, 'QosPolicy')
        mock_manager.attach_mock(port_mock, 'Port')
        mock_manager.attach_mock(rule_cls_mock, 'RuleCls')
        mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')

        for action, arguments in rule_actions.items():
            mock_manager.reset_mock()
            method = getattr(self.qos_plugin, "%s_policy_rule" % action)
            method(*arguments)

            # some actions get rule from policy
            get_rule_mock_call = getattr(
                mock.call.QosPolicy.get_object().get_rule_by_id(), action)()
            # some actions construct rule from class reference
            rule_mock_call = getattr(mock.call.RuleCls(), action)()

            driver_mock_call = mock.call.driver.call('update_policy',
                                                     self.ctxt, mock.ANY)

            if rule_mock_call in mock_manager.mock_calls:
                action_index = mock_manager.mock_calls.index(rule_mock_call)
            else:
                action_index = mock_manager.mock_calls.index(
                    get_rule_mock_call)

            # The rule operation must appear before the driver notification.
            self.assertTrue(
                action_index < mock_manager.mock_calls.index(driver_mock_call))
|
{
"content_hash": "59a7b3f83a2db9a88a739f76dcbc61e5",
"timestamp": "",
"source": "github",
"line_count": 884,
"max_line_length": 79,
"avg_line_length": 47.522624434389144,
"alnum_prop": 0.5860271363960962,
"repo_name": "eayunstack/neutron",
"id": "509e7e0ce11ff50d951be5f3b86a5b8a213060d5",
"size": "42583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/services/qos/test_qos_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('emit_series')
try:
    from flexget.plugins.filter.series import SeriesTask, Episode, Release, get_latest_download
except ImportError as e:
    # str(e) instead of the deprecated e.message attribute, which was
    # removed in Python 3 (same text for a single-argument exception).
    log.error(str(e))
    raise plugin.DependencyError(issued_by='emit_series', missing='series')
class EmitSeries(object):
    """
    Emit next episode number from all series configured in this task.

    Supports only 'ep' and 'sequence' mode series.
    """

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'from_start': {'type': 'boolean', 'default': False}
                },
                'additionalProperties': False
            }
        ]
    }

    def ep_identifiers(self, season, episode):
        """Return the search identifier variants for an ep-style episode."""
        return ['S%02dE%02d' % (season, episode),
                '%dx%02d' % (season, episode)]

    def sequence_identifiers(self, episode):
        """Return the search identifier variants for a sequence-style episode."""
        return ['%d' % episode]

    def search_entry(self, series, season, episode, task, rerun=True):
        """Build a search entry for one episode of *series*.

        When *rerun* is True a completion hook is attached so an accepted
        result triggers another task run looking for the next episode.
        """
        if series.identified_by == 'ep':
            search_strings = ['%s %s' % (series.name, id) for id in self.ep_identifiers(season, episode)]
            series_id = 'S%02dE%02d' % (season, episode)
        else:
            search_strings = ['%s %s' % (series.name, id) for id in self.sequence_identifiers(episode)]
            series_id = episode
        entry = Entry(title=search_strings[0], url='',
                      search_strings=search_strings,
                      series_name=series.name,
                      series_season=season,
                      series_episode=episode,
                      series_id=series_id,
                      series_id_type=series.identified_by)
        if rerun:
            entry.on_complete(self.on_search_complete, task=task, identified_by=series.identified_by)
        return entry

    def on_task_input(self, task, config):
        """Generate entries for the next episode(s) of every series on this task."""
        if not config:
            return
        if isinstance(config, bool):
            config = {}
        if not task.is_rerun:
            # Reset the per-run record of which series should try next season.
            self.try_next_season = {}
        entries = []
        for seriestask in task.session.query(SeriesTask).filter(SeriesTask.name == task.name).all():
            series = seriestask.series
            if not series:
                # TODO: How can this happen?
                log.debug('Found SeriesTask item without series specified. Cleaning up.')
                task.session.delete(seriestask)
                continue
            if series.identified_by not in ['ep', 'sequence']:
                log.verbose('Can only emit ep or sequence based series. `%s` is identified_by %s' %
                            (series.name, series.identified_by or 'auto'))
                continue
            latest = get_latest_download(series)
            if series.begin and (not latest or latest < series.begin):
                # Nothing downloaded yet (or behind the begin marker): start there.
                entries.append(self.search_entry(series, series.begin.season, series.begin.number, task))
            elif latest:
                if self.try_next_season.get(series.name):
                    entries.append(self.search_entry(series, latest.season + 1, 1, task))
                else:
                    start_at_ep = 1
                    episodes_this_season = (task.session.query(Episode).
                                            filter(Episode.series_id == series.id).
                                            filter(Episode.season == latest.season))
                    if series.identified_by == 'sequence':
                        # Don't look for missing too far back with sequence shows
                        start_at_ep = max(latest.number - 10, 1)
                        episodes_this_season = episodes_this_season.filter(Episode.number >= start_at_ep)
                    latest_ep_this_season = episodes_this_season.order_by(desc(Episode.number)).first()
                    downloaded_this_season = (episodes_this_season.join(Episode.releases).
                                              filter(Release.downloaded == True).all())
                    # Calculate the episodes we still need to get from this season
                    if series.begin and series.begin.season == latest.season:
                        start_at_ep = max(start_at_ep, series.begin.number)
                    # list(...) so .remove() below works on Python 3, where
                    # range() no longer returns a list (no-op on Python 2).
                    eps_to_get = list(range(start_at_ep, latest_ep_this_season.number + 1))
                    for ep in downloaded_this_season:
                        try:
                            eps_to_get.remove(ep.number)
                        except ValueError:
                            pass
                    entries.extend(self.search_entry(series, latest.season, x, task, rerun=False) for x in eps_to_get)
                    # If we have already downloaded the latest known episode, try the next episode
                    if latest_ep_this_season.downloaded_releases:
                        entries.append(self.search_entry(series, latest.season, latest_ep_this_season.number + 1, task))
            else:
                if config.get('from_start'):
                    season = 1 if series.identified_by == 'ep' else 0
                    entries.append(self.search_entry(series, season, 1, task))
                else:
                    log.verbose('Series `%s` has no history. Set begin option, or use CLI `series begin` subcommand '
                                'to set first episode to emit' % series.name)
                    continue
        return entries

    def on_search_complete(self, entry, task=None, identified_by=None, **kwargs):
        """Completion hook for emitted entries; schedules reruns as needed."""
        if entry.accepted:
            # We accepted a result from this search, rerun the task to look for next ep
            self.try_next_season.pop(entry['series_name'], None)
            task.rerun()
        else:
            if identified_by != 'ep':
                # Do not try next season if this is not an 'ep' show
                return
            if entry['series_name'] not in self.try_next_season:
                # First miss: flag the series so the rerun tries next season.
                self.try_next_season[entry['series_name']] = True
                task.rerun()
            else:
                # Don't try a second time
                self.try_next_season[entry['series_name']] = False
@event('plugin.register')
def register_plugin():
    # Register under the name 'emit_series' using plugin API version 2.
    plugin.register(EmitSeries, 'emit_series', api_ver=2)
|
{
"content_hash": "b763f5c76b38dbe0ca7e7d35fe72a331",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 120,
"avg_line_length": 44.91724137931035,
"alnum_prop": 0.5436818670351604,
"repo_name": "asm0dey/Flexget",
"id": "133713f415071bb3224f5b7281853ba6644b50e6",
"size": "6513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexget/plugins/input/emit_series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "1670496"
}
],
"symlink_target": ""
}
|
import logging
import constants
import dignities
import translate
# Indices into each chart_data entry: element 0 is the sign name,
# element 1 is the degree within that sign.
sign_idx = 0
deg_idx = 1
# Index of the Sun within constants.PLANETS.
sun_idx = 0
class AstraChart:
    """An astrological chart for a person, backed by a chart-data dict.

    chart_data maps planet names to (sign name, degree-within-sign)
    entries (see chartData.py).
    """

    def __init__(self, chartname, person, chart_data):
        self.chart_degrees = None  # lazily-computed 360-degree view
        self.chartname = chartname
        self.person = person
        self.chart_data = chart_data  # Currently the chart data is a dict, see chartData.py

    def print_info(self):
        """Print a short header followed by the raw chart data."""
        print(self.chartname + " for " + self.person)
        print()
        print(self.chart_data)

    def get_chartname(self):
        """Print and return the chart's name.

        Renamed from ``chartname``: the instance attribute of the same
        name set in __init__ shadowed the old method, so calling it on
        any instance raised TypeError ('str' object is not callable).
        """
        print("The chart's name is: ", self.chartname)
        return self.chartname

    def chart_for(self):
        """Print and return the person this chart is for."""
        print("The chart is for: ", self.person)
        return self.person

    def sun_sign(self):
        """Return the Sun's sign name."""
        sun_planet = constants.PLANETS[sun_idx]
        return self.chart_data[sun_planet][sign_idx]

    def sun_sign_degree(self):
        """Return the Sun's degree within its sign."""
        sun_planet = constants.PLANETS[sun_idx]
        return self.chart_data[sun_planet][deg_idx]

    def find_planets_at_sign_and_degree(self, sign_name, sign_deg, cap):
        '''Return list of planets found or empty list. By sign and degree (Alt: 360 degree)'''
        found_planets = []
        for planet in constants.PLANETS:
            chart_sign = self.chart_data[planet][0]
            chart_deg = self.chart_data[planet][1]
            # Compare degrees as strings so int and str inputs both match.
            if chart_sign == sign_name and str(chart_deg) == str(sign_deg):
                if cap:
                    found_planets.append(planet.capitalize())
                else:
                    found_planets.append(planet)
        return found_planets

    def find_planet_dignity_scores(self, planet_list, sign):
        """Return {planet: {sign: (dignity, score)}} for each planet given."""
        planet_dignities = {}
        for planet in planet_list:
            dignity = dignities.get_planet_dignity(planet.upper(), sign.upper())
            dscore = dignities.get_planet_dignity_score(planet.upper(), dignity, sign.upper())
            planet_dignities[planet] = {sign: (dignity, dscore)}
        return planet_dignities

    def find_aspect_center_at_360_degree(self, orbs_by_planet, deg, cap):
        '''Return a list of planet aspects on the 360 degree chart (Alt: by sign and degree)'''
        found_aspects = []
        for planet in constants.PLANETS:
            chart_aspect_list = orbs_by_planet[planet]
            for aspect in constants.ASPECTS:
                deg_list = chart_aspect_list[aspect]  # e.g. [308, 324]
                if deg in deg_list:
                    logging.debug("Found: " + planet + " " + aspect + " at " + str(deg))
                    if cap:
                        found_aspects.append(planet.capitalize() + " " + aspect.capitalize())
                    else:
                        found_aspects.append(planet + " " + aspect)
        return found_aspects

    def translate_chart_to_degrees_for_planets(self):
        '''Return the chart with the 360 degree version'''
        # Cache the translation so later calls can reuse it.
        chart_degrees = translate.translateChartToDegreesForPlanets(self.chart_data)
        self.chart_degrees = chart_degrees
        return chart_degrees

    def get_chart_in_360degrees_for_planets(self):
        '''Get or set and get the chart in 360 Degrees by planets'''
        if self.chart_degrees:
            return self.chart_degrees
        else:
            return self.translate_chart_to_degrees_for_planets()
|
{
"content_hash": "d88cfc379bd4142bad671dc60516a84b",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 104,
"avg_line_length": 37.13265306122449,
"alnum_prop": 0.5916460566089585,
"repo_name": "astraknots/ncis",
"id": "96e8dfa2d85a63255a149315aff5455a34bd366f",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AstraChart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "181688"
},
{
"name": "Rich Text Format",
"bytes": "584"
}
],
"symlink_target": ""
}
|
import sys
from fabric.api import *
from fabric.colors import green, red, green, yellow
from fabric.contrib.files import exists
from fabric.contrib import *
from django.template import Template
import os
import layout_settings
#set_operating_system()
"""
Basic commands:
"""
def show_help():
    """List the available fab commands (jscomp|css|static)."""
    print "\nfab jscomp|css|static"
def jscomp():
    # Annotate Angular DI with ng-annotate, then minify with Closure Compiler.
    local("ng-annotate -a StarterApp.js > StarterApp-Annotated.js")
    local("java -jar ~/bin/closure-compiler/compiler.jar StarterApp-Annotated.js > StarterApp-Compiled.js")
def css():
    # Minify local.css into local.min.css with Closure Stylesheets.
    local("java -jar ~/bin/closure-compiler/closure-stylesheets-20111230.jar local.css -o local.min.css")
def build():
    # Recreate ./build from ./base then ./dev (dev files win),
    # then render the index template with the base configuration.
    print "rendering..."
    local("rm -rf ./build")
    local("mkdir ./build")
    local("cp -a ./base/. ./build")
    local("cp -a ./dev/. ./build")
    local_template_render('./base/ng/app/templates/index.tpl.html',
        './build/index.html', layout_settings.load_base_configuration())
def startserverlocal():
    # Serve the current directory on port 4000 in the background and
    # open Chrome at it (macOS-specific 'open -a' invocation).
    print "Starting local..."
    local("python -m SimpleHTTPServer 4000 > temp.log &")
    local("open -a '/Applications/Google Chrome.app' http://localhost:4000/#/")
    print "\nstarting server in http://localhost:4000/"
def startserverlocalcomp():
    # Full compiled build: annotate then minify both JS apps, minify CSS.
    # NOTE(review): despite the name, nothing below actually starts a
    # server after "Starting..." -- confirm whether that is intentional.
    local("rm -rf %s/%s" % ('.', 'build'))
    local("mkdir %s/%s" % ('.', 'build'))
    print "Annotating JS..."
    local("ng-annotate -a ./bin/ng/FwApp.js > ./build/ng/FwApp-Annotated.js")
    local("ng-annotate -a ./work/ng/WorkApp.js > ./build/ng/WorkApp-Annotated.js")
    print "Compiling fw JS..."
    local("java -jar ~/bin/closure-compiler/compiler.jar ./build/ng/FwApp-Annotated.js > ./build/ng/FwApp-Compiled.js")
    local("java -jar ~/bin/closure-compiler/compiler.jar ./build/ng/WorkApp-Annotated.js > ./build/ng/WorkApp-Compiled.js")
    print "Compiling CSS..."
    local("java -jar ~/bin/closure-compiler/closure-stylesheets-20111230.jar ./bin/css/local.css -o ./build/css/local.min.css")
    print "Starting..."
def stopserver():
    # Kill whatever process is listening on port 4000.
    print "Stopping..."
    local("lsof -P | grep ':4000' | awk '{print $2}' | xargs kill -9")
def static():
    # Rebuild CSS and JS, then restart the local server.
    css()
    jscomp()
    stopserver()
    startserverlocal()
def local_template_render(source_file, dest_file, params):
    """Render source_file with str.format(**params) and write dest_file.

    The destination is only opened after formatting succeeds, so a bad
    template or missing key never leaves a truncated output file.
    """
    with open(source_file, 'r') as template:
        text = template.read()
    rendered = text.format(**params)
    with open(dest_file, 'w') as output:
        output.write(rendered)
|
{
"content_hash": "b5a69a5de95bc2fc17393a4c38d37b79",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 127,
"avg_line_length": 25.526315789473685,
"alnum_prop": 0.6511340206185567,
"repo_name": "Miki-AG/ngLaunchpad",
"id": "797ca479ca6711f234d8f455444fc8f6709cc8a9",
"size": "2425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v0.1/core/fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36426"
},
{
"name": "HTML",
"bytes": "35454"
},
{
"name": "JavaScript",
"bytes": "39159"
},
{
"name": "Python",
"bytes": "5185"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__version__ = "0.6"
# Default Django AppConfig path (used when the app is in INSTALLED_APPS).
default_app_config = 'baldr.app.BaldrAppConfig'
def _ensure_registration():
    """
    Ensure that model type resolvers and validation tools are registered with Odin.
    This provides support for Django Models and use of Django Validators in Odin resources.
    """
    from . import models  # NoQA
# Run at import time so registration happens as soon as baldr is imported.
_ensure_registration()
|
{
"content_hash": "9fe5abb8a937ab5e2db59fe6333326b9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 91,
"avg_line_length": 27.642857142857142,
"alnum_prop": 0.7131782945736435,
"repo_name": "timsavage/baldr",
"id": "fb53e692a01921bb061c984f21e096aafabab419",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baldr/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73331"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
}
|
"""Classificateur de publications.
Classifie les publications reçues suivant le médium
d'apparition de celles-ci (voir publications.py pour
les types de publications gérés.
"""
import re
__author__ = "Anas Hilama, Moncef Baazet"
class Classifier(object):
    """Classify publications by the medium they appeared in.

    Internally keeps a dict mapping each known regular expression to
    the publication type it recognizes (plus the regexp flags to use),
    and matches incoming publications against that table.
    """

    def __init__(self, pubs_types):
        """Build the regexp -> (publication type, regexp flags) table
        from the REGEXPS/REGEXPS_OPTS declared on each type."""
        self.classtor = {}
        for pub_type in pubs_types:
            for idx, pattern in enumerate(pub_type.REGEXPS):
                self.classtor[pattern] = (pub_type, pub_type.REGEXPS_OPTS[idx])

    def get_classified(self, input_gen):
        """Yield (publication type, publication) for every publication
        from input_gen whose text matches one of the known patterns
        (conference, journal, etc.)."""
        for pub in input_gen:
            for pattern, (pub_type, flags) in self.classtor.items():
                # -1 is the sentinel for "no flags".
                if flags == -1:
                    match = re.search(pattern, pub)
                else:
                    match = re.search(pattern, pub, flags)
                if match:
                    yield (pub_type, pub)
                    break
|
{
"content_hash": "02118bac8cef73ca84d212664cbdba30",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 36.542857142857144,
"alnum_prop": 0.5934323690383112,
"repo_name": "mobajm/resextract",
"id": "f74cda0c5f72aa62b51f124174a3a35e9b6fa1f9",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classifier.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15018"
}
],
"symlink_target": ""
}
|
import log
# Required prefix marking a well-formed message.
MAGIC = "DEADBEEF"
# Accepted trailing operation keywords.
ADD = "ADD"
REMOVE = "REMOVE"
# Field separator within a message.
TOKEN = ";"
logger = log.setup_logger("StringParser")
def validate_string(input_string):
    """Normalize and validate a raw message.

    Returns the stripped, upper-cased message when it starts with MAGIC
    and ends with ADD or REMOVE; logs and returns None otherwise.
    """
    if not input_string:
        logger.error("String is null")
        return None
    normalized = input_string.strip().upper()
    if not normalized.startswith(MAGIC):
        logger.error("Magic Header not found " + MAGIC)
        return None
    if not normalized.endswith((ADD, REMOVE)):
        logger.error("Doesnt end with add/remove")
        return None
    return normalized
def split_string(input_string):
    """Split a validated message on TOKEN and return its payload fields.

    Returns None (after logging) when the separator count falls outside
    the accepted [3, 10] range; otherwise drops the magic header field.
    """
    fields = input_string.split(TOKEN)
    separator_count = len(fields) - 1
    if not 3 <= separator_count <= 10:
        logger.error("Is something wrong? token count is " + str(separator_count))
        return None
    return fields[1:]
# Sample inputs covering: empty, no magic header, magic without an
# operation, mixed-case operation, short-but-valid, and fully valid.
test_strings = (""," Hello ! ","DEADBEEF;Hello ! ","DEADBEEF;Hello ! adD", "DEADBEEF;Hello ! REMOVE",
                "DEADBEEF;ABC1;A1;Floor1;Loc2;ADD")
def main():
    """Validate then split each sample string, printing both stages."""
    for sample in test_strings:
        validated = validate_string(sample)
        print(validated)
        if not validated:
            continue
        fields = split_string(validated)
        if fields:
            print(fields)


if __name__ == '__main__':
    main()
|
{
"content_hash": "e5a38f421796402b5893a7163958317b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 103,
"avg_line_length": 28.186046511627907,
"alnum_prop": 0.6138613861386139,
"repo_name": "bobbyphilip/learn_python",
"id": "d8dccddc0a9abe34961b5009719e33716cf0cfd1",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailParser/string_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "65535"
}
],
"symlink_target": ""
}
|
import os
from .tc import *
from . import build
from . import monkey
class Re2cTask(build.BuildTask):
    """Build task that runs re2c over one source file.

    The option attributes mirror re2c's single-letter command-line
    flags (noted beside each); the toolchain translates them into the
    $OPTIONS string in emit_build.
    """
    def __init__(self, project, sourcePath, builtDir, workingDir, toolchain, outputExt='.cpp'):
        super().__init__(project)
        self.sourcePath = sourcePath
        self.builtDir = builtDir
        self.workingDir = workingDir
        self.toolchain = toolchain
        # commandline options
        self.useBitVectors = True # -b
        #self.conditionSupport = False # -c
        self.debug = False # -d
        self.emitDot = False # -D
        self.storableState = False # -f
        self.flexSyntax = False # -F
        self.computedGoto = False # -g
        self.lineMappings = True # -i when False
        self.reuse = False # -r
        self.nestedIfs = False # -s
        #self.flexHeader = False # -t
        self.charType = 'ascii' # valid options are { 'ascii', 'ucs2', 'utf32' }
        # for -w -u
        self.singlePass = False # -1
        self._calc_output(outputExt)
    def emit(self):
        # Delegate ninja emission to the toolchain; register the phony
        # alias target if one was configured on this task.
        project = self.project
        toolchain = self.toolchain
        toolchain.emit_build(project, self)
        if self.phonyTarget:
            project.projectMan.add_phony_target(self.phonyTarget, self.outputPath)
    def _calc_output(self, outputExt):
        # Derive the normalized sourcePath (forward slashes) and the
        # outputPath (under builtDir, original name + outputExt).
        sourcePath = os.path.normpath(self.sourcePath).replace("\\", "/")
        basename = os.path.basename(sourcePath)
        if os.path.isabs(sourcePath):
            # NOTE(review): absolute sources drop their directory part, so two
            # same-named sources in different dirs would collide -- confirm.
            self.sourcePath = sourcePath
            self.outputPath = os.path.join(self.builtDir, basename + outputExt)
        else:
            # Relative sources resolve against workingDir and keep their
            # relative directory structure under builtDir.
            self.sourcePath = os.path.join(self.workingDir, sourcePath).replace("\\", "/")
            self.outputPath = os.path.join(self.builtDir, os.path.dirname(sourcePath), basename + outputExt)
class Re2cToolChain(build.ToolChain):
    """re2c lexer generator driver.

    Emits the shared ninja rule for re2c and, per task, a build
    statement translating the task's flags into re2c's single-letter
    command-line options.
    """
    def __init__(self, re2cPath):
        super().__init__("re2c")
        self._re2cPath = re2cPath
    def emit_rules(self, ninjaFile):
        """Write the shared 'rule re2c' block to the ninja file."""
        ninjaFile.write("#############################################\n")
        ninjaFile.write("# re2c\n")
        ninjaFile.write("\n")
        ninjaFile.write("rule re2c\n")
        ninjaFile.write("  depfile = $DEP_FILE\n")
        # The command assumes $OPTIONS is non-empty (the default
        # useBitVectors=True guarantees at least one flag); an empty
        # $OPTIONS would leave a bare '-' on the command line.
        ninjaFile.write("  command = \"%s\" -$OPTIONS -o \"$OUT_FILE\" \"$SRC_FILE\" \n" % (self._re2cPath))
        ninjaFile.write("  description = re2c $DESC\n")
        ninjaFile.write("  restat = 1\n")
        ninjaFile.write("\n")
    def emit_build(self, project, task):
        """Write the build statement for one Re2cTask, mapping its
        option attributes to re2c flags."""
        options = []
        if task.useBitVectors:
            # Bug fix: bit vectors are re2c's -b flag; this previously
            # appended "s" (a copy-paste of the nestedIfs flag below),
            # so useBitVectors wrongly produced -s.
            options.append("b")
        if task.debug:
            options.append("d")
        if task.emitDot:
            options.append("D")
        if task.storableState:
            options.append("f")
        if task.flexSyntax:
            options.append("F")
        if task.computedGoto:
            options.append("g")
        if not task.lineMappings:
            options.append("i")
        if task.reuse:
            options.append("r")
        if task.nestedIfs:
            options.append("s")
        if task.singlePass:
            options.append("1")
        if task.charType == 'ucs2':
            options.append("w")
        elif task.charType == 'utf32':
            options.append("u")
        else: # catch-all, handles default behavior of task.charType == 'ascii':
            pass
        # emit ninja file contents
        ninjaFile = project.projectMan.ninjaFile
        outputPath = build.ninja_esc_path(task.outputPath)
        sourcePath = build.ninja_esc_path(task.sourcePath)
        re2cPath = build.ninja_esc_path(self._re2cPath)
        sourceName = os.path.basename(task.sourcePath)
        outputName = os.path.basename(task.outputPath)
        optionsStr = "".join(options)
        extraOutputs = " ".join([build.ninja_esc_path(p) for p in task.extraOutputs])
        # write build command: output depends on the source and on the
        # re2c binary itself (implicit dep), plus any extra/order-only deps.
        ninjaFile.write("build %(outputPath)s %(extraOutputs)s : re2c %(sourcePath)s | %(re2cPath)s" % locals())
        build.translate_extra_deps(ninjaFile, project, task, False)
        build.translate_order_only_deps(ninjaFile, project, task, True)
        ninjaFile.write("\n")
        ninjaFile.write("  SRC_FILE = %s\n" % task.sourcePath)
        ninjaFile.write("  OUT_FILE = %s\n" % task.outputPath)
        ninjaFile.write("  OPTIONS = %s\n" % optionsStr)
        ninjaFile.write("  DESC = %s -> %s\n" % (sourceName, outputName))
        ninjaFile.write("\n")
def add_tool(cls):
    """Monkey-patch re2c helper methods onto *cls* (a project class)."""
    def _re2c_one(self, sourcePath, ext=".cpp"):
        # Create one Re2cTask and force-depend on its output so the
        # generated source exists before anything that #includes it.
        re2cToolChain = self.projectMan.get_toolchain("re2c")
        task = Re2cTask(self, sourcePath, self.builtDir, self.projectDir, re2cToolChain, ext)
        self.set_re2c_options(task)
        self._forcedDeps.add(task.outputPath)
        return task
    @monkey.new_method(cls)
    def re2c(self, filePaths, ext):
        # Accept either a single path or an iterable of paths.
        if isinstance(filePaths, str):
            return _re2c_one(self, filePaths, ext)
        else:
            taskList = []
            for filePath in filePaths:
                task = _re2c_one(self, filePath, ext)
                taskList.append(task)
            tasks = build.BuildTasks(taskList)
            return tasks
    @monkey.new_method(cls)
    def re2c_cpp_compile(self, filePaths, ext=".cpp"):
        # Run re2c over the inputs and compile the generated C++ sources.
        re2c_sources = []
        with self.re2c(filePaths, ext) as tasks:
            for task in tasks:
                re2c_sources.append(task.outputPath)
                # undo the forced dependency, since no file will #include the outputPath
                self._forcedDeps.remove(task.outputPath)
        self.cpp_compile(re2c_sources)
        return re2c_sources
    @monkey.new_method(cls)
    def set_re2c_options(self, task):
        # Hook for projects to customize Re2cTask options; default no-op.
        pass
|
{
"content_hash": "2d9d682f9bee516cf83fe7c323804e52",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 113,
"avg_line_length": 39.30263157894737,
"alnum_prop": 0.5612654837629729,
"repo_name": "fifoforlifo/pynja",
"id": "938a8dcc1f388647c4d0e19e0b6e7072189e62a3",
"size": "5974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/pynja/re2c.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1431"
},
{
"name": "C++",
"bytes": "2971018"
},
{
"name": "Java",
"bytes": "701"
},
{
"name": "Protocol Buffer",
"bytes": "353085"
},
{
"name": "Python",
"bytes": "190328"
},
{
"name": "Shell",
"bytes": "5634"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
import json
def forwards(apps, schema_editor):
    """Rewrite stored chat messages, replacing old single-character
    smiley codes with the new codes from chat/static/smileys/info.json.

    Reads the historical metadata from old_smileys_info.json (keyed by
    the old in-message character, with a 'text_alt' per smiley) and the
    new metadata (lists of {'alt', 'code'} per category), joins them on
    the alt text, then rewrites every message character-by-character.
    """
    ch_m = apps.all_models['chat']
    Message = ch_m['message']
    with open('old_smileys_info.json') as old_file:
        data_old = json.load(old_file)
    with open('chat/static/smileys/info.json') as new_file:
        data_new = json.load(new_file)

    def find_old_entry(alt):
        # Linear scan over the old metadata; the data set is small.
        for k in data_old:
            for smile in data_old[k]:
                if data_old[k][smile]['text_alt'] == alt:
                    return smile
        # Bug fix: the original raised Exception("Not {} found") without
        # ever filling the placeholder, hiding which alt was missing.
        raise Exception("No {} found".format(alt))

    # Map json-encoded old character -> new smiley code.
    output = {}
    for k in data_new:
        for smile in data_new[k]:
            entry = find_old_entry(smile['alt'])
            output[json.dumps(entry)] = smile['code']

    messages = Message.objects.all()
    updated_count = 0
    updated_smileys = 0
    for mess in messages:
        output_content = ""
        for char in mess.content:
            get = output.get(json.dumps(char))
            if get is not None:
                updated_smileys += 1
                output_content += get
            else:
                output_content += char
        # Only touch rows that actually changed.
        if mess.content != output_content:
            mess.content = output_content
            mess.save(update_fields=["content"])
            updated_count += 1
    print("Updated " + str(updated_count) + " , total smielys: " + str(updated_smileys))
class Migration(migrations.Migration):
    """Data migration: convert legacy smiley characters in chat messages."""
    dependencies = [('chat', '0005_add_symbol_20170707_1213'), ]
    operations = [
        # hints routes this data migration to the default database only.
        migrations.RunPython(forwards, hints={'target_db': 'default'}),
    ]
|
{
"content_hash": "02b5887c715512c61f4f3ded5b242f79",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 88,
"avg_line_length": 30.037037037037038,
"alnum_prop": 0.5690505548705302,
"repo_name": "Deathangel908/djangochat",
"id": "e7f234c5064984d93165890d3a1dadc5134b182d",
"size": "1695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/migrations/0006_smileys_20171231_0400.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13457"
},
{
"name": "C++",
"bytes": "430"
},
{
"name": "CMake",
"bytes": "11600"
},
{
"name": "Dockerfile",
"bytes": "5290"
},
{
"name": "EJS",
"bytes": "3530"
},
{
"name": "HTML",
"bytes": "28977"
},
{
"name": "Java",
"bytes": "307689"
},
{
"name": "JavaScript",
"bytes": "51012"
},
{
"name": "Python",
"bytes": "169328"
},
{
"name": "SCSS",
"bytes": "7631"
},
{
"name": "Sass",
"bytes": "18064"
},
{
"name": "Shell",
"bytes": "27650"
},
{
"name": "TypeScript",
"bytes": "1107341"
},
{
"name": "Vue",
"bytes": "280246"
}
],
"symlink_target": ""
}
|
'''
This file shows examples on how to use the scripts in project
'''
from fileparser import do_one_pdb
from Config import *
import os,sys,io
import gzip
from vector_gen import pdb_container,fake_pdb_container
from mapping import *
from job_dispatcher import dock_dispatcher
#PDB_tar=['1uwj']
#do_one_pdb('100d')
#A= pdb_container('100d',filepos=os.path.join(pdb_PREFIX,'1j8q.pdb.gz'))
#print A.set_vina_benchmark('147')
#a= fake_pdb_container('aa2ar',filepos=fake_src_PREFIX+'aa2ar/'+fake_pdb_name)
#filenames = os.listdir(fake_hetero_PREFIX)
#for filename in filenames:
# if filename.split('.')[-1]=='pdb':
# a.append_vectors(os.path.join(fake_hetero_PREFIX,filename))
def quick_split(pdb):
    """Split one PDB entry using its gzipped file under pdb_PREFIX."""
    code = pdb.lower()
    fake_pdb_container(code, filepos=os.path.join(pdb_PREFIX, code + '.pdb.gz'))
from source import PDB_protein_tar
# Split the first 100 target proteins, plus avidin (1avd) explicitly.
for PDB in PDB_protein_tar[0:100]:
    quick_split(PDB.lower())
quick_split('1avd')
# Disabled example below: dispatching a docking job (kept as reference).
'''
config = {
    'jobname': 'fast',
    'filedir': '/media/wy/data/fast/',
    'benchmark': '/media/wy/data/fast/'
}
A= dock_dispatcher(jobname='rigor',filedir= '/media/wy/data/rigor/', benchmark= '/media/wy/data/benchmark/')
A.do_one_ligand('1jdc','419')
'''
|
{
"content_hash": "8901577662d44dbd409871fdd8baf5cc",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 108,
"avg_line_length": 24.875,
"alnum_prop": 0.6909547738693468,
"repo_name": "Reimilia/pdb_sth",
"id": "9b6c865e42e3f82090f9e5166b805ee27cbfac75",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389896"
}
],
"symlink_target": ""
}
|
import sys
import datetime as dt
from optparse import OptionParser
from bigdl.optim.optimizer import Adagrad
from zoo.common.nncontext import init_nncontext
from zoo.feature.text import TextSet
from zoo.models.textclassification import TextClassifier
if __name__ == "__main__":
    # Command-line options. All values arrive as strings (optparse default)
    # and are cast to int/float at their use sites below.
    parser = OptionParser()
    parser.add_option("--data_path", dest="data_path")
    parser.add_option("--embedding_path", dest="embedding_path")
    parser.add_option("--class_num", dest="class_num", default="20")
    parser.add_option("--partition_num", dest="partition_num", default="4")
    parser.add_option("--token_length", dest="token_length", default="200")
    parser.add_option("--sequence_length", dest="sequence_length", default="500")
    parser.add_option("--max_words_num", dest="max_words_num", default="5000")
    parser.add_option("--encoder", dest="encoder", default="cnn")
    parser.add_option("--encoder_output_dim", dest="encoder_output_dim", default="256")
    parser.add_option("--training_split", dest="training_split", default="0.8")
    parser.add_option("-b", "--batch_size", dest="batch_size", default="128")
    parser.add_option("-e", "--nb_epoch", dest="nb_epoch", default="20")
    parser.add_option("-l", "--learning_rate", dest="learning_rate", default="0.01")
    parser.add_option("--log_dir", dest="log_dir", default="/tmp/.analytics-zoo")
    parser.add_option("-m", "--model", dest="model")
    parser.add_option("--output_path", dest="output_path")
    # NOTE(review): the full sys.argv (including argv[0]) is passed, so the
    # script name ends up in the leftover `args` list, which is never used.
    (options, args) = parser.parse_args(sys.argv)
    sc = init_nncontext("Text Classification Example")
    # Read the raw corpus and spread it across the requested partitions.
    text_set = TextSet.read(path=options.data_path).to_distributed(sc, int(options.partition_num))
    print("Processing text dataset...")
    # Tokenize -> normalize -> index words (dropping the 10 most frequent) ->
    # pad/truncate to sequence_length -> wrap into training Samples.
    transformed = text_set.tokenize().normalize()\
        .word2idx(remove_topN=10, max_words_num=int(options.max_words_num))\
        .shape_sequence(len=int(options.sequence_length)).generate_sample()
    train_set, val_set = transformed.random_split(
        [float(options.training_split), 1 - float(options.training_split)])
    if options.model:
        # Resume from a previously saved model instead of building a new one.
        model = TextClassifier.load_model(options.model)
    else:
        token_length = int(options.token_length)
        # GloVe 6B embeddings are only published in these dimensionalities.
        if not (token_length == 50 or token_length == 100
                or token_length == 200 or token_length == 300):
            raise ValueError('token_length for GloVe can only be 50, 100, 200, 300, but got '
                             + str(token_length))
        embedding_file = options.embedding_path + "/glove.6B." + str(token_length) + "d.txt"
        word_index = transformed.get_word_index()
        model = TextClassifier(int(options.class_num), embedding_file,
                               word_index, int(options.sequence_length),
                               options.encoder, int(options.encoder_output_dim))
    model.compile(optimizer=Adagrad(learningrate=float(options.learning_rate),
                                    learningrate_decay=0.001),
                  loss="sparse_categorical_crossentropy",
                  metrics=['accuracy'])
    # Unique per-run TensorBoard app name so successive runs don't collide.
    app_name = 'textclassification-' + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
    model.set_tensorboard(options.log_dir, app_name)
    model.fit(train_set, batch_size=int(options.batch_size),
              nb_epoch=int(options.nb_epoch), validation_data=val_set)
    predict_set = model.predict(val_set, batch_per_thread=int(options.partition_num))
    # Get the first five prediction probability distributions
    predicts = predict_set.get_predicts().take(5)
    print("Probability distributions of the first five texts in the validation set:")
    for predict in predicts:
        (uri, probs) = predict
        print("Prediction for " + uri + ": ")
        print(probs)
    if options.output_path:
        # Persist both the trained model and the word index needed to re-encode
        # new text the same way at inference time.
        model.save_model(options.output_path + "/text_classifier.model")
        transformed.save_word_index(options.output_path + "/word_index.txt")
        print("Trained model and word dictionary saved")
    sc.stop()
|
{
"content_hash": "83416b458c36b140683226318003ad27",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 98,
"avg_line_length": 52.56578947368421,
"alnum_prop": 0.6523153942428035,
"repo_name": "intel-analytics/analytics-zoo",
"id": "9baec7d45743f20ebab5ec90f0ffcfc6aef996c7",
"size": "4586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/examples/textclassification/text_classification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
import boto3
from datetime import datetime
import json
from boto3.dynamodb.conditions import Key
from enum import Enum
from decimal import Decimal
from sns_boomerang.settings import util, TABLE_JOBS, TABLE_TOPICS
# Shared AWS handles created once at import time: DynamoDB tables holding the
# scheduled jobs and topic records, plus SNS resource/client for publishing.
dynamo = boto3.resource('dynamodb')
sns_resource = boto3.resource('sns')
sns_client = boto3.client('sns')
JOB_TABLE = dynamo.Table(TABLE_JOBS)
TOPIC_TABLE = dynamo.Table(TABLE_TOPICS)
def utc_now():
    """Return the current Unix epoch time in seconds (float).

    Uses an aware UTC datetime. The previous ``datetime.utcnow().timestamp()``
    interpreted the naive UTC value in the machine's local timezone, shifting
    the returned epoch by the UTC offset on any non-UTC host.
    """
    from datetime import timezone  # local import: the module header only imports datetime
    return datetime.now(timezone.utc).timestamp()
class SubscriptionType(Enum):
    """Subscription protocols supported when adding SNS subscriptions.

    Values are the literal ``Protocol`` strings passed to ``Topic.subscribe``.
    """
    HTTPS = 'https'
    LAMBDA = 'lambda'
    SQS = 'sqs'
class Job():
    """A scheduled SNS publish job persisted in the DynamoDB jobs table.

    Instances mirror table items one-to-one: ``add_or_update`` writes
    ``self.__dict__`` directly, so attribute names are the item's columns.
    """
    def __init__(self, topic, payload, time_due, id='', version=1, is_valid=1, time_scheduled=None, **kwargs):
        """
        :type id: string
        """
        self.topic = topic
        self.payload = payload
        self.time_due = time_due
        self.version = version
        self.is_valid = is_valid
        if not time_scheduled:
            time_scheduled = int(utc_now())
        self.time_scheduled = time_scheduled
        if id:
            self.id = id
        else:
            # Derive a deterministic id from the job's identifying fields.
            self.id = util.compute_hash(payload, version, topic, time_due)
    def add_or_update(self):
        """Stamp the current scheduling time and upsert this job."""
        self.time_scheduled = int(utc_now())
        item = self.__dict__
        JOB_TABLE.put_item(Item=item)
    @classmethod
    def get(cls, id, check_valid=False):
        """Fetch a job by primary key; None when absent or (optionally) invalid."""
        response = JOB_TABLE.get_item(
            Key={'id': id}
        )
        item = response.get('Item')
        if not item:
            return None
        if check_valid and not item.get('is_valid'):
            return None
        return cls(**item)
    @classmethod
    def from_stream_record(cls, record):
        """Rebuild a Job from a DynamoDB stream image (typed attribute map)."""
        def attr(name, type_code, fallback):
            # Stream images wrap each value as {"S": ...} or {"N": ...}.
            return record.get(name, {}).get(type_code, fallback)
        return cls(
            id=attr("id", 'S', ''),
            version=attr("version", 'N', 1),
            payload=attr("payload", 'S', ''),
            topic=attr("topic", 'S', ''),
            time_due=attr("time_due", 'N', 0),
            time_scheduled=attr("time_scheduled", 'N', 0),
            is_valid=attr("is_valid", 'N', 0))
    @staticmethod
    def flush():
        """Delete every valid job whose due time is already in the past."""
        cutoff = int(utc_now())
        response = JOB_TABLE.query(
            IndexName='is_valid-time_due-index',
            KeyConditionExpression=Key('is_valid').eq(
                1) & Key('time_due').lt(cutoff)
        )
        overdue = response.get('Items')
        if overdue:
            with JOB_TABLE.batch_writer() as batch:
                for entry in overdue:
                    batch.delete_item(Key={'id': entry.get('id'), 'is_valid': 1})
    def publish(self):
        """Publish this job's payload to its SNS topic.

        sns_resource could only accept json object in message
        :return: an error dict when the topic arn or the payload is missing
        """
        target = Topic.get(self.topic)
        if not target.arn:
            return {'error': 'arn can not be none'}
        if not self.payload:
            return {'error': 'payload empty'}
        default_message = json.dumps({'payload': json.loads(self.payload)})
        sns_client.publish(
            TopicArn=target.arn,
            Message=json.dumps({
                'default': default_message,
                'version': self.version
            }),
            MessageStructure='json'
        )
class Topic():
    """An SNS topic record stored in the DynamoDB topics table."""
    def __init__(self, topic, arn='', time_updated=None, is_active=True):
        self.topic = topic
        self.arn = arn
        self.is_active = is_active
        self.time_updated = time_updated or int(utc_now())
    @classmethod
    def get(cls, topic, check_is_active=False):
        """Look up a topic by name with a consistent read; None when absent."""
        response = TOPIC_TABLE.get_item(
            Key={'topic': topic}, ConsistentRead=True)
        item = response.get('Item')
        if not item:
            return None
        if check_is_active and not item.get('is_active'):
            return None
        return cls(**item)
    def add_or_update(self):
        """Ensure an SNS arn exists, stamp the update time, and persist.

        :return: the dict that was written to the table
        """
        if not self.arn:
            self.arn = self._create_sns_topic_arn(self.topic)
        self.time_updated = int(utc_now())
        item = self.__dict__
        TOPIC_TABLE.put_item(Item=item)
        return item
    def list_jobs(self, version):
        """All job items filed under this topic at the given version."""
        response = JOB_TABLE.query(
            IndexName='topic-version-index',
            KeyConditionExpression=Key('topic').eq(
                self.topic) & Key('version').eq(version)
        )
        return response.get('Items', [])
    @staticmethod
    def _create_sns_topic_arn(sns_topic):
        """Create the SNS topic and return its TopicArn ('' fallback)."""
        response = sns_client.create_topic(
            Name=sns_topic
        )
        if response:
            return response.get('TopicArn', '')
class TopicSubscriptions():
    """
    Subscriptions of a single (existing, active) SNS topic.
    """
    def __init__(self, topic):
        """Bind to the SNS topic named ``topic``.

        Raises:
            NameError: when no active topic with that name exists.

        The previous code overwrote the ``topic`` parameter with the result of
        ``Topic.get`` before building the error message, so on the failure
        path ``topic`` was None and ``topic.topic`` raised AttributeError,
        masking the intended NameError.
        """
        topic_name = topic
        active_topic = Topic.get(topic_name, check_is_active=True)
        if not active_topic:
            raise NameError("topic: {} does not exist".format(topic_name))
        self.topic = sns_resource.Topic(active_topic.arn)
    def lists(self):
        """
        Iterate over all subscriptions of the topic.
        """
        subscription_iterator = self.topic.subscriptions.all()
        return subscription_iterator
    def remove(self, subscription_arn):
        """Remove one subscription by its arn; returns True."""
        subscription = sns_resource.Subscription(subscription_arn)
        subscription.delete()
        return True
    def add(self, subscription_type=SubscriptionType.SQS.value, endpoint_arn=''):
        """
        Add a subscription of the given protocol pointing at ``endpoint_arn``.
        """
        response = self.topic.subscribe(Protocol=subscription_type,
                                        Endpoint=endpoint_arn)
        return response
|
{
"content_hash": "fe3a0847049200abcd60b7f0629c559e",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 110,
"avg_line_length": 30.231155778894472,
"alnum_prop": 0.555186170212766,
"repo_name": "zdjohn/sns-boomerang",
"id": "99bb3f9e91a2b5ae3913af2b43c78b8bbb92da2a",
"size": "6016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sns_boomerang/common/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25409"
}
],
"symlink_target": ""
}
|
import sys
from oauth2client.client import OAuth2Credentials
from printapp import app, mongo, oauthcredentials
def add_fake_credentials(email, hasprinter=True):
    """Store fake OAuth2 credentials for `email`; the token marks printer absence."""
    if hasprinter:
        token = 'fakeoauth.py'
    else:
        token = 'fakeoauth.py|noprinter'
    credentials = OAuth2Credentials(token, token, token,
                                    token, None, token, token)
    with app.app_context():
        oauthcredentials._save_credentials(email, credentials)
def remove_credentials(email):
    """Delete any stored credentials document matching `email`."""
    query = {'email': email}
    with app.app_context():
        mongo.db.credentials.remove(query)
if __name__ == '__main__':
    # CLI: fakeoauth.py <username> [delete|noprinter]  (Python 2 script)
    usage = 'Usage: {} username [delete|noprinter]'.format(sys.argv[0])
    if len(sys.argv) not in (2, 3):
        print usage
    else:
        # Optional second argument selects the action; default is to add.
        if len(sys.argv) == 3:
            command = sys.argv[2]
        else:
            command = 'add'
        email = sys.argv[1]
        # Bare usernames get the campus domain appended.
        if '@' not in email:
            email = email + '@students.calvin.edu'
        if command == 'add':
            add_fake_credentials(email)
        elif command == 'noprinter':
            add_fake_credentials(email, hasprinter=False)
        elif command in ('delete', 'remove') :
            remove_credentials(email)
        else:
            print usage
|
{
"content_hash": "43cc461a83439828f749ac907c124522",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 71,
"avg_line_length": 31.225,
"alnum_prop": 0.5836669335468375,
"repo_name": "jglamine/calvinwebprint",
"id": "f2feb704062b30414f527c549314793693eea985",
"size": "1396",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/fakeoauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1311"
},
{
"name": "HTML",
"bytes": "23038"
},
{
"name": "JavaScript",
"bytes": "21052"
},
{
"name": "Python",
"bytes": "68855"
}
],
"symlink_target": ""
}
|
r"""Implements the general form of the loss.
This is the simplest way of using this loss. No parameters will be tuned
automatically, it's just a simple function that takes in parameters (likely
hand-tuned ones) and return a loss. For an adaptive loss, look at adaptive.py
or distribution.py.
"""
import jax
import jax.numpy as jnp
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
  """jnp.clip() but the gradient doesn't get clipped on the backward pass.

  The forward value is exactly jnp.clip; fake_clip_jvp below overrides the
  derivative so gradients flow through the clipped region unchanged.
  """
  return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
  """Override fake_clip()'s gradient so that it's a no-op."""
  # Primal output is still the clipped value; the tangent of `a` is passed
  # through unchanged (identity JVP).
  return jnp.clip(*primals), tangents[0]
@jax.jit
def lossfun(x, alpha, scale):
  r"""Implements the general form of the loss.
  This implements the rho(x, \alpha, c) function described in "A General and
  Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.
  Args:
    x: The residual for which the loss is being computed. x can have any shape,
      and alpha and scale will be broadcasted to match x's shape if necessary.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
      interpolation between several discrete robust losses:
        alpha=-Infinity: Welsch/Leclerc Loss.
        alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lortentzian loss.
        alpha=1: Charbonnier/pseudo-Huber loss.
        alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha.
  Returns:
    The losses for each element of x, in the same shape as x.
  """
  eps = jnp.finfo(jnp.float32).eps
  maxval = 1e15
  # A "safe" versions of expm1 that will not NaN-out on large inputs.
  expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
  # `scale` must be > 0.
  scale = jnp.maximum(eps, scale)
  # Large values of |x| can cause non-finite gradients. fake_clip clips the
  # value but (via its custom JVP) leaves the gradient unclipped, so clipped
  # residuals still receive gradient signal.
  x = fake_clip(x, -maxval, maxval)
  # The loss when alpha == 2. This will get reused repeatedly.
  loss_two = 0.5 * (x / scale)**2
  # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
  a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
                -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
  # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
  b = jnp.maximum(eps, jnp.abs(a - 2))
  # The loss when not in one of the special cases.
  loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
  # Select which of the cases of the loss to return as a function of alpha.
  # Note jnp.where evaluates all branches; the special-case expressions shadow
  # loss_ow only where alpha is exactly -inf, 0, 2, or +inf.
  return jnp.where(
      alpha == -jnp.inf, -expm1_safe(-loss_two),
      jnp.where(
          alpha == 0, jnp.log1p(loss_two),
          jnp.where(alpha == 2, loss_two,
                    jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
                              loss_ow))))
|
{
"content_hash": "561ff7b9ecd562fb6d95c90d740552b6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 38.02298850574713,
"alnum_prop": 0.6635429262394196,
"repo_name": "google-research/google-research",
"id": "5e34a953cde5d316a917e6da26f4b229f0d3724b",
"size": "3916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robust_loss_jax/general.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
__version__='''$Id$'''
__doc__="""most basic test possible that makes a PDF.
Useful if you want to test that a really minimal PDF is healthy,
since the output is about the smallest thing we can make."""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
# Route this module's output files into the standard reportlab test output dir.
setOutDir(__name__)
import unittest
from reportlab.pdfgen.canvas import Canvas
class HelloTestCase(unittest.TestCase):
    "Simplest test that makes PDF"
    def test(self):
        # Produce a one-page PDF; the author string is non-ASCII (UTF-8 bytes)
        # to exercise metadata encoding as well.
        c = Canvas(outputfile('test_hello.pdf'))
        c.setAuthor('\xe3\x83\x9b\xe3\x83\x86\xe3\x83\xab\xe3\x83\xbbe\xe3\x83\x91\xe3\x83\xb3\xe3\x83\x95\xe3\x83\xac\xe3\x83\x83\xe3\x83\x88')
        c.setFont('Helvetica-Bold', 36)
        c.drawString(100,700, 'Hello World')
        c.save()
    def test_rl_config_reset(self):
        # rl_config._reset() must clear registered typefaces/fonts and any
        # state hung on the global sequencer.
        from reportlab import rl_config
        from reportlab.pdfbase import pdfmetrics, _fontdata
        tfd = pdfmetrics._typefaces
        fbn = _fontdata.fontsByName
        tfd[' a ']=1
        fbn[' b ']=1
        ntfd = len(tfd)
        nfbn = len(fbn)
        from reportlab.lib import sequencer
        seq = sequencer.getSequencer()
        seq._dingo = 1
        rl_config._reset()
        assert not hasattr(seq,'_dingo')
        # Use the `in` operator (dict.has_key is Python-2-only) and check the
        # keys that were actually inserted: the original asserted that fbn
        # lacked ' a ' -- a key never added to fbn -- so the reset of ' b '
        # went untested.
        assert ' a ' not in tfd and len(tfd)<ntfd
        assert ' b ' not in fbn and len(fbn)<nfbn
def makeSuite():
    # Suite factory used by the reportlab test runner and the __main__ block.
    return makeSuiteForClasses(HelloTestCase)
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where output files were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
|
{
"content_hash": "471a494b1d3581f37e03a21c17aed0a8",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 144,
"avg_line_length": 33.91111111111111,
"alnum_prop": 0.6513761467889908,
"repo_name": "makinacorpus/reportlab-ecomobile",
"id": "27b5f038205ba7385dcfe8153bcb5a9d6cab0f86",
"size": "1624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hello.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "764229"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2863462"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
}
|
"""
Traceplot rank_vlines Bokeh
===========================
_thumb: .1, .8
"""
import arviz as az
# Load the pre-computed "non_centered_eight" sample dataset shipped with ArviZ.
data = az.load_arviz_data("non_centered_eight")
# Rank plot (vlines variant) for tau and mu, rendered with the Bokeh backend.
ax = az.plot_trace(data, var_names=("tau", "mu"), kind="rank_vlines", backend="bokeh")
|
{
"content_hash": "0abe6b19b1a91f8b621747f404ae83f6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 86,
"avg_line_length": 23.5,
"alnum_prop": 0.5829787234042553,
"repo_name": "mcmcplotlib/mcmcplotlib",
"id": "274e5b205d806f62cbab28518e0701c3e8210d9e",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "_downloads/98c788c74c0d7e50dc45f46244ca7e58/bokeh_plot_trace_vlines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91689"
}
],
"symlink_target": ""
}
|
import math
import Pyro4, time, psutil
import random
#import sqlite3 as sqlite
#import datetime as dt
#perhaps needs to be set somewhere else
# NOTE(review): hard-coded HMAC key shared by all Pyro4 peers -- should be
# loaded from configuration rather than committed in source.
Pyro4.config.HMAC_KEY='pRivAt3Key'
#lists for return
# Module-level accumulators filled by insertRun() and returned by functionCount().
stressValues = []
runStartTimeList= []
runDurations = []
# Largest single allocation (MB) one run may request; bigger MEM loads are split.
MALLOC_LIMIT = 4095
import sys
from Library import getHomepath
# Make the distributions package importable from the installation home path.
sys.path.insert(0, getHomepath() + '/distributions/')
from abstract_dist import *
class dist_exp(abstract_dist):
    # Marker class for the exponential distribution; all behavior is inherited
    # from abstract_dist -- the module-level functions below do the real work.
    pass
def functionCount(emulationID,emulationName,emulationLifetimeID,startTimesec,duration, distributionGranularity,distributionArg,resType,HOMEPATH):
    # Build an exponentially-distributed schedule of load runs, filling the
    # module-level lists and returning them together with the trigger type.
    # NOTE(review): stopLoad is parsed but never used -- every run is inserted
    # with startLoad; verify against the other distribution modules.
    # NOTE(review): resType is not forwarded to insertLoad(), which needs it to
    # decide whether oversized MEM loads must be split.
    startLoad = int(distributionArg["startload"])
    stopLoad = int(distributionArg["stopload"])
    # Fixed seed keeps the generated schedule reproducible across runs.
    random.seed(100)
    duration = float(duration)
    timescale = duration/distributionGranularity
    mylambda = 1/float(timescale)  # NOTE(review): computed but unused
    runStartTime = startTimesec
    for x in xrange(1,distributionGranularity+1):
        # On-time grows linearly with x; off-time is the remainder of the slot.
        timeON = timescale*x/distributionGranularity
        if timeON == timescale:
            # Avoid a zero off-time (would divide by zero below).
            timeOFF = 1
            timeON = timescale -1
        else:
            timeOFF = (timescale - timeON)
        # Draw exponential run duration and inter-run wait from the slot times.
        durationLambda = 1/float(timeON)
        durationExp = random.expovariate(durationLambda)
        waitLambda = 1/float(timeOFF)
        waitExp = random.expovariate(waitLambda)
        insertLoad(startLoad,runStartTime, durationExp)
        runStartTime=runStartTime+durationExp+waitExp
    triggerType = "time"
    return stressValues, runStartTimeList, runDurations, triggerType
def insertRun(stressValue, startTime, runRuration):
    # Append one run (load value, start time, duration) to the module-level
    # schedule lists returned by functionCount().
    # NOTE(review): parameter name "runRuration" is a typo for "runDuration",
    # kept to preserve any keyword callers.
    stressValues.append(stressValue)
    runStartTimeList.append(startTime)
    runDurations.append(runRuration)
#    print "Inserted RUN: ", stressValue, time.strftime("%Y-%m-%d %H:%M:%S.%f", time.gmtime(startTime)), runRuration
# this function checks if the load is higher than the malloc limit. In that case creates smaller runs
def insertLoad(load, startTime, duration, resType="mem"):
    """Schedule `load` at `startTime` for `duration`, splitting oversized MEM loads.

    resType: resource type ("mem", "io", "net", "cpu"). The previous version
    read `resType` as an undefined global (it is a parameter of functionCount,
    not a module name) and therefore raised NameError whenever
    load > MALLOC_LIMIT; defaulting to "mem" keeps the call signature
    backward-compatible while making the splitting path reachable.
    """
    # if not a resource type MEM we just insert it
    if load > MALLOC_LIMIT and resType == "mem":
        # Split into MALLOC_LIMIT-sized runs plus one remainder run.
        div = int(load // MALLOC_LIMIT)
        rest = load - (div * MALLOC_LIMIT)
        for _ in range(0,div):
            insertRun(MALLOC_LIMIT, startTime, duration)
        if rest > 0:
            insertRun(rest, startTime, duration)
    else:
        insertRun(load, startTime, duration)
def distHelp():
'''
Help method that gives description of trapezoidal distribution usage
'''
print "Trapezoidal distribution takes in start and stop load (plus malloclimit for MEM) parameters and gradually increasing resource workload by spawning jobs in parallel. Can be used with MEM,IO,NET resource types."
return "Trapezoidal distribution takes in start and stop load (plus malloclimit for MEM) parameters and gradually increasing resource workload by spawning jobs in parallel. Can be used with MEM,IO,NET resource types."
def argNames(Rtype=None):
    '''
    We specify how many arguments distribution instance require to run properly
    Rtype = <MEM, IO, NET>
    IMPORTANT: All argument variable names must be in lower case
    '''
    #discovery of supported resources
    # With no Rtype the caller gets the list of supported resource types.
    if Rtype == None:
        argNames = ["mem","io","net","cpu"]
        return argNames
    # Each branch returns a dict of argument-name -> bounds/help metadata.
    if Rtype.lower() == "cpu":
        argNames={"startload":{"upperBound":100,"lowerBound":0},"stopload":{"upperBound":100,"lowerBound":0}, "granularity":{"upperBound":100000,"lowerBound":0, "argHelp":"Number of runs to create"}, "duration":{"upperBound":100000,"lowerBound":0, "argHelp":"Time Distribution lasts for.\nUnits: seconds"}, "minJobTime":{"upperBound":10000000,"lowerBound":2, "argHelp":"Minimum time a single job's duration can be (any jobs under will be deleted).\nUnits: seconds"}}
        return argNames
    #get free amount of memory and set it to upper bound
    if Rtype.lower() == "mem":
        memReading=psutil.phymem_usage()
        allMemory =memReading.total/1048576
        argNames={"startload":{"upperBound":allMemory,"lowerBound":50, "argHelp":"Value for distribution to begin at.\nUnits: MB or %"},"stopload":{"upperBound":allMemory,"lowerBound":50, "argHelp":"Value for distribution to stop at.\nUnits: MB or %"}, "granularity":{"upperBound":100000,"lowerBound":0, "argHelp":"Number of runs to create"}, "duration":{"upperBound":100000,"lowerBound":0, "argHelp":"Time Distribution lasts for.\nUnits: seconds"}, "minJobTime":{"upperBound":10000000,"lowerBound":2, "argHelp":"Minimum time a single job's duration can be (any jobs under will be deleted).\nUnits: seconds"}}
        # NOTE(review): RESTYPE is assigned locally but never used or returned
        # (same in the io/net branches below).
        RESTYPE = "MEM"
#        print "Use Arg's: ",argNames," with mem"
        return argNames
    if Rtype.lower() == "io":
        argNames={"startload":{"upperBound":999999,"lowerBound":0, "argHelp":"Value for distribution to begin at.\nUnits: MB/s throughput"},"stopload":{"upperBound":999999,"lowerBound":0, "argHelp":"Value for distribution to stop at.\nUnits: MB/s throughput"}, "granularity":{"upperBound":100000,"lowerBound":0, "argHelp":"Number of runs to create"}, "duration":{"upperBound":100000,"lowerBound":0, "argHelp":"Time Distribution lasts for.\nUnits: seconds"}, "minJobTime":{"upperBound":10000000,"lowerBound":2, "argHelp":"Minimum time a single job's duration can be (any jobs under will be deleted).\nUnits: seconds"}}
        RESTYPE = "IO"
#        print "Use Arg's: ",argNames," with io"
        return argNames
    if Rtype.lower() == "net":
        argNames={"startload":{"upperBound":1000000,"lowerBound":0, "argHelp":"Value for distribution to begin at.\nUnits: MB/s throughput"},"stopload":{"upperBound":1000000,"lowerBound":0, "argHelp":"Value for distribution to stop at.\nUnits: MB/s throughput"}, "granularity":{"upperBound":100000,"lowerBound":0, "argHelp":"Number of runs to create"}, "duration":{"upperBound":100000,"lowerBound":0, "argHelp":"Time Distribution lasts for.\nUnits: seconds"}, "minJobTime":{"upperBound":10000000,"lowerBound":2, "argHelp":"Minimum time a single job's duration can be (any jobs under will be deleted).\nUnits: seconds"}}
        RESTYPE = "NET"
#        print "Use Arg's: ",argNames," with net"
        return argNames
if __name__=="__main__":
    # Module is driven by the emulation framework, not meant to run directly.
    pass
|
{
"content_hash": "49bb2023432b53776ab2c6fb486ff70e",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 619,
"avg_line_length": 50.23809523809524,
"alnum_prop": 0.6797788309636651,
"repo_name": "cragusa/cocoma",
"id": "67c3fd052a5776a796fd85958dfb5ed7c2717c35",
"size": "7043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributions/dist_exp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21251"
},
{
"name": "JavaScript",
"bytes": "100005"
},
{
"name": "Python",
"bytes": "351175"
},
{
"name": "Ruby",
"bytes": "19866"
},
{
"name": "Shell",
"bytes": "9935"
},
{
"name": "TeX",
"bytes": "310637"
}
],
"symlink_target": ""
}
|
"""Classes for representing collections for the Google Cloud Firestore API."""
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.cloud.firestore_v1.base_collection import (
BaseCollectionReference,
_item_to_document_ref,
)
from google.cloud.firestore_v1 import (
async_query,
async_document,
)
from google.cloud.firestore_v1.document import DocumentReference
from typing import AsyncIterator
from typing import Any, AsyncGenerator, Tuple
# Types needed only for Type Hints
from google.cloud.firestore_v1.transaction import Transaction
class AsyncCollectionReference(BaseCollectionReference):
    """A reference to a collection in a Firestore database.
    The collection may already exist or this class can facilitate creation
    of documents within the collection.
    Args:
        path (Tuple[str, ...]): The components in the collection path.
            This is a series of strings representing each collection and
            sub-collection ID, as well as the document IDs for any documents
            that contain a sub-collection.
        kwargs (dict): The keyword arguments for the constructor. The only
            supported keyword is ``client`` and it must be a
            :class:`~google.cloud.firestore_v1.client.Client` if provided. It
            represents the client that created this collection reference.
    Raises:
        ValueError: if
            * the ``path`` is empty
            * there are an even number of elements
            * a collection ID in ``path`` is not a string
            * a document ID in ``path`` is not a string
        TypeError: If a keyword other than ``client`` is used.
    """
    def __init__(self, *path, **kwargs) -> None:
        super(AsyncCollectionReference, self).__init__(*path, **kwargs)
    def _query(self) -> async_query.AsyncQuery:
        """Query factory.
        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`
        """
        return async_query.AsyncQuery(self)
    async def _chunkify(self, chunk_size: int):
        # Delegate chunked iteration to an async query over the whole collection.
        async for page in self._query()._chunkify(chunk_size):
            yield page
    async def add(
        self,
        document_data: dict,
        document_id: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
    ) -> Tuple[Any, Any]:
        """Create a document in the Firestore database with the provided data.
        Args:
            document_data (dict): Property names and values to use for
                creating the document.
            document_id (Optional[str]): The document identifier within the
                current collection. If not provided, an ID will be
                automatically assigned by the server (the assigned ID will be
                a random 20 character string composed of digits,
                uppercase and lowercase letters).
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  Defaults to a system-specified policy.
            timeout (float): The timeout for this request.  Defaults to a
                system-specified value.
        Returns:
            Tuple[:class:`google.protobuf.timestamp_pb2.Timestamp`, \
                :class:`~google.cloud.firestore_v1.async_document.AsyncDocumentReference`]:
                Pair of
                * The ``update_time`` when the document was created/overwritten.
                * A document reference for the created document.
        Raises:
            :class:`google.cloud.exceptions.Conflict`:
                If ``document_id`` is provided and the document already exists.
        """
        document_ref, kwargs = self._prep_add(
            document_data,
            document_id,
            retry,
            timeout,
        )
        # create() raises Conflict (rather than overwriting) on an existing id.
        write_result = await document_ref.create(document_data, **kwargs)
        return write_result.update_time, document_ref
    def document(
        self, document_id: str = None
    ) -> async_document.AsyncDocumentReference:
        """Create a sub-document underneath the current collection.
        Args:
            document_id (Optional[str]): The document identifier
                within the current collection. If not provided, will default
                to a random 20 character string composed of digits,
                uppercase and lowercase and letters.
        Returns:
            :class:`~google.cloud.firestore_v1.document.async_document.AsyncDocumentReference`:
                The child document.
        """
        return super(AsyncCollectionReference, self).document(document_id)
    async def list_documents(
        self,
        page_size: int = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
    ) -> AsyncGenerator[DocumentReference, None]:
        """List all subdocuments of the current collection.
        Args:
            page_size (Optional[int]]): The maximum number of documents
                in each page of results from this request. Non-positive values
                are ignored. Defaults to a sensible value set by the API.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  Defaults to a system-specified policy.
            timeout (float): The timeout for this request.  Defaults to a
                system-specified value.
        Returns:
            Sequence[:class:`~google.cloud.firestore_v1.collection.DocumentReference`]:
                iterator of subdocuments of the current collection. If the
                collection does not exist at the time of `snapshot`, the
                iterator will be empty
        """
        request, kwargs = self._prep_list_documents(page_size, retry, timeout)
        iterator = await self._client._firestore_api.list_documents(
            request=request,
            metadata=self._client._rpc_metadata,
            **kwargs,
        )
        # Resolve each raw document resource into a DocumentReference lazily.
        async for i in iterator:
            yield _item_to_document_ref(self, i)
    async def get(
        self,
        transaction: Transaction = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
    ) -> list:
        """Read the documents in this collection.
        This sends a ``RunQuery`` RPC and returns a list of documents
        returned in the stream of ``RunQueryResponse`` messages.
        Args:
            transaction
                (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]):
                An existing transaction that this query will run in.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  Defaults to a system-specified policy.
            timeout (float): The timeout for this request.  Defaults to a
                system-specified value.
        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).
        Returns:
            list: The documents in this collection that match the query.
        """
        query, kwargs = self._prep_get_or_stream(retry, timeout)
        return await query.get(transaction=transaction, **kwargs)
    async def stream(
        self,
        transaction: Transaction = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
    ) -> AsyncIterator[async_document.DocumentSnapshot]:
        """Read the documents in this collection.
        This sends a ``RunQuery`` RPC and then returns an iterator which
        consumes each document returned in the stream of ``RunQueryResponse``
        messages.
        .. note::
           The underlying stream of responses will time out after
           the ``max_rpc_timeout_millis`` value set in the GAPIC
           client configuration for the ``RunQuery`` API.  Snapshots
           not consumed from the iterator before that point will be lost.
        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).
        Args:
            transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.\
                Transaction`]):
                An existing transaction that the query will run in.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  Defaults to a system-specified policy.
            timeout (float): The timeout for this request.  Defaults to a
                system-specified value.
        Yields:
            :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`:
            The next document that fulfills the query.
        """
        query, kwargs = self._prep_get_or_stream(retry, timeout)
        async for d in query.stream(transaction=transaction, **kwargs):
            yield d  # pytype: disable=name-error
|
{
"content_hash": "2f78c53961a8c135852e21fb024779bf",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 95,
"avg_line_length": 39.723684210526315,
"alnum_prop": 0.6242685215855139,
"repo_name": "googleapis/python-firestore",
"id": "52847a3dcf0fcf4961b94a8815eb5a8a2c6872e3",
"size": "9653",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/firestore_v1/async_collection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2269873"
},
{
"name": "Shell",
"bytes": "32068"
}
],
"symlink_target": ""
}
|
from utils.dockerutil import DockerUtil
from utils.service_discovery.config_stores import get_config_store, SD_CONFIG_BACKENDS
def sd_configcheck(agentConfig, configs):
    """Trace where each configuration object was loaded from.

    Also print the containers detected by the agent and the templates
    found in the configuration store."""
    print("\nSource of the configuration objects built by the agent:\n")
    for check_name, config in configs.iteritems():
        print('Check "%s":\n source --> %s\n config --> %s\n' % (check_name, config[0], config[1]))

    # Each diagnostic step is best-effort: a failure is reported but never fatal.
    steps = (
        (print_containers, "Failed to collect containers info."),
        (lambda: print_templates(agentConfig), "Failed to collect configuration templates."),
    )
    for step, failure_message in steps:
        try:
            step()
        except Exception:
            print(failure_message)
def print_containers():
    """Print one summary line (short id, image, name) per container the
    Docker client can see."""
    containers = DockerUtil().client.containers()
    print("\nContainers info:\n")
    print("Number of containers found: %s" % len(containers))
    for container in containers:
        summary = (
            'ID: %s' % container.get('Id')[:12],
            'image: %s' % container.get('Image'),
            'name: %s' % DockerUtil.container_name_extractor(container)[0],
        )
        print("\t- %s %s %s" % summary)
    print('\n')
def print_templates(agentConfig):
    """Dump the check templates stored in the service-discovery backend.

    Does nothing when no supported backend is configured."""
    if agentConfig.get('sd_config_backend') not in SD_CONFIG_BACKENDS:
        return
    print("Configuration templates:\n")
    templates = {}
    sd_template_dir = agentConfig.get('sd_template_dir')
    config_store = get_config_store(agentConfig)
    try:
        templates = config_store.dump_directory(sd_template_dir)
    except Exception as ex:
        print("Failed to extract configuration templates from the backend:\n%s" % str(ex))

    for img, tpl in templates.iteritems():
        print(
            "- Image %s:\n\tcheck name: %s\n\tinit_config: %s\n\tinstance: %s" % (
                img,
                tpl.get('check_names'),
                tpl.get('init_configs'),
                tpl.get('instances'),
            )
        )
|
{
"content_hash": "9122556531c182dd29e8a35c70fbf07c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 101,
"avg_line_length": 38.31481481481482,
"alnum_prop": 0.6022232962783953,
"repo_name": "pmav99/praktoras",
"id": "60b4ba3ec56ada83112b532d770d0429c0de03e6",
"size": "2222",
"binary": false,
"copies": "1",
"ref": "refs/heads/conmon-13",
"path": "utils/service_discovery/configcheck.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "9060"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "2179610"
},
{
"name": "Ruby",
"bytes": "103726"
},
{
"name": "Shell",
"bytes": "58242"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration for the ``blog`` app: redefines
    # Post.click_count as a non-negative integer defaulting to 0.
    # verbose_name is the user-facing Chinese label ("click count").
    dependencies = [
        ('blog', '0005_auto_20170507_2026'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='click_count',
            field=models.PositiveIntegerField(default=0, verbose_name='点击次数'),
        ),
    ]
|
{
"content_hash": "3328c93e0814eb48dcbb6928aac5e30a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.603448275862069,
"repo_name": "r26zhao/django_blog",
"id": "c14c8e32c047b9c9fd19759ada4df849926576bc",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0006_auto_20170515_0003.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "240260"
},
{
"name": "Python",
"bytes": "770615"
}
],
"symlink_target": ""
}
|
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations when
dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seemlessly with standard file IO operations and the os module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
__docformat__ = "restructuredtext en"
import os
from shutil import rmtree
# Using a class instead of a module-level dictionary
# to reduce the inital 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way that
an instance of `_FileOpeners` itself can be indexed with the keys of that
dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return self._file_openers.keys()
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    Convenience wrapper: builds a temporary `DataSource` rooted at
    `destpath` and returns ``DataSource.open(path, mode)``.  If `path` is
    an URL it is downloaded into `destpath` first and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`.  Mode 'r' for reading, 'w' for writing, 'a' to
        append.  Available modes depend on the type of object specified by
        path.  Default is 'r'.
    destpath : str, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Returns
    -------
    out : file object
        The opened file.

    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed.  DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html') # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'

        >>> ds = DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories created in __init__; a user-supplied
        # destpath is never deleted.
        if self._istmpdest:
            rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file
        extension (i.e. one of the keys of `_file_openers`)."""
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing.

        Only 'w' and '+' count; 'a' (append) is deliberately not treated
        as writing here because this test is currently only used to
        sanitize modes for bz2 files.
        """
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}
            ``zip_ext`` keeps its leading dot (e.g. ``'.bz2'``), matching
            the keys of `_file_openers`; it is None for uncompressed files.

        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a tuple containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location. Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache (rooted at
        ``self._destpath``) and returns the path of the copy.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
            except URLError:
                raise URLError("URL not found: %s" % path)
            # Close the destination explicitly; the original code relied
            # on refcounting to flush and close the file object.
            dest = file(upath, 'w')
            try:
                dest.write(openedurl.read())
            finally:
                dest.close()
        else:
            # TODO: Why not just copy the file with shutil.copyfile?
            try:
                fp = file(path, 'r')
            except IOError:
                raise IOError("File not found: %s" % path)
            dest = file(upath, 'w')
            try:
                dest.write(fp.read())
            finally:
                dest.close()
                fp.close()
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is an URL, _findfile will cache a local copy and return the
        path to the cached file.  If path is a local file, _findfile will
        return a path to that local file.

        The search will include possible compressed versions of the file
        and return the first occurence found.

        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination
            directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # TODO:  This should be more robust.  Handles case where path
        #        includes the destpath, but not other sub-paths.
        #        Failing case:
        #          path = /home/guido/datafile.txt
        #          destpath = /home/alex/
        #          upath = self.abspath(path)
        #          upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        # Iterate until a fixed point: each pass strips leading separators
        # and parent references which normpath may have re-exposed.
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        # Test local path
        if os.path.exists(path):
            return True

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                del(netfile)
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r'):
        """
        Open and return file-like object.

        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        Raises
        ------
        ValueError
            If `path` is an URL and `mode` would open it for writing.
        IOError
            If `path` cannot be found.

        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # BZ2File does not support update ('+') modes, so strip it.
                # BUG FIX: the original compared ``ext == 'bz2'`` although
                # _splitzipext returns the extension with its dot, and it
                # discarded the result of mode.replace(), so the '+' was
                # never actually removed.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles.  Use `Repository` when you will
    be working with multiple files from one base URL.  Initialize
    `Repository` with the base URL, then refer to each file by its filename
    only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        splitpath = path.split(self._baseurl, 2)
        if len(splitpath) == 1:
            result = os.path.join(self._baseurl, path)
        else:
            result = path    # path contains baseurl already
        return result

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination
            directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.  This may, but does not have
            to, include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            # FIX: use the call form of raise (valid on Python 2 and 3)
            # instead of the Python-2-only ``raise Exc, msg`` statement.
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
|
{
"content_hash": "d2d0a34ac750c7ee2bdf5d810dfd5118",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 82,
"avg_line_length": 32.4496855345912,
"alnum_prop": 0.5845527667409632,
"repo_name": "NirBenTalLab/proorigami-cde-package",
"id": "957e7cb1827aec3c5476e95d40e47fc28f4b1c2b",
"size": "20638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cde-root/usr/lib64/python2.4/site-packages/numpy/lib/_datasource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "16762"
},
{
"name": "Python",
"bytes": "4730244"
},
{
"name": "Shell",
"bytes": "9915"
}
],
"symlink_target": ""
}
|
import optparse
import os
import shutil
import sys
from bridge_generator import BridgeGenerator
from interface_generator import InterfaceGenerator
from java_class import JavaClassLoader
from string import Template
from wrapper_generator import WrapperGenerator
# Classes list that have to generate bridge and wrap code.
CLASSES_TO_BE_PROCESS = [
    'XWalkCookieManagerInternal',
    'XWalkDownloadListenerInternal',
    'XWalkExtensionInternal',
    'XWalkViewInternal',
    'XWalkUIClientInternal',
    'XWalkResourceClientInternal',
    'XWalkPreferencesInternal',
    'XWalkNavigationItemInternal',
    'XWalkNavigationHistoryInternal',
    'XWalkJavascriptResultHandlerInternal',
    'XWalkJavascriptResultInternal',
    'ClientCertRequestHandlerInternal',
    'ClientCertRequestInternal',
]
# Reflection helper sources copied verbatim (with the package declaration
# rewritten) into the wrapper package.
# NOTE(review): the name has a typo ("HERLPER" for "HELPER"); kept as-is
# because other functions in this file reference it.
REFLECTION_HERLPER = [
    'ReflectMethod.java',
    'ReflectField.java',
    'ReflectConstructor.java',
]
# Java package names for the generated wrapper and bridge code.
WRAPPER_PACKAGE = 'org.xwalk.core'
BRIDGE_PACKAGE = 'org.xwalk.core.internal'
# Output directories (with the package path appended); populated by main()
# before any generator runs.
bridge_path = ''
wrapper_path = ''
def PerformSerialize(output_path, generator):
    """Write the code produced by |generator| into its class file under
    |output_path| and report the generated file name."""
    name = generator.GetGeneratedClassFileName()
    with open(os.path.join(output_path, name), 'w') as dest:
        dest.write(generator.GetGeneratedCode())
    print('%s has been generated!' % name)
def GenerateJavaBindingClass(input_dir):
    """Generate bridge and wrapper sources for every class listed in
    CLASSES_TO_BE_PROCESS, reading the originals from |input_dir|."""
    class_loader = JavaClassLoader(input_dir, CLASSES_TO_BE_PROCESS)
    for input_class in CLASSES_TO_BE_PROCESS:
        print('Generate bridge and wrapper code for %s' % input_class)
        java_data = class_loader.GetJavaData(input_class)
        if java_data.class_type == 'interface':
            # Interfaces only need a wrapper-side interface file.
            interface_generator = InterfaceGenerator(java_data, class_loader)
            interface_generator.RunTask()
            PerformSerialize(wrapper_path, interface_generator)
            continue
        # Concrete classes get a bridge...
        bridge_generator = BridgeGenerator(java_data, class_loader)
        bridge_generator.RunTask()
        PerformSerialize(bridge_path, bridge_generator)
        # ...and a wrapper.
        wrapper_generator = WrapperGenerator(java_data, class_loader)
        wrapper_generator.RunTask()
        PerformSerialize(wrapper_path, wrapper_generator)
def GenerateJavaReflectClass(input_dir):
    """Copy the reflection helper sources from |input_dir| into the wrapper
    tree, rewriting their package declaration to WRAPPER_PACKAGE."""
    for helper in REFLECTION_HERLPER:
        source = os.path.join(input_dir, helper)
        with open(os.path.join(wrapper_path, helper), 'w') as f:
            for line in open(source, 'r'):
                if line.startswith('package '):
                    line = 'package ' + WRAPPER_PACKAGE + ';\n'
                f.write(line)
def GenerateJavaTemplateClass(template_dir,
                              api_version, min_api_version, verify_xwalk_apk):
    """Instantiate the version templates from |template_dir| into the
    generated bridge and wrapper trees."""
    def render(template_name, output_dir, output_name, value):
        # Fill one template and write the result next to the generated code.
        template_file = os.path.join(template_dir, template_name)
        template = Template(open(template_file, 'r').read())
        with open(os.path.join(output_dir, output_name), 'w') as f:
            f.write(template.substitute(value))

    render('XWalkCoreVersion.template', bridge_path, 'XWalkCoreVersion.java',
           {'API_VERSION': api_version,
            'MIN_API_VERSION': min_api_version})
    render('XWalkAppVersion.template', wrapper_path, 'XWalkAppVersion.java',
           {'API_VERSION': api_version,
            'VERIFY_XWALK_APK': 'true' if verify_xwalk_apk == 1 else 'false'})
def Touch(path):
    """Create |path| (and its parent directories) if needed and bump its
    modification time, like the Unix ``touch`` command."""
    parent = os.path.dirname(path)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    with open(path, 'a'):
        os.utime(path, None)
def main(argv):
    """Parse command line options and regenerate the bridge/wrapper trees.

    Returns 1 on a usage error, None on success."""
    usage = """Usage: %prog [OPTIONS]
This script can generate bridge and wrap source files for given directory.
\'input_dir\' is provided as directory containing source files.
"""
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--input-dir',
                      help=('Input source file directory which contains '
                            'input files'))
    parser.add_option('--template-dir',
                      help=('Templates directory to generate java source '
                            'file'))
    parser.add_option('--bridge-output',
                      help=('Output directory where the bridge code is '
                            'placed.'))
    parser.add_option('--wrapper-output',
                      help=('Output directory where the wrap code is '
                            'placed.'))
    parser.add_option('--stamp', help='the file to touch on success.')
    parser.add_option('--api-version', help='API Version')
    parser.add_option('--min-api-version', help='Min API Version')
    parser.add_option('--verify-xwalk-apk', default=0, type='int',
                      help='Verify Crosswalk library APK before loading')
    options, _ = parser.parse_args(argv)

    # Guard clause: all three locations are mandatory.
    if not (options.input_dir and
            options.bridge_output and
            options.wrapper_output):
        print('Error: Must specify input and output.')
        return 1

    # Start from clean output trees.
    for out_dir in (options.bridge_output, options.wrapper_output):
        if os.path.isdir(out_dir):
            shutil.rmtree(out_dir)

    global bridge_path, wrapper_path
    bridge_path = os.path.join(options.bridge_output,
                               os.path.sep.join(BRIDGE_PACKAGE.split('.')))
    os.makedirs(bridge_path)
    wrapper_path = os.path.join(options.wrapper_output,
                                os.path.sep.join(WRAPPER_PACKAGE.split('.')))
    os.makedirs(wrapper_path)

    if options.input_dir:
        GenerateJavaBindingClass(options.input_dir)
        GenerateJavaReflectClass(options.input_dir)

    if options.template_dir:
        GenerateJavaTemplateClass(options.template_dir,
            options.api_version, options.min_api_version,
            options.verify_xwalk_apk)

    if options.stamp:
        Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "f98ffd74aa20fbd9e1d81585cbb557c6",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 35.94512195121951,
"alnum_prop": 0.6810856658184903,
"repo_name": "alex-zhang/crosswalk",
"id": "243615a0da462c7d69d22365394daec85b371628",
"size": "6082",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/reflection_generator/reflection_generator.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "31821"
},
{
"name": "C++",
"bytes": "1989250"
},
{
"name": "CSS",
"bytes": "1709"
},
{
"name": "HTML",
"bytes": "114591"
},
{
"name": "Java",
"bytes": "1186894"
},
{
"name": "JavaScript",
"bytes": "103706"
},
{
"name": "Objective-C",
"bytes": "688"
},
{
"name": "Objective-C++",
"bytes": "16628"
},
{
"name": "Python",
"bytes": "281841"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import struct
from enum import IntEnum
from typing import Optional, Tuple

from ..tl.types import InputPeerUser, InputPeerChat, InputPeerChannel
class SessionState:
    """
    Stores the information needed to fetch updates and about the current user.

    * user_id: 64-bit number representing the user identifier.
    * dc_id: 32-bit number relating to the datacenter identifier where the user is.
    * bot: is the logged-in user a bot?
    * pts: 64-bit number holding the state needed to fetch updates.
    * qts: alternative 64-bit number holding the state needed to fetch updates.
    * date: 64-bit number holding the date needed to fetch updates.
    * seq: 64-bit-number holding the sequence number needed to fetch updates.
    * takeout_id: 64-bit-number holding the identifier of the current takeout session.

    Note that some of the numbers will only use 32 out of the 64 available bits.
    However, for future-proofing reasons, we recommend you pretend they are 64-bit long.
    """

    __slots__ = ('user_id', 'dc_id', 'bot', 'pts', 'qts', 'date', 'seq', 'takeout_id')

    def __init__(
        self,
        user_id: int,
        dc_id: int,
        bot: bool,
        pts: int,
        qts: int,
        date: int,
        seq: int,
        takeout_id: Optional[int]
    ):
        # Assign every slot in declaration order.
        values = (user_id, dc_id, bot, pts, qts, date, seq, takeout_id)
        for slot, value in zip(self.__slots__, values):
            setattr(self, slot, value)

    def __repr__(self):
        return repr({slot: getattr(self, slot) for slot in self.__slots__})
class ChannelState:
    """
    Stores the information needed to fetch updates from a channel.

    * channel_id: 64-bit number representing the channel identifier.
    * pts: 64-bit number holding the state needed to fetch updates.
    """

    __slots__ = ('channel_id', 'pts')

    def __init__(self, channel_id: int, pts: int):
        self.channel_id = channel_id
        self.pts = pts

    def __repr__(self):
        return repr({slot: getattr(self, slot) for slot in self.__slots__})
class EntityType(IntEnum):
    """
    You can rely on the type value to be equal to the ASCII character one of:

    * 'U' (85): this entity belongs to a :tl:`User` who is not a ``bot``.
    * 'B' (66): this entity belongs to a :tl:`User` who is a ``bot``.
    * 'G' (71): this entity belongs to a small group :tl:`Chat`.
    * 'C' (67): this entity belongs to a standard broadcast :tl:`Channel`.
    * 'M' (77): this entity belongs to a megagroup :tl:`Channel`.
    * 'E' (69): this entity belongs to an "enormous" "gigagroup" :tl:`Channel`.
    """
    USER = ord('U')
    BOT = ord('B')
    GROUP = ord('G')
    CHANNEL = ord('C')
    MEGAGROUP = ord('M')
    GIGAGROUP = ord('E')

    def canonical(self):
        """
        Return the canonical version of this type.
        """
        return _canon_entity_types[self]


# Maps every concrete type to its canonical type: bots are users, and
# megagroups/gigagroups are channels.
_canon_entity_types = {
    EntityType.USER: EntityType.USER,
    EntityType.BOT: EntityType.USER,
    EntityType.GROUP: EntityType.GROUP,
    EntityType.CHANNEL: EntityType.CHANNEL,
    EntityType.MEGAGROUP: EntityType.CHANNEL,
    EntityType.GIGAGROUP: EntityType.CHANNEL,
}


class Entity:
    """
    Stores the information needed to use a certain user, chat or channel with the API.

    * ty: 8-bit number indicating the type of the entity (of type `EntityType`).
    * id: 64-bit number uniquely identifying the entity among those of the same type.
    * hash: 64-bit signed number needed to use this entity with the API.

    The string representation of this class is considered to be stable, for as long as
    Telegram doesn't need to add more fields to the entities. It can also be converted
    to bytes with ``bytes(entity)``, for a more compact representation.
    """

    __slots__ = ('ty', 'id', 'hash')

    def __init__(
        self,
        ty: EntityType,
        id: int,
        hash: int
    ):
        self.ty = ty
        self.id = id
        self.hash = hash

    @property
    def is_user(self):
        """
        ``True`` if the entity is either a user or a bot.
        """
        return self.ty in (EntityType.USER, EntityType.BOT)

    @property
    def is_group(self):
        """
        ``True`` if the entity is a small group chat or `megagroup`_.

        .. _megagroup: https://telegram.org/blog/supergroups5k
        """
        return self.ty in (EntityType.GROUP, EntityType.MEGAGROUP)

    @property
    def is_broadcast(self):
        """
        ``True`` if the entity is a broadcast channel or `broadcast group`_.

        .. _broadcast group: https://telegram.org/blog/autodelete-inv2#groups-with-unlimited-members
        """
        return self.ty in (EntityType.CHANNEL, EntityType.GIGAGROUP)

    @classmethod
    def from_str(cls, string: str):
        """
        Convert the string (of the form ``T.id.hash``) into an `Entity`.

        Raises TypeError for non-string input and ValueError for a
        malformed string.
        """
        try:
            ty, id, hash = string.split('.')
            ty, id, hash = ord(ty), int(id), int(hash)
        except AttributeError:
            raise TypeError(f'expected str, got {string!r}') from None
        except (TypeError, ValueError):
            raise ValueError(f'malformed entity str (must be T.id.hash), got {string!r}') from None

        return cls(EntityType(ty), id, hash)

    @classmethod
    def from_bytes(cls, blob):
        """
        Convert the bytes (a 17-byte ``<Bqq`` struct) into an `Entity`.

        Raises ValueError for malformed input.
        """
        try:
            ty, id, hash = struct.unpack('<Bqq', blob)
        except struct.error:
            # BUG FIX: the original message referenced the undefined name
            # ``string``, so a NameError was raised instead of ValueError.
            raise ValueError(f'malformed entity data, got {blob!r}') from None

        return cls(EntityType(ty), id, hash)

    def __str__(self):
        return f'{chr(self.ty)}.{self.id}.{self.hash}'

    def __bytes__(self):
        return struct.pack('<Bqq', self.ty, self.id, self.hash)

    def _as_input_peer(self):
        # Small groups have no access hash; users and channels do.
        if self.is_user:
            return InputPeerUser(self.id, self.hash)
        elif self.ty == EntityType.GROUP:
            return InputPeerChat(self.id)
        else:
            return InputPeerChannel(self.id, self.hash)

    def __repr__(self):
        return repr({k: getattr(self, k) for k in self.__slots__})
|
{
"content_hash": "ecd6e3dc7555c4725b01928656bcbc2d",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 100,
"avg_line_length": 31.630769230769232,
"alnum_prop": 0.60068093385214,
"repo_name": "LonamiWebs/Telethon",
"id": "5025dd3883e48ff1a5b6c44564743b0577f0d3d2",
"size": "6168",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "telethon/_updates/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "776"
},
{
"name": "CSS",
"bytes": "9611"
},
{
"name": "HTML",
"bytes": "8839"
},
{
"name": "JavaScript",
"bytes": "7489"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "1091881"
},
{
"name": "Shell",
"bytes": "352"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import requests
from tensorflow.python.framework import ops
# k-Nearest-Neighbor regression on the UCI Boston housing dataset: predictions
# are the distance-weighted average of the k nearest training targets.
# NOTE(review): written against the TF 0.x API (tf.sub / tf.neg /
# tf.batch_matmul / reduction_indices); these names were removed in TF 1.x.
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
housing_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
housing_header = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
cols_used = ['CRIM', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'TAX', 'PTRATIO', 'B', 'LSTAT']
num_features = len(cols_used)
housing_file = requests.get(housing_url)
# Parse whitespace-separated rows; skip empty tokens and blank lines.
housing_data = [[float(x) for x in y.split(' ') if len(x)>=1] for y in housing_file.text.split('\n') if len(y)>=1]
# Target is column 13 (MEDV, median home value); features are the cols_used subset.
y_vals = np.transpose([np.array([y[13] for y in housing_data])])
x_vals = np.array([[x for i,x in enumerate(y) if housing_header[i] in cols_used] for y in housing_data])
## Min-Max Scaling
x_vals = (x_vals - x_vals.min(0)) / x_vals.ptp(0)
# Split the data into train and test sets
train_indices = np.random.choice(len(x_vals), round(len(x_vals)*0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Declare k-value and batch size
k = 4
batch_size=len(x_vals_test)
# Placeholders
x_data_train = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
x_data_test = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
y_target_train = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target_test = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Declare distance metric
# L1
distance = tf.reduce_sum(tf.abs(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=2)
# L2
#distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=1))
# Predict: Get min distance index (Nearest neighbor)
# top_k of the negated distance == the k smallest distances.
top_k_xvals, top_k_indices = tf.nn.top_k(tf.neg(distance), k=k)
# Normalize the k (negated) distances into per-neighbor weights that sum to 1.
x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1)
x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32))
x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1)
top_k_yvals = tf.gather(y_target_train, top_k_indices)
# Weighted average of the k neighbors' targets.
prediction = tf.squeeze(tf.batch_matmul(x_val_weights,top_k_yvals), squeeze_dims=[1])
# Calculate MSE
mse = tf.div(tf.reduce_sum(tf.square(tf.sub(prediction, y_target_test))), batch_size)
# Calculate how many loops over training data
num_loops = int(np.ceil(len(x_vals_test)/batch_size))
for i in range(num_loops):
    min_index = i*batch_size
    # NOTE(review): clamps with len(x_vals_train) although the batch slices
    # the *test* set — presumably should be len(x_vals_test); harmless while
    # batch_size == len(x_vals_test) makes num_loops == 1. TODO confirm.
    max_index = min((i+1)*batch_size,len(x_vals_train))
    x_batch = x_vals_test[min_index:max_index]
    y_batch = y_vals_test[min_index:max_index]
    predictions = sess.run(prediction, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch,
                                         y_target_train: y_vals_train, y_target_test: y_batch})
    batch_mse = sess.run(mse, feed_dict={x_data_train: x_vals_train, x_data_test: x_batch,
                                         y_target_train: y_vals_train, y_target_test: y_batch})
    print('Batch #' + str(i+1) + ' MSE: ' + str(np.round(batch_mse,3)))
# Plot prediction and actual distribution
bins = np.linspace(5, 50, 45)
plt.hist(predictions, bins, alpha=0.5, label='Prediction')
plt.hist(y_batch, bins, alpha=0.5, label='Actual')
plt.title('Histogram of Predicted and Actual Values')
plt.xlabel('Med Home Value in $1,000s')
plt.ylabel('Frequency')
plt.legend(loc='upper right')
plt.show()
|
{
"content_hash": "1c4de6915d5a6a456a6aa791aaa62362",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 122,
"avg_line_length": 41.28735632183908,
"alnum_prop": 0.6801224944320713,
"repo_name": "benjaminoh1/tensorflowcookbook",
"id": "2d0f53770bb33dbff10fcf879e63c56f0d579531",
"size": "4506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter 05/nearest_neighbor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "313397"
}
],
"symlink_target": ""
}
|
from django.urls import path, reverse_lazy
import django.contrib.auth.views as auth_views
# Authentication URLs backed by Django's stock auth views.
urlpatterns = [
    # Login form (renders registration/login.html by default).
    path('login/', auth_views.LoginView.as_view(), name='login'),
    # Log out, then redirect back to the login page.
    path('logout/', auth_views.LogoutView.as_view(next_page=reverse_lazy('login')), name='logout'),
]
|
{
"content_hash": "040e18172f20fcf29e32859fc2206c04",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 99,
"avg_line_length": 34.5,
"alnum_prop": 0.7101449275362319,
"repo_name": "mjtamlyn/archery-scoring",
"id": "f955400a1546bb5ee3d01424ad4fb74448e5f29c",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243118"
},
{
"name": "HTML",
"bytes": "98205"
},
{
"name": "JavaScript",
"bytes": "255325"
},
{
"name": "Makefile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "361105"
},
{
"name": "Ruby",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "2460"
}
],
"symlink_target": ""
}
|
import re
from functools import reduce
from pyparsing import (
infixNotation,
opAssoc,
Optional,
Literal,
CharsNotIn,
ParseException,
)
from logging import Filter, _levelNames
import six
from django.apps import apps
from django.db import models
from django.conf import settings
from awx.main.utils.common import get_search_fields
__all__ = ['SmartFilter', 'ExternalLoggerEnabled']
class FieldFromSettings(object):
    """
    Descriptor whose value defaults to the named Django setting.

    A value explicitly assigned on the instance is stored in the instance's
    ``settings_override`` dict and takes precedence over the setting;
    assigning ``None`` removes that override so lookups fall back to
    settings again.
    """
    def __init__(self, setting_name):
        self.setting_name = setting_name
    def __get__(self, instance, type=None):
        # A per-instance override wins over the global setting.
        if self.setting_name in getattr(instance, 'settings_override', {}):
            return instance.settings_override[self.setting_name]
        return getattr(settings, self.setting_name, None)
    def __set__(self, instance, value):
        if value is None:
            if hasattr(instance, 'settings_override'):
                # Bug fix: previously popped the literal key 'instance', so
                # the override for this setting was never actually cleared.
                instance.settings_override.pop(self.setting_name, None)
        else:
            if not hasattr(instance, 'settings_override'):
                instance.settings_override = {}
            instance.settings_override[self.setting_name] = value
class ExternalLoggerEnabled(Filter):
    """
    Logging filter that decides, from (database-backed) settings, whether a
    record should be forwarded to the external log aggregator.
    """
    # Prevents recursive logging loops from swamping the server
    LOGGER_BLACKLIST = (
        # loggers that may be called in process of emitting a log
        'awx.main.utils.handlers',
        'awx.main.utils.formatters',
        'awx.main.utils.filters',
        'awx.main.utils.encryption',
        'awx.main.utils.log',
        # loggers that may be called getting logging settings
        'awx.conf'
    )
    # Each field reads its value from settings unless overridden per instance.
    lvl = FieldFromSettings('LOG_AGGREGATOR_LEVEL')
    enabled_loggers = FieldFromSettings('LOG_AGGREGATOR_LOGGERS')
    enabled_flag = FieldFromSettings('LOG_AGGREGATOR_ENABLED')
    def __init__(self, **kwargs):
        # Only the FieldFromSettings descriptors above are legal kwargs;
        # passing a value here overrides the corresponding setting.
        super(ExternalLoggerEnabled, self).__init__()
        for field_name, field_value in kwargs.items():
            if not isinstance(ExternalLoggerEnabled.__dict__.get(field_name, None), FieldFromSettings):
                raise Exception('%s is not a valid kwarg' % field_name)
            if field_value is None:
                continue
            setattr(self, field_name, field_value)
    def filter(self, record):
        """
        Uses the database settings to determine if the current
        external log configuration says that this particular record
        should be sent to the external log aggregator
        False - should not be logged
        True - should be logged
        """
        # Logger exceptions
        for logger_name in self.LOGGER_BLACKLIST:
            if record.name.startswith(logger_name):
                return False
        # General enablement
        if not self.enabled_flag:
            return False
        # Level enablement
        # NOTE(review): _levelNames exists only on Python 2's logging module,
        # as the comment below already flags — this block is Python 2 code.
        if record.levelno < _levelNames[self.lvl]:
            # logging._levelNames -> logging._nameToLevel in python 3
            return False
        # Logger type enablement
        loggers = self.enabled_loggers
        if not loggers:
            return False
        if record.name.startswith('awx.analytics'):
            # Analytics loggers are matched on their *last* name component.
            base_path, headline_name = record.name.rsplit('.', 1)
            return bool(headline_name in loggers)
        else:
            # Everything else is matched on its *first* name component.
            if '.' in record.name:
                base_name, trailing_path = record.name.split('.', 1)
            else:
                base_name = record.name
            return bool(base_name in loggers)
def string_to_type(t):
    """Coerce a token string to None/bool/int/float; other tokens pass through."""
    if t == u'null':
        return None
    if t == u'true':
        return True
    if t == u'false':
        return False
    # Whole-string numeric tokens, tried narrow-to-wide: integer then decimal.
    for pattern, caster in ((r'^[-+]?[0-9]+$', int),
                            (r'^[-+]?[0-9]+\.[0-9]+$', float)):
        if re.search(pattern, t):
            return caster(t)
    return t
def get_model(name):
    # Shortcut: look up a model of the 'main' app by name via the app registry.
    return apps.get_model('main', name)
class SmartFilter(object):
    """
    Parses a "smart search" filter string (key=value terms combined with
    and/or, via pyparsing) into a Django queryset over the Host model.
    """
    # Relationship whose keys are treated as JSON paths (__contains lookups).
    SEARCHABLE_RELATIONSHIP = 'ansible_facts'
    class BoolOperand(object):
        # One parsed `key=value` term; evaluates itself eagerly and stores
        # the matching Host queryset in ``self.result``.
        def __init__(self, t):
            kwargs = dict()
            k, v = self._extract_key_value(t)
            k, v = self._json_path_to_contains(k, v)
            Host = get_model('host')
            search_kwargs = self._expand_search(k, v)
            if search_kwargs:
                # `search` terms fan out to icontains across all search fields,
                # OR-ed together.
                kwargs.update(search_kwargs)
                q = reduce(lambda x, y: x | y, [models.Q(**{u'%s__icontains' % _k:_v}) for _k, _v in kwargs.items()])
                self.result = Host.objects.filter(q)
            else:
                # detect loops and restrict access to sensitive fields
                # this import is intentional here to avoid a circular import
                from awx.api.filters import FieldLookupBackend
                FieldLookupBackend().get_field_from_lookup(Host, k)
                kwargs[k] = v
                self.result = Host.objects.filter(**kwargs)
        def strip_quotes_traditional_logic(self, v):
            # For plain ORM lookups: remove surrounding double quotes.
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"'):
                return v[1:-1]
            return v
        def strip_quotes_json_logic(self, v):
            # For JSON-path lookups: like above, but `"null"` keeps its quotes
            # so it stays distinct from the JSON null literal.
            if type(v) is six.text_type and v.startswith('"') and v.endswith('"') and v != u'"null"':
                return v[1:-1]
            return v
        '''
        TODO: We should be able to express this in the grammar and let
              pyparsing do the heavy lifting.
        TODO: separate django filter requests from our custom json filter
              request so we don't process the key any.  This could be
              accomplished using a whitelist or introspecting the
              relationship refered to to see if it's a jsonb type.
        '''
        def _json_path_to_contains(self, k, v):
            # Rewrite `ansible_facts__a__b[]__c=v` into a single
            # `ansible_facts__contains={'a': {'b': [{'c': v}]}}` lookup.
            # Keys outside the searchable relationship pass through unchanged.
            if not k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP):
                v = self.strip_quotes_traditional_logic(v)
                return (k, v)
            # Strip off leading relationship key
            if k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP + '__'):
                strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP) + 2
            else:
                strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP)
            k = k[strip_len:]
            pieces = k.split(u'__')
            assembled_k = u'%s__contains' % (SmartFilter.SEARCHABLE_RELATIONSHIP)
            assembled_v = None
            last_v = None
            last_kv = None
            # Build the nested structure one path piece at a time; a trailing
            # `[]` on a piece makes that level a list instead of a dict.
            for i, piece in enumerate(pieces):
                new_kv = dict()
                if piece.endswith(u'[]'):
                    new_v = []
                    new_kv[piece[0:-2]] = new_v
                else:
                    new_v = dict()
                    new_kv[piece] = new_v
                if last_kv is None:
                    assembled_v = new_kv
                elif type(last_v) is list:
                    last_v.append(new_kv)
                elif type(last_v) is dict:
                    # NOTE(review): `.keys()[0]` only works on Python 2, where
                    # dict.keys() returns a list.
                    last_kv[last_kv.keys()[0]] = new_kv
                last_v = new_v
                last_kv = new_kv
            # Attach the value at the innermost level.
            v = self.strip_quotes_json_logic(v)
            if type(last_v) is list:
                last_v.append(v)
            elif type(last_v) is dict:
                last_kv[last_kv.keys()[0]] = v
            return (assembled_k, assembled_v)
        def _extract_key_value(self, t):
            # `t` is the pyparsing token list for one term; quoted atoms
            # arrive as three tokens ('"', text, '"').
            t_len = len(t)
            k = None
            v = None
            # key
            # "something"=
            v_offset = 2
            if t_len >= 2 and t[0] == "\"" and t[2] == "\"":
                k = t[1]
                v_offset = 4
            # something=
            else:
                k = t[0]
            # value
            # ="something"
            if t_len > (v_offset + 2) and t[v_offset] == "\"" and t[v_offset + 2] == "\"":
                # Re-wrap in quotes so downstream quote-stripping logic can
                # distinguish quoted values.
                v = u'"' + six.text_type(t[v_offset + 1]) + u'"'
                #v = t[v_offset + 1]
            # empty ""
            elif t_len > (v_offset + 1):
                v = u""
            # no ""
            else:
                v = string_to_type(t[v_offset])
            return (k, v)
        def _expand_search(self, k, v):
            # For `search=` / `<relation>__search=` keys, return a dict mapping
            # every searchable field (of Host or the related model) to v;
            # returns None for non-search keys.
            if 'search' not in k:
                return None
            model, relation = None, None
            if k == 'search':
                model = get_model('host')
            elif k.endswith('__search'):
                relation = k.split('__')[0]
                try:
                    model = get_model(relation)
                except LookupError:
                    raise ParseException('No related field named %s' % relation)
            search_kwargs = {}
            if model is not None:
                search_fields = get_search_fields(model)
                for field in search_fields:
                    if relation is not None:
                        k = '{0}__{1}'.format(relation, field)
                    else:
                        k = field
                    search_kwargs[k] = v
            return search_kwargs
    class BoolBinOp(object):
        # Left-associative fold over the operand querysets at t[0][0], t[0][2],
        # ... (odd positions hold the 'and'/'or' keywords themselves).
        def __init__(self, t):
            self.result = None
            i = 2
            while i < len(t[0]):
                if not self.result:
                    self.result = t[0][0].result
                right = t[0][i].result
                self.result = self.execute_logic(self.result, right)
                i += 2
    class BoolAnd(BoolBinOp):
        def execute_logic(self, left, right):
            # Queryset intersection.
            return left & right
    class BoolOr(BoolBinOp):
        def execute_logic(self, left, right):
            # Queryset union.
            return left | right
    @classmethod
    def query_from_string(cls, filter_string):
        '''
        TODO:
          * handle values with " via: a.b.c.d="hello\"world"
          * handle keys with " via: a.\"b.c="yeah"
          * handle key with __ in it
        '''
        # Grammar: (quoted-or-bare atom) '=' (optional quoted-or-bare atom),
        # combined with infix 'and'/'or'; each term eagerly becomes a queryset
        # via BoolOperand and the operators combine them.
        filter_string_raw = filter_string
        filter_string = six.text_type(filter_string)
        # Treat any whitespace char actually present in the input, plus the
        # structural characters, as atom delimiters.
        unicode_spaces = list(set(six.text_type(c) for c in filter_string if c.isspace()))
        unicode_spaces_other = unicode_spaces + [u'(', u')', u'=', u'"']
        atom = CharsNotIn(unicode_spaces_other)
        atom_inside_quotes = CharsNotIn(u'"')
        atom_quoted = Literal('"') + Optional(atom_inside_quotes) + Literal('"')
        EQUAL = Literal('=')
        grammar = ((atom_quoted | atom) + EQUAL + Optional((atom_quoted | atom)))
        grammar.setParseAction(cls.BoolOperand)
        boolExpr = infixNotation(grammar, [
            ("and", 2, opAssoc.LEFT, cls.BoolAnd),
            ("or", 2, opAssoc.LEFT, cls.BoolOr),
        ])
        try:
            res = boolExpr.parseString('(' + filter_string + ')')
        except ParseException:
            raise RuntimeError(u"Invalid query %s" % filter_string_raw)
        if len(res) > 0:
            return res[0].result
        raise RuntimeError("Parsing the filter_string %s went terribly wrong" % filter_string)
|
{
"content_hash": "f859206293c821267c817f6df3f990ca",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 117,
"avg_line_length": 32.75595238095238,
"alnum_prop": 0.5288933309104125,
"repo_name": "wwitzel3/awx",
"id": "eaf8c805b6c26c68966198a5c0931470c378db8e",
"size": "11006",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "awx/main/utils/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303046"
},
{
"name": "Dockerfile",
"bytes": "5713"
},
{
"name": "HTML",
"bytes": "496559"
},
{
"name": "JavaScript",
"bytes": "3513112"
},
{
"name": "Makefile",
"bytes": "21133"
},
{
"name": "PowerShell",
"bytes": "10176"
},
{
"name": "Python",
"bytes": "3904288"
},
{
"name": "Shell",
"bytes": "13833"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
# Lightweight record describing a clamped method: its name, return type,
# argument types, declared throws, and method/parameter annotations.
ClampMethod = namedtuple(
    'ClampMethod',
    ['name', 'returntype', 'argtypes', 'throws',
     'method_annotations', 'parameter_annotations'],
)
|
{
"content_hash": "ca4fa27c8ee7aec7aefec1a590aa5853",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 39.2,
"alnum_prop": 0.7091836734693877,
"repo_name": "alvin319/CarnotKE",
"id": "202c11aeb3b3a65bd10d510e720eb0324267a3f3",
"size": "196",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "jyhton/tests/python/custom_proxymaker/clamp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1605"
},
{
"name": "Batchfile",
"bytes": "48974"
},
{
"name": "C",
"bytes": "2514"
},
{
"name": "CSS",
"bytes": "15212"
},
{
"name": "GAP",
"bytes": "129853"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "Groovy",
"bytes": "6780"
},
{
"name": "HTML",
"bytes": "33215526"
},
{
"name": "Java",
"bytes": "14173221"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "2261"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "19642"
},
{
"name": "Python",
"bytes": "26386087"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "103473"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "436870"
}
],
"symlink_target": ""
}
|
import paramiko
class GetAixData:
    """
    Collects inventory data (system info, NIC/IP/MAC records) from an AIX
    host over SSH using paramiko. Python 2 code (print statements).
    """
    def __init__(self, ip, ssh_port, timeout, usr, pwd, use_key_file, key_file,
                 get_serial_info, get_hardware_info, get_os_details,
                 get_cpu_info, get_memory_info, ignore_domain, upload_ipv6, debug):
        self.machine_name = ip
        self.port = int(ssh_port)
        self.timeout = timeout
        self.username = usr
        self.password = pwd
        self.ssh = paramiko.SSHClient()
        self.use_key_file = use_key_file
        self.key_file = key_file
        # Feature flags controlling which data is gathered / uploaded.
        self.get_serial_info = get_serial_info
        self.get_hardware_info = get_hardware_info
        self.get_os_details = get_os_details
        self.get_cpu_info = get_cpu_info
        self.get_memory_info = get_memory_info
        self.ignore_domain = ignore_domain
        self.upload_ipv6 = upload_ipv6
        self.debug = debug
        # NOTE(review): SSHClient is constructed twice; the first instance
        # above is immediately discarded.
        self.ssh = paramiko.SSHClient()
        self.conn = None
        self.sysdata = {}   # system-level facts for this host
        self.alldata = []   # sysdata plus one dict per NIC/MAC record
        self.name = None    # device name, filled in by get_sys()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    def main(self):
        # Entry point: connect, gather system and network data, return it all.
        self.connect()
        self.get_sys()
        self.get_IP()
        self.alldata.append(self.sysdata)
        return self.alldata
    def connect(self):
        # Opens the SSH session using either password or key authentication;
        # errors are printed and swallowed (subsequent calls will then fail).
        try:
            if not self.use_key_file:
                self.ssh.connect(str(self.machine_name), port=self.port,
                                 username=self.username, password=self.password, timeout=self.timeout)
            else:
                self.ssh.connect(str(self.machine_name), port=self.port,
                                 username=self.username, key_filename=self.key_file, timeout=self.timeout)
        except paramiko.AuthenticationException:
            print str(self.machine_name) + ': authentication failed'
            return None
        except Exception as err:
            print str(self.machine_name) + ': ' + str(err)
            return None
    def get_sys(self):
        # Parses `lsconf` + `oslevel` output into self.sysdata.
        # NOTE(review): `cmd` is only assigned when get_cpu_info is true, but
        # it is used unconditionally below — with get_cpu_info false this
        # raises NameError. TODO confirm intended gating.
        if self.get_cpu_info:
            cmd = 'lsconf | egrep -i "system model|machine serial|processor type|number of processors|' \
                  'processor clock speed|cpu type|kernel type|^memory size|disk drive|host name"; oslevel'
        stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30)
        data_out = stdout.readlines()
        data_err = stderr.readlines()
        if not data_err:
            # `oslevel` output is the last line of the combined command.
            osver = data_out[-1].strip()
            self.sysdata.update({'osver': osver if osver else 'D42_NULL'})
            self.sysdata.update({'os': 'AIX'})
            disknum = 0
            for x in data_out:
                if 'System Model' in x:
                    pass
                if 'Machine Serial Number' in x:
                    serial = x.split()[-1].strip()
                    self.sysdata.update({'serial_no': serial})
                if 'Number Of Processors' in x:
                    cpucount = x.split()[-1].strip()
                    self.sysdata.update({'cpucount': cpucount})
                if 'Processor Clock Speed' in x:
                    # Second-to-last field: the value (last field is the unit).
                    cpupower = x.split()[-2].strip()
                    self.sysdata.update({'cpupower': cpupower})
                if 'CPU Type' in x:
                    pass
                if 'Kernel Type' in x:
                    pass
                if 'Memory Size' in x:
                    memory = x.split()[-2].strip()
                    self.sysdata.update({'memory': memory})
                if 'Disk Drive' in x:
                    disknum += 1
                    # hddsize = self.get_hdd_size(hddname)
                    # self.sysdata.update({'hddsize':hddsize})
                if 'Host Name' in x:
                    devicename = x.split()[-1].strip()
                    if self.ignore_domain:
                        # Strip the DNS domain, keeping only the short name.
                        if '.' in devicename:
                            self.name = devicename.split('.')[0]
                        else:
                            self.name = devicename
                    else:
                        self.name = devicename
                    self.sysdata.update({'name': self.name})
            self.sysdata.update({'hddcount': disknum})
        else:
            print data_err
    def get_MAC(self, nicname):
        # Returns the hardware (MAC) address of a NIC via `entstat`, or None.
        cmd = "entstat -d %s| grep -i 'hardware address'" % nicname
        stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30)
        data_out = stdout.readlines()
        data_err = stderr.readlines()
        if not data_err:
            mac = data_out[0].split()[2].strip()
            return mac
        else:
            print 'Error: ', data_err
            return None
    def get_IP(self):
        # Parses `ifconfig -a` output: groups continuation lines (tab-indented)
        # under their NIC header, then emits one IP record and one MAC record
        # per address, appended to self.alldata.
        stdin, stdout, stderr = self.ssh.exec_command("/usr/sbin/ifconfig -a", timeout=30)
        data_out = stdout.readlines()
        data_err = stderr.readlines()
        if not data_err:
            nics = []
            header = ''
            for rec in data_out:
                if rec.startswith('\t'):
                    header += rec
                else:
                    if header == '':
                        header += rec
                    else:
                        nics.append(list(header.split('\n')))
                        header = ''
                        header += rec
            nics.append(list(header.split('\n')))
            for nic in nics:
                nicname = nic[0].split(':')[0]
                # Skip loopback interfaces.
                if not nicname.startswith('lo'):
                    mac = self.get_MAC(nicname)
                    for rec in nic:
                        nicdata = {}
                        macdata = {}
                        if 'inet ' in rec or 'inet6 ' in rec:
                            ip = rec.split()[1]
                            if '/' in ip:  # ipv6
                                ip = ip.split('/')[0]
                            name = self.name
                            nicdata.update({'ipaddress': ip})
                            nicdata.update({'macaddress': mac})
                            nicdata.update({'device': name})
                            nicdata.update({'tag': nicname})
                            self.alldata.append(nicdata)
                            if mac != '':
                                macdata.update({'macaddress': mac})
                                macdata.update({'port_name': nicname})
                                macdata.update({'device': name})
                                self.alldata.append(macdata)
        else:
            print 'Error: ', data_err
    def get_hdd_size(self, hddname):
        # Returns the disk size in MB (bootinfo -s reports KB), or None on error.
        cmd = "bootinfo -s %s" % hddname
        stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=30)
        data_out = stdout.readlines()
        data_err = stderr.readlines()
        if not data_err:
            size = int(data_out[0].strip()) / 1024
            return str(size)
        else:
            print 'Error: ', data_err
|
{
"content_hash": "fd669872b04d46035e3643af4b9c01d2",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 106,
"avg_line_length": 41.61494252873563,
"alnum_prop": 0.44966164894351607,
"repo_name": "device42/nix_bsd_mac_inventory",
"id": "55f5edb4d0a5833362bc8fb578fdeb7fc3db1e61",
"size": "7241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module_aix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129084"
}
],
"symlink_target": ""
}
|
import os
from datetime import datetime
import json
# Python 2 one-off migration script: wraps each exported .xml form with a JSON
# header block (content-type, length, original mtime, source IP, domain) and
# writes the result as a .postexport file for re-submission.
directory = r'C:\Source\hq\data\supervisor_export_all_08-08-2009\export\BRAC\CHW\commcare\input'
out_directory = r'C:\Source\hq\data\supervisor_export_all_08-08-2009\brac-pf'
domain = "BRAC"
#domain = "Pathfinder"
files = os.listdir(directory)
count = 0
# NOTE(review): the loop variable `file` (and `dir` below) shadow builtins;
# `file` is additionally rebound to the open file object inside the loop.
for file in files:
    if "xml" in file:
        filename = (os.path.join(directory,file))
        file = open(filename, "rb")
        dir, short_filename = os.path.split(filename)
        #new_dir = os.path.join(dir, "out")
        new_dir = out_directory
        payload = file.read()
        file.close()
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        new_filename = short_filename.replace(".xml", ".postexport")
        # File mtime stands in for the original "time received" of the form.
        (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(filename)
        headers = { "content-type" : "text/xml",
                    "content-length" : str(len(payload)),
                    "time-received" : str(datetime.fromtimestamp(mtime)),
                    "original-ip" : "192.168.7.211",
                    "domain" : domain
                    }
        # Output layout: JSON headers, a blank line, then the raw XML payload.
        fout = open(os.path.join(new_dir, new_filename), 'w')
        jsoned = json.dumps(headers)
        fout.write(jsoned)
        fout.write("\n\n")
        fout.write(payload)
        fout.close()
        count = count + 1
print "exported %s forms from %s to %s" % (count, directory, out_directory)
|
{
"content_hash": "8fe06c5be0592edde8517886da1f44ce",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 96,
"avg_line_length": 35.875,
"alnum_prop": 0.5860627177700348,
"repo_name": "fredwilliam/PMO",
"id": "26710028aaa2812ef0ada3c39f8b6f2d855dc287",
"size": "1481",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "utilities/data_migration/supervisor_export_script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "553345"
},
{
"name": "PHP",
"bytes": "2787"
},
{
"name": "Python",
"bytes": "3626515"
},
{
"name": "Shell",
"bytes": "487"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class ApiextensionsV1ServiceReference(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'name': 'str',
        'namespace': 'str',
        'path': 'str',
        'port': 'int'
    }
    attribute_map = {
        'name': 'name',
        'namespace': 'namespace',
        'path': 'path',
        'port': 'int'
    } if False else {
        'name': 'name',
        'namespace': 'namespace',
        'path': 'path',
        'port': 'port'
    }
    def __init__(self, name=None, namespace=None, path=None, port=None, local_vars_configuration=None):  # noqa: E501
        """ApiextensionsV1ServiceReference - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._name = None
        self._namespace = None
        self._path = None
        self._port = None
        self.discriminator = None
        # name/namespace are required (validated in their setters);
        # path/port are optional and only set when provided.
        self.name = name
        self.namespace = namespace
        if path is not None:
            self.path = path
        if port is not None:
            self.port = port
    @property
    def name(self):
        """Gets the name of this ApiextensionsV1ServiceReference.  # noqa: E501
        name is the name of the service. Required  # noqa: E501
        :return: The name of this ApiextensionsV1ServiceReference.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this ApiextensionsV1ServiceReference.
        name is the name of the service. Required  # noqa: E501
        :param name: The name of this ApiextensionsV1ServiceReference.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def namespace(self):
        """Gets the namespace of this ApiextensionsV1ServiceReference.  # noqa: E501
        namespace is the namespace of the service. Required  # noqa: E501
        :return: The namespace of this ApiextensionsV1ServiceReference.  # noqa: E501
        :rtype: str
        """
        return self._namespace
    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this ApiextensionsV1ServiceReference.
        namespace is the namespace of the service. Required  # noqa: E501
        :param namespace: The namespace of this ApiextensionsV1ServiceReference.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and namespace is None:  # noqa: E501
            raise ValueError("Invalid value for `namespace`, must not be `None`")  # noqa: E501
        self._namespace = namespace
    @property
    def path(self):
        """Gets the path of this ApiextensionsV1ServiceReference.  # noqa: E501
        path is an optional URL path at which the webhook will be contacted.  # noqa: E501
        :return: The path of this ApiextensionsV1ServiceReference.  # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this ApiextensionsV1ServiceReference.
        path is an optional URL path at which the webhook will be contacted.  # noqa: E501
        :param path: The path of this ApiextensionsV1ServiceReference.  # noqa: E501
        :type: str
        """
        self._path = path
    @property
    def port(self):
        """Gets the port of this ApiextensionsV1ServiceReference.  # noqa: E501
        port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.  # noqa: E501
        :return: The port of this ApiextensionsV1ServiceReference.  # noqa: E501
        :rtype: int
        """
        return self._port
    @port.setter
    def port(self, port):
        """Sets the port of this ApiextensionsV1ServiceReference.
        port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.  # noqa: E501
        :param port: The port of this ApiextensionsV1ServiceReference.  # noqa: E501
        :type: int
        """
        self._port = port
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Recursively serializes nested models (anything with a to_dict()),
        # including inside lists and dict values.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApiextensionsV1ServiceReference):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ApiextensionsV1ServiceReference):
            return True
        return self.to_dict() != other.to_dict()
|
{
"content_hash": "54d741c474fee396ee07b124d69fcfcf",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 197,
"avg_line_length": 32.179611650485434,
"alnum_prop": 0.6014481822295972,
"repo_name": "kubernetes-client/python",
"id": "cbdefa1c3bfc4934a8da74fecaaa118fd8d41c0c",
"size": "6646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/apiextensions_v1_service_reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
}
|
from HTMLParser import HTMLParser
import os
from sgmllib import SGMLParser
import urlparse
__author__ = 'Kelvin'
import urllib2
class FileDownloader(object):
def __init__(self, file_url, save_path='./', min_size=0):
self.url = file_url
self.save_path = save_path
self.min_size = min_size
def download(self):
file_name = os.path.join(self.save_path, self.url.split('/')[-1])
url_obj = urllib2.urlopen(self.url)
download_file = open(file_name, 'wb')
meta_data = url_obj.info()
file_size = int(meta_data.getheaders("Content-Length")[0])
if self.min_size > 0 and file_size < min_size:
print 'file is too small to download'
return
#print "Downloading: %s Bytes: %s" % (file_name, file_size)
else:
file_size_dl = 0
block_sz = 8192
while True:
_buffer = url_obj.read(block_sz)
if not _buffer:
break
file_size_dl += len(_buffer)
download_file.write(_buffer)
status = r"=[downloading %s] [%10d / %s ] [%3.2f%%]" % (file_name,
file_size_dl,
file_size,
file_size_dl * 100. / file_size)
status += chr(8) * (len(status) + 1)
print status,
download_file.close()
class URLLister(HTMLParser):
    """
    Collects absolute anchor hrefs (``urls``), absolute image sources
    (``imgs``) and hrefs ending in "gz" (``gzs``) from an HTML document.
    """
    def reset(self):
        # Bug fix: this class derives from HTMLParser, but reset previously
        # called SGMLParser.reset, leaving the HTMLParser state uninitialized.
        HTMLParser.reset(self)
        self.urls = []
        self.imgs = []
        self.gzs = []
    def handle_starttag(self, tag, attrs):
        # Bug fix: HTMLParser does not dispatch to start_<tag> methods the way
        # SGMLParser did, so with the previous ``pass`` body the collectors
        # below were never invoked and all lists stayed empty.
        handler = getattr(self, 'start_' + tag, None)
        if handler is not None:
            handler(attrs)
    def start_a(self, attrs):
        href = [v for k, v in attrs if k == "href" and v.startswith("http")]
        if href:
            self.urls.extend(href)
    def start_img(self, attrs):
        src = [v for k, v in attrs if k == "src" and v.startswith("http")]
        if src:
            self.imgs.extend(src)
    def start_gz(self, attrs):
        # NOTE(review): 'gz' is not an HTML tag, so this never fires through
        # handle_starttag; kept for interface compatibility with callers that
        # read ``gzs``.
        href = [v for k, v in attrs if k == "href" and v.endswith("gz")]
        if href:
            self.gzs.extend(href)
files_list = []  # accumulates downloadable (.gz) URLs discovered while crawling
def get_url_of_page(url, if_img=False, if_gz=False):
    """
    Fetch ``url`` and return URLs found in its HTML via URLLister.
    ``if_img`` adds image sources; ``if_gz`` returns "gz" links instead of
    anchor hrefs. Network errors are printed and yield an empty result.
    """
    urls = []
    try:
        f = urllib2.urlopen(url, timeout=1).read()
        print f
        url_listen = URLLister()
        url_listen.feed(f)
        if if_img:
            urls.extend(url_listen.imgs)
        if if_gz:
            urls.extend(url_listen.gzs)
        else:
            urls.extend(url_listen.urls)
    except urllib2.URLError, e:
        print e.reason
    return urls
def get_page_html(begin_url, depth, ignore_outer, main_site_domain):
    """
    Recursively crawl pages starting at ``begin_url``, recording "gz" links
    into the module-level ``files_list`` at the deepest level.

    * depth: remaining recursion depth; at 1 this page's gz links are recorded.
    * ignore_outer: when True, URLs not containing ``main_site_domain`` are skipped.
    * main_site_domain: netloc of the site being crawled.
    """
    if ignore_outer:
        if not main_site_domain in begin_url:
            return
    if depth == 1:
        urls = get_url_of_page(begin_url, False, True)
        files_list.extend(urls)
    else:
        urls = get_url_of_page(begin_url)
        if urls:
            for url in urls:
                # Bug fix: the recursive call previously omitted the
                # ignore_outer/main_site_domain arguments, raising TypeError
                # whenever depth > 1.
                get_page_html(url, depth - 1, ignore_outer, main_site_domain)
def download_file(save_path, min_size):
print 'start downloading...'
for _file in files_list:
print _file
downloader = FileDownloader(_file, save_path, min_size)
downloader.download()
print 'finish download.'
if __name__ == "__main__":
    # Entry point: crawl `url` for .gz links, then download each hit.
    #url = "http://localhost:9200/pycharm-124.253.dmg"
    #url = "http://localhost:9200"
    url = "http://172.22.2.225:9000"
    #_file = FileDownloader(url)
    #_file.download()
    # Destination directory for downloads; created if missing.
    save_path = "/tmp/kelvin"
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    # Files with a Content-Length below this many bytes are skipped.
    min_size = 80
    # Depth 1: only the start page itself is scanned for .gz links.
    max_depth = 1
    # Restrict the crawl to the starting site's domain.
    ignore_outer = True
    main_site_domain = urlparse.urlsplit(url).netloc
    get_page_html(url, max_depth, ignore_outer, main_site_domain)
    download_file(save_path, min_size)
|
{
"content_hash": "b01f34123d50449bb6f6898edda8c9d7",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 104,
"avg_line_length": 28.25179856115108,
"alnum_prop": 0.5215176979882862,
"repo_name": "seawaywen/memodir_skels",
"id": "8cb084532bdfa352ca1b766b0a5b6492b2837781",
"size": "3927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memodir/skels/templates/buildout_project/utils/file_retrieve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "533966"
},
{
"name": "JavaScript",
"bytes": "1295964"
},
{
"name": "Python",
"bytes": "49246"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
}
|
import logging
import json
from logging import StreamHandler, Formatter
from ethereum.utils import bcolors, isnumeric
# Level applied to the root logger and used by configure() when no
# explicit config string is given.
DEFAULT_LOGLEVEL = 'INFO'
# Record formats: raw message for JSON mode vs. "LEVEL:name<TAB>msg".
JSON_FORMAT = '%(message)s'
PRINT_FORMAT = '%(levelname)s:%(name)s\t%(message)s'
TRACE = 5
known_loggers = set()
log_listeners = []

# Register TRACE as a custom level (below DEBUG) on the stdlib logging
# module and give every Logger a .trace() method.
def _trace(self, msg, *args, **kwargs):
    if not self.isEnabledFor(TRACE):
        return
    self._log(TRACE, msg, args, **kwargs)

logging.Logger.trace = _trace
logging.TRACE = TRACE
logging.addLevelName(TRACE, "TRACE")
class LogRecorder(object):
    """
    temporarily records all logs, w/o level filtering
    use only once!
    """
    # Guard against a recorder that is registered but never drained.
    max_capacity = 1000 * 1000

    def __init__(self):
        self._records = []
        log_listeners.append(self._add_log_record)

    def pop_records(self):
        # can only be called once
        collected, self._records = self._records[:], None
        log_listeners.remove(self._add_log_record)
        return collected

    def _add_log_record(self, msg):
        self._records.append(msg)
        assert len(self._records) < self.max_capacity
def get_configuration():
    """
    get a configuration (snapshot) that can be used to call configure
    snapshot = get_configuration()
    configure(**snapshot)
    """
    root = getLogger()
    name_levels = [('', logging.getLevelName(root.level))]
    for name, logger in list(root.manager.loggerDict.items()):
        # Bug fix: loggerDict contains `logging.PlaceHolder` entries for
        # intermediate dotted names; those have no `.level` and crashed
        # this function. Apply the same guard configure() uses.
        if not hasattr(logger, 'level'):
            continue
        name_levels.append((name, logging.getLevelName(logger.level)))
    config_string = ','.join('%s:%s' % x for x in name_levels)
    return dict(config_string=config_string, log_json=root.log_json)
def get_logger_names():
    """Return the registered logger names, sorted with None/'' first."""
    return sorted(known_loggers, key=lambda n: n or '')
class BoundLogger(object):
    """Pairs a logger with fixed context key/values that are merged into
    every logging call made through it."""

    def __init__(self, logger, context):
        self.logger = logger
        self.context = context

    def bind(self, **kwargs):
        # Stack another layer of context on top of this bound logger.
        return BoundLogger(self, kwargs)

    def _proxy(self, method_name, *args, **kwargs):
        # Call-site kwargs win over the stored context on key collisions.
        merged = self.context.copy()
        merged.update(kwargs)
        return getattr(self.logger, method_name)(*args, **merged)

    def trace(self, *args, **kwargs):
        return self._proxy('trace', *args, **kwargs)

    def debug(self, *args, **kwargs):
        return self._proxy('debug', *args, **kwargs)

    def info(self, *args, **kwargs):
        return self._proxy('info', *args, **kwargs)

    def warning(self, *args, **kwargs):
        return self._proxy('warning', *args, **kwargs)

    warn = warning

    def error(self, *args, **kwargs):
        return self._proxy('error', *args, **kwargs)

    def exception(self, *args, **kwargs):
        return self._proxy('exception', *args, **kwargs)

    def critical(self, *args, **kwargs):
        return self._proxy('critical', *args, **kwargs)

    fatal = critical
class SLogger(logging.Logger):
    """Logger subclass supporting structured kwargs, optional JSON
    output (self.log_json) and highlighted messages."""

    def __init__(self, name, level=DEFAULT_LOGLEVEL):
        self.warn = self.warning
        super(SLogger, self).__init__(name, level=level)

    def is_active(self, level_name='trace'):
        """Return True when records at `level_name` would be emitted."""
        return self.isEnabledFor(logging._checkLevel(level_name.upper()))

    def format_message(self, msg, kwargs, highlight):
        """Render msg + kwargs either as a JSON event or a plain line."""
        if getattr(self, 'log_json', False):
            payload = {}
            for k, v in kwargs.items():
                # Numbers pass through; everything else is repr()'d so the
                # payload is always JSON-serializable.
                payload[k] = v if isnumeric(v) or isinstance(v, (float, complex)) else repr(v)
            payload['event'] = "{}.{}".format(self.name, msg)
            return json.dumps(payload)
        prefix = bcolors.WARNING if highlight else ""
        suffix = bcolors.ENDC if highlight else ""
        pairs = " ".join("{}={!s}".format(k, v) for k, v in kwargs.items())
        return "{}{} {}{}".format(prefix, msg, pairs, suffix)

    def bind(self, **kwargs):
        return BoundLogger(self, kwargs)

    def _log(self, level, msg, args, **kwargs):
        # Leftover kwargs are the structured payload; stash them on the
        # record (via extra) so RootLogger.handle can forward them.
        exc_info = kwargs.pop('exc_info', None)
        extra = kwargs.pop('extra', {})
        highlight = kwargs.pop('highlight', False)
        extra['kwargs'] = kwargs
        extra['original_msg'] = msg
        rendered = self.format_message(msg, kwargs, highlight)
        super(SLogger, self)._log(level, rendered, args, exc_info, extra)

    def DEV(self, msg, *args, **kwargs):
        """Shortcut to output highlighted log text"""
        kwargs['highlight'] = True
        self.critical(msg, *args, **kwargs)
class RootLogger(SLogger):
    """
    A root logger is not that different to any other logger, except that
    it must have a logging level and there is only one instance of it in
    the hierarchy.
    """

    def __init__(self, level):
        """
        Initialize the logger with the name "root".
        """
        super(RootLogger, self).__init__("root", level)
        self.log_json = False

    def handle(self, record):
        # Fan the structured payload out to registered listeners before
        # the normal handler machinery runs.
        if log_listeners:
            payload = getattr(record, 'kwargs', {}).copy()
            payload['event'] = getattr(record, 'original_msg', "")
            for listener in log_listeners:
                listener(payload)
        super(RootLogger, self).handle(record)
class SManager(logging.Manager):
    """Manager that hands out SLogger instances."""

    def __init__(self, rootnode):
        super(SManager, self).__init__(rootnode)
        # Bug fix: this assignment was made *before* super().__init__,
        # which resets loggerClass to None — so it was silently clobbered
        # and only the global setLoggerClass below kept things working.
        self.loggerClass = SLogger

    def getLogger(self, name):
        # Also set the process-wide default so loggers created through the
        # plain stdlib path are SLoggers too.
        logging.setLoggerClass(SLogger)
        return super(SManager, self).getLogger(name)
# Module-wide root logger, wired into SLogger so that getLogger() below
# always resolves through this manager and shares this root.
rootLogger = RootLogger(DEFAULT_LOGLEVEL)
SLogger.root = rootLogger
SLogger.manager = SManager(SLogger.root)
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.
    If no name is specified, return the root logger.
    """
    if not name:
        return rootLogger
    logger = SLogger.manager.getLogger(name)
    # New loggers inherit the root's current JSON-output setting.
    logger.log_json = rootLogger.log_json
    return logger
def configure(config_string=None, log_json=False):
    """Configure the root handler/format and per-logger levels from a
    comma-separated "name:LEVEL" string (empty name = root)."""
    if not config_string:
        config_string = ":{}".format(DEFAULT_LOGLEVEL)
    log_format = JSON_FORMAT if log_json else PRINT_FORMAT
    rootLogger.log_json = bool(log_json)
    if not rootLogger.handlers:
        handler = StreamHandler()
        handler.setFormatter(Formatter(log_format))
        rootLogger.addHandler(handler)
    # Reset logging levels before applying new config below
    for logger in SLogger.manager.loggerDict.values():
        if hasattr(logger, 'setLevel'):
            # Guard against `logging.PlaceHolder` instances
            logger.setLevel(logging.NOTSET)
    for spec in config_string.split(','):
        name, _, level = spec.partition(':')
        getLogger(name).setLevel(level.upper())
configure_logging = configure
def set_level(name, level):
    """Set logger `name` to `level`, given as a level-name string
    (not an int — kept as an assert to preserve caller-visible errors)."""
    assert not isinstance(level, int)
    getLogger(name).setLevel(getattr(logging, level.upper()))
def get_logger(name=None):
    """Record `name` among the known loggers, then return its logger."""
    known_loggers.add(name)
    return getLogger(name)
def DEBUG(msg, *args, **kwargs):
    """temporary logger during development that is always on"""
    logger = getLogger("DEBUG")
    if not logger.handlers:
        # Bug fix: a fresh StreamHandler was appended on *every* call,
        # so repeated DEBUG() calls duplicated each output line.
        logger.addHandler(StreamHandler())
    logger.propagate = False
    logger.setLevel(logging.DEBUG)
    logger.DEV(msg, *args, **kwargs)
|
{
"content_hash": "bab1e77ab004fe3d0f4100aa6da1c247",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 93,
"avg_line_length": 29.60655737704918,
"alnum_prop": 0.618078626799557,
"repo_name": "vaporry/pyethereum",
"id": "eb9d59c324ab8fbe42fdea67a4cb82e51f1a2b8b",
"size": "7224",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ethereum/slogging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2045"
},
{
"name": "Python",
"bytes": "454620"
}
],
"symlink_target": ""
}
|
{
'(Recipient)': '(Empfänger)',
"'Cancel' will indicate an asset log entry did not occur": "'Abbrechen' zeigt an, dass ein Asset Log Eintrag nicht eingetreten ist",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Eine Position, die den geografischen Bereich für diese Region definiert. Dies kann ein Standort aus der Standorthierarchie, oder ein Gruppenstandort, oder ein Standort mit Grenzbereich sein.',
"Acronym of the organization's name, eg. IFRC.": 'Abkürzung des Organisationsnamen, z. B. IFRC.',
"Authenticate system's Twitter account": 'Authentifizierung für den Twitter Account des Systems',
"Can't import tweepy": 'Tweepy kann nicht importiert werden',
"Caution: doesn't respect the framework rules!": 'Achtung: Die Rahmenbedingungen des Frameworks werden nicht beachtet!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatieren Sie die Liste der Attributwerte und die RGB-Wert zur Verwendung dieser als ein JSON-Objekt, z. B.: {Rot: '#FF0000 ', grün: '#00FF00 ', gelb: '#FFFF00 '}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Wenn ausgewählt, wird der Ort dieser Anlage immer aktualisiert, sobald der Standort der Person aktualisiert wird.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Wenn diese Konfiguration einen Bereich für die Regionenauswahl repräsentiert, geben Sie einen Namen für die Verwendung in der Auswahl. Der Name für eine persönliche Kartenkonfiguration wird mit dem Namen des Benutzers festgelegt.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer, der diese Organisation definiert, automatisch als Mitarbeiter dieser Organisation zugeordnet sobald er sich anmeldet, ausgenommen die Domäne stimmt nicht mit dem Domänenfeld überein.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Wenn dies angekreuzt ist, wird es die Basisposition des Benutzers und dadurch gesteuert wo der Benutzer auf der Karte angezeigt wird.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Wenn sie das Krankenhaus nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Krankenhaus hinzufügen' anklicken.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Wenn sie das Büro nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Büro hinzufügen' anklicken.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Wenn sie die Organisation nicht in der Liste sehen, dann können sie eine neue hinzufügen indem sie auf den Link "Organisation hinzufügen" klicken.',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Anstelle der automatischen Synchronisation von anderen Peers über das Netz, können sie auch über Dateien synchronisieren, was nötig ist, wenn kein Netzwerk vorhanden ist. Sie können diese Seite verwenden um Sync Daten aus Dateien zu importieren and auch um Daten in Form von Sync Dateien zu exportieren. Ein Klick auf den Link rechts bringt Sie zu dieser Seite.',
"Level is higher than parent's": 'Die Stufe ist höher als das übergeordnete Element',
"Need a 'url' argument!": "Braucht eine 'url' als Argument!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. Der Name der Geometrie-Spalte. In PostGIS ist der Standardwert 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": 'Übergeordnete Ebene muss höher als dieser Eintrag. Die Stufe seines Eltern Elements ist',
    "Password fields don't match": 'Kennwortfelder stimmen nicht überein',
"Phone number to donate to this organization's relief efforts.": 'Telefonnummer für Spenden an diese Nothilfeorganisation.',
"Please come back after sometime if that doesn't help.": 'Wenn das nicht hilft, kommen Sie nach einiger Zeit bitte wieder.',
"Quantity in %s's Inventory": "Menge in %s's Bestand",
"Select a Room from the list or click 'Create Room'": "Wählen Sie einen Raum aus der Liste oder klicken Sie auf 'Raum hinzufügen'",
"Select a person in charge for status 'assigned'": 'Wählen Sie eine verantwortliche Person aus für den Status "zugeordnet"',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche auf der untersten Hierarchieebene einen übergeordneten Zuständigkeitsbereich brauchen. Beispiel: Wenn 'district' der kleinste Bereich in der Hierarchie ist, dann müssen alle speziellen Bereiche einen 'district' als übergeordnetes Element haben.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche einen übergeordneten Zuständigkeitsbereich in der Gebietshierarchie brauchen. Es kann dabei hilfreich sein eine "region" festzulegen, die den betroffenen Bereich repräsentiert.',
"Sorry, things didn't get done on time.": 'Leider konnten die Aufgaben nicht rechtzeitig ausgeführt werden.',
"Sorry, we couldn't find that page.": 'Leider konnte diese Seite nicht gefunden werden.',
"System's Twitter account updated": 'Der Twitter Account des Systems wurde aktualisiert',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Die Spender für dieses Projekt. Mehrere Werte können durch Halten der 'Steuerungstaste' (Strg / Ctrl) ausgewählt werden.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Die URL der Bilddatei. Wenn Sie keine Grafikdatei hochladen, dann müssen Sie hier eine URL angeben.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einem Namen zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Um nach einem Körper zu suchen, geben Sie die Identifikationsmarken-Nummer des Körpers ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Körper.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben sie entweder den Namen, die ID, den Organisationsnamen oder ein Acronym jeweils getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben Sie Namen oder die ID des Krankenhauses getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Um einen Ort zu suchen, geben Sie den Namen ein. Sie können % als Wildcard verwenden. Die Auswahl von Drücken 'Suchen' ohne Eingabe führt zur Auflistung aller Orte.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einer Person zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Für die Suche nach einer Bewertung, geben Sie einen beliebigen Teil der Ticketnummer der Bewertung ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Bewertungen.",
"Type the first few characters of one of the Person's names.": 'Geben Sie die ersten paar Zeichen des Namens einer Person ein.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Laden Sie hier die Grafikdatei hoch. Wenn sie keine Grafikdatei hochladen, dann müssen Sie im Feld eine URL auf eine im Web verfügbare Grafikdatei angeben.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Beim Synchronisieren der Daten mit anderen Installationen, können Konflikte auftreten wenn beide (oder mehrere) Parteien die gleichen Daten geändert haben, d. h. widersprüchliche Informationen vorliegen. Das Synchronisationsmodul versucht solche Konflikte automatisch zu beheben, was jedoch in manchen Fällen nicht möglich ist. In solchen Fällen ist es Ihre Aufgabe, diese Konflikte manuell zu beheben; klicken Sie auf den rechten Link, um auf diese Seite zu gelangen.',
    "You haven't made any calculations": 'Sie haben keine Berechnungen gemacht',
"couldn't be parsed so NetworkLinks not followed.": 'konnte nicht interpretiert so dass Netzwerklinks nicht verfolgt werden.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Enthält ein GroundOverlay oder ScreenOverlay die in OpenLayers noch nicht unterstützt werden, es wird möglicherweise nicht richtig funktionieren.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" ist ein optionaler Ausdruck wie "field1=\'newvalue\'\\ ". Sie können die Ergebnisse eines JOINs nicht aktualisieren oder löschen.',
'# of International Staff': '# der internationalen Mitarbeiter',
'# of National Staff': '# der nationalen Mitarbeiter',
'# of Vehicles': '# der Fahrzeuge',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\n Wenn der Typ des Requests "%(type)s" ist, geben Sie die %(type)s bitte auf der nächsten Seite ein.',
'%(system_name)s - Verify Email': '%(system_name)s - Email überprüfen',
'%s rows deleted': '%s gelöschte Zeilen',
'%s rows updated': '%s Zeilen aktualisiert',
'& then click on the map below to adjust the Lat/Lon fields': '& anschließend klicken Sie auf die Karte weiter unten um die Längen- und Breitengradwerte zu korrigieren',
'* Required Fields': '* erforderliche Felder',
'0-15 minutes': '0 - 15 Minuten',
'1 Assessment': '1 Bewertung',
'1 location, shorter time, can contain multiple Tasks': '1 Position, kürzere Zeit, kann mehrere Aufgaben beinhalten',
'1-3 days': '1-3 Tage',
'15-30 minutes': '15-30 Minuten',
'2 different options are provided here currently:': '2 verschiedene Optionen stehen hier derzeit zur Verfügung:',
'2x4 Car': 'Fahrzeug mit einer Antriebsachse',
'30-60 minutes': '30-60 Minuten',
'4-7 days': '4-7 Tage',
'4x4 Car': 'Allradfahrzeug',
'8-14 days': '8-14 Tage',
'3W': 'Wer? Was? Wo?',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Es kann eine Zuordnung eines Symbol zu einer individuellen Position erfolgen, um damit die Symbolisierung der Objektklasse zu überschreiben.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ein Referenzdokument wie z. B. eine Datei, URL oder Ansprechpartner zur Überprüfung dieser Daten. Sie können die ersten Zeichen eines vorhandenen Dokumentnamens eingeben um dieses zu referenzieren.',
'A brief description of the group (optional)': 'Eine kurze Beschreibung der Gruppe (optional)',
'A catalog of different Assessment Templates including summary information': 'Ein Katalog von verschiedenen Beurteilungsvorlagen inklusive einer Zusammenfassung',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Eine Datei von einem GPS Gerät das eine Reihe von geographischen Positionen im XML-Format enthält.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Eine Datei im GPX-Format aus einem GPS Gerät deren Zeitstempel genutzt werden können, um sie mit den Zeitstempeln von Fotos zu verknüpfen und diese dann auf einer Karte darzustellen.',
'A library of digital resources, such as photos, documents and reports': 'Eine Bibliothek von digitalen Ressourcen, wie z. B. Fotos, Dokumente und Berichte',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Eine Gebietsgruppe kann verwendet werden, um den Bereich eines betroffenen Gebietes zu definieren, falls dieses nicht mit einer vorhandenen administrativen Einheit zusammenfällt.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Eine Gebietsgruppe besteht aus mehreren Gebieten (häufig eine Gruppe von Verwaltungsregionen, die einen eigenen Zuständigkeitsbereich bilden).',
'A location group must have at least one member.': 'Eine Gebietsgruppe muss mindestens ein Element beinhalten.',
'ABOUT THIS MODULE': 'ÜBER DIESES MODUL',
'ACCESS DATA': 'ZUGRIFFSDATEN',
'Actioning officer': 'Verantwortliche Person',
'ANY': 'Irgendwelche',
'API is documented here': 'Die API ist hier dokumentiert',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Schnelle Evaluierung - angepasst für Neuseeland',
'Abbreviation': 'Abkürzung',
'Ability to Fill Out Surveys': 'Möglichkeit Umfragen auszufüllen',
'Ability to customize the list of details tracked at a Shelter': 'Möglichkeit die Liste der Detailangaben zu einer Unterkunft anzupassen',
'Ability to customize the list of human resource tracked at a Shelter': 'Möglichkeit die Liste der menschlichen Ressourcen einer Unterkunft anzupassen',
'Ability to customize the list of important facilities needed at a Shelter': 'Möglichkeit die Liste mit den wichtigen Einrichtungen, die in einer Unterkunft benötigt werden, anzupassen',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Möglichkeit die Ergebnisse von abgeschlossen und/oder teilweise ausgefüllten Umfragen zu einzusehen',
'About': 'Über',
'About Us': 'Über uns',
'Accept Push': 'Akzeptiert Push',
'Access denied': 'Zugriff verweigert',
'Access to Shelter': 'Zugang zu Unterkünften',
'Access to education services': 'Zugang zu Ausbildungsdienstleistungen',
'Accessibility of Affected Location': 'Erreichbarkeit der betroffenen Region',
'Accompanied Child': 'Begleitetes Kind',
'Account Registered - Please Check Your Email': 'Benutzerkonto registriert - Bitte überprüfen Sie Ihre E-Mail',
'Account SID': 'SID des Accounts',
'Acronym': 'Abkürzung',
'Actionable by all targeted recipients': 'Bearbeitbar von allen adressierten Empfängern',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Bearbeitbar nur von bestimmten Übungsteilnehmern; Übungsidentifikator sollte unter <note> auftauchen',
'Actioned?': 'Bearbeitet?',
'Actions taken as a result of this request.': 'Als Ergebnis auf diese Anfrage gestartete Aktionen.',
'Actions': 'Aktionen',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktivieren Sie Ereignisse aus den SZENARIO Vorlagen um die passenden Ressourcen zuzuordnen (Menschen, Anlagen und Einrichtungen).',
'Active Problems': 'Aktive Probleme',
'Active': 'aktiv',
'Activities matching Assessments': 'Aktivitäten passend zur Beurteilung',
'Activities of boys 13-17yrs before disaster': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren vor der Katastrophe',
'Activities of boys 13-17yrs now': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren heute',
'Activities of boys <12yrs before disaster': 'Aktivitäten von Jungen unter 12 Jahren vor der Katastrophe',
'Activities of boys <12yrs now': 'Aktivitäten von Jungen unter 12 Jahren heute',
'Activities of children': 'Aktivitäten von Kindern',
'Activities of girls 13-17yrs before disaster': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren vor der Katastrophe',
'Activities of girls 13-17yrs now': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren heute',
'Activities of girls <12yrs before disaster': 'Aktivitäten von Mädchen unter 12 Jahren vor der Katastrophe',
'Activities of girls <12yrs now': 'Aktivitäten von Mädchen unter 12 Jahre heute',
'Activities': 'Aktivitäten',
'Activity Added': 'Aktivität hinzugefügt',
'Activity Deleted': 'Aktivität gelöscht',
'Activity Details': 'Details zur Aktivität',
'Activity Report': 'Bericht zur Aktivität',
'Activity Reports': 'Berichte zu Aktivitäten',
'Activity Type': 'Typ der Aktivität',
'Activity Types': 'Typen von Aktivität',
'Activity Updated': 'Aktivität aktualisiert',
'Activity': 'Aktivität',
'Add Activity Type': 'Aktivitätstyp hinzufügen',
'Add Address': 'Adresse hinzufügen',
'Add Alternative Item': 'Alternativen Artikel hinzufügen',
'Add Assessment Summary': 'Zusammenfassung der Beurteilung hinzufügen',
'Add Assessment': 'Beurteilung hinzufügen',
'Add Asset Log Entry - Change Label': 'Bestandsprotokoll Eintrag hinzufügen - Beschriftung verändern',
'Add Availability': 'Verfügbarkeit hinzufügen',
'Add Baseline Type': 'Basislinien-Typ hinzufügen',
'Add Baseline': 'Basislinie hinzufügen',
'Add Bundle': 'Paket hinzufügen',
'Add Camp Service': 'Camp-Dienst hinzufügen',
'Add Camp Type': 'Camp Art hinzufügen',
'Add Camp': 'Camp hinzufügen',
'Add Certificate for Course': 'Zertifikat für Kurs hinzufügen',
'Add Certification': 'Zertifizierung hinzufügen',
'Add Competency': 'Qualifikation hinzufügen',
'Add Contact': 'Kontaktperson hinzufügen',
'Add Contact Information': 'Kontaktinformation hinzufügen',
'Add Credential': 'Qualifikation hinzufügen',
'Add Credentials': 'Qualifikationen hinzufügen',
'Add Disaster Victims': 'Katastrophenopfer hinzufügen',
'Add Distribution.': 'Verteilung hinzufügen.',
'Add Donor': 'Spender hinzufügen',
'Add Flood Report': 'Flut Bericht hinzufügen',
'Add Group Member': 'Gruppenmitglied hinzufügen',
'Add Human Resource': 'Personal hinzufügen',
'Add Identity': 'Identität hinzufügen',
'Add Image': 'Bild hinzufügen',
'Add Impact Type': 'Auswirkungstyp Hinzufügen',
'Add Impact': 'Auswirkung hinzufügen',
'Add Item to Catalog': 'Artikel zu Katalog hinzufügen',
'Add Item to Commitment': 'Eintrag zur Zusage hinzufügen',
'Add Item to Inventory': 'Artikel zu Inventar hinzufügen',
'Add Item to Request': 'Artikel zur Anforderung hinzufügen',
'Add Item to Shipment': 'Artikel der Lieferung hinzufügen',
'Add Item': 'Artikel hinzufügen',
'Add Job Role': 'Tätigkeit hinzufügen',
'Add Key': 'Schlüssel hinzufügen',
'Add Kit': 'Ausstattung (Kit) hinzufügen',
'Add Layer to this Profile': 'Kartenebene zu diesem Profil hinzufügen',
'Add Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add Location': 'Standort hinzufügen',
'Add Log Entry': 'Protokolleintrag hinzufügen',
'Add Member': 'Mitglied hinzufügen',
'Add Membership': 'Mitgliedschaft hinzufügen',
'Add Message': 'Nachricht hinzufügen',
'Add Mission': 'Auftrag hinzufügen',
'Add Mobile Commons Settings': 'Mobile Commons Einstellungen hinzufügen',
'Add Need Type': 'Bedarfstyp hinzufügen',
'Add Need': 'Bedarf hinzufügen',
'Add New Assessment Summary': 'Neue Beurteilungsbeschreibung hinzufügen',
'Add New Baseline Type': 'Einen neuen Grundlinientyp hinzufügen',
'Add New Baseline': 'Eine neue Grundlinie hinzufügen',
'Add New Budget': 'Ein neues Budget hinzufügen',
'Add New Bundle': 'Ein neues Paket hinzufügen',
'Add New Camp Service': 'Neuen Camp Service hinzufügen',
'Add New Camp Type': 'Neuen Camp Typ hinzufügen',
'Add New Camp': 'Neues Camp hinzufügen',
'Add New Cluster Subsector': 'Neuen Cluster Unterbereich hinzufügen',
'Add New Cluster': 'Neuen Cluster hinzufügen',
'Add New Commitment Item': 'Zugesagten Artikel hinzufügen',
'Add New Document': 'Neues Dokument hinzufügen',
'Add New Donor': 'Neuen Spender hinzufügen',
'Add New Entry': 'Neuen Eintrag hinzufügen',
'Add New Event': 'Neues Ereignis hinzufügen',
'Add New Flood Report': 'Neuen Flutbericht hinzufügen',
'Add New Human Resource': 'Neue Human Resource hinzufügen',
'Add New Image': 'Neue Grafik hinzufügen',
'Add New Impact Type': 'Neuen Auswirkungstyp hinzufügen',
'Add New Impact': 'Neue Auswirkung hinzufügen',
'Add New Item to Kit': 'Neuen Artikel zur Ausstattung (Kit) hinzufügen',
'Add New Key': 'Neuen Schlüssel hinzufügen',
'Add New Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add New Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add New Member': 'Neues Mitglied hinzufügen',
'Add New Membership': 'Neue Mitgliedschaft hinzufügen',
'Add New Need Type': 'Neuen Bedarfstyp hinzufügen',
'Add New Need': 'Neuen Bedarf hinzufügen',
'Add New Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add New Problem': 'Neues Problem hinzufügen',
'Add New Rapid Assessment': 'Neue Schnell-Beurteilung hinzufügen',
'Add New Received Item': 'Neuen erhaltenen Artikel hinzufügen',
'Add New Record': 'Neuen Datensatz hinzufügen',
'Add New Request Item': 'Neuen Anfrageartikel hinzufügen',
'Add New Request': 'Neue Anfrage hinzufügen',
'Add New River': 'Neuen Fluss hinzufügen',
'Add New Role to User': 'Benutzer eine neue Rolle zuweisen',
'Add New Scenario': 'Neues Szenario hinzufügen',
'Add New Sent Item': 'Neuen gesendeten Artikel hinzufügen',
'Add New Setting': 'Neue Einstellung hinzufügen',
'Add New Solution': 'Neue Lösung hinzufügen',
'Add New Staff Type': 'Neue Mitarbeitertyp hinzufügen',
'Add New Subsector': 'Neuen Teilbereich hinzufügen',
'Add New Survey Answer': 'Neue Antwort zur Umfrage hinzufügen',
'Add New Survey Question': 'Neue Frage zur Umfrage hinzufügen',
'Add New Survey Series': 'Neue Umfrageserie hinzufügen',
'Add New Survey Template': 'Neue Umfragevorlage hinzufügen',
'Add New Team': 'Neues Team hinzufügen',
'Add New Ticket': 'Neues Ticket hinzufügen',
'Add New Track': 'Neuen Pfad hinzufügen',
'Add New User to Role': 'Neuen Benutzer der Rolle hinzufügen',
'Add New': 'Neu hinzufügen',
'Add Organization Domain': 'Organisationsdomain hinzufügen',
'Add Peer': 'Peer-Zugriffspunkt hinzufügen',
'Add Person': 'Person hinzufügen',
'Add Photo': 'Foto hinzufügen',
'Add PoI': 'PoI hinzufügen',
'Add Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add Position': 'Position hinzufügen',
'Add Problem': 'Problem hinzufügen',
'Add Question': 'Frage hinzufügen',
'Add Rapid Assessment': 'Schnell-Beurteilung hinzufügen',
'Add Record': 'Datensatz hinzufügen',
'Add Reference Document': 'Referenzdokument hinzufügen',
'Add Report': 'Bericht hinzufügen',
'Add Request': 'Anfrage hinzufügen',
'Add Section': 'Abschnitt hinzufügen',
'Add Setting': 'Einstellung hinzufügen',
'Add Skill': 'Fähigkeit hinzufügen',
'Add Skill Equivalence': 'Fähigkeitsäquivalenz hinzufügen',
'Add Skill Provision': 'Fähigkeitsbestimmung hinzufügen',
'Add Skill to Request': 'Fähigkeit zur Anfrage hinzufügen',
'Add Solution': 'Lösung hinzufügen',
'Add Staff Type': 'Mitarbeitertyp hinzufügen',
'Add Subscription': 'Abonnement hinzufügen',
'Add Subsector': 'Teilbereich hinzufügen',
'Add Survey Answer': 'Umfrageantwort hinzufügen',
'Add Survey Question': 'Umfrage Frage hinzufügen',
'Add Survey Series': 'Umfrage Serie hinzufügen',
'Add Survey Template': 'Umfrage Vorlage hinzufügen',
'Add Team Member': 'Teammitglied hinzufügen',
'Add Team': 'Team hinzufügen',
'Add Ticket': 'Ticket hinzufügen',
'Add to Bin': 'Zum Lagerbehälter hinzufügen',
'Add Training': 'Schulung hinzufügen',
'Add Twilio Channel': 'Twilio Kanal hinzufügen',
'Add Twitter Channel': 'Twitter Kanal hinzufügen',
'Add Unit': 'Einheit hinzufügen',
'Add Vehicle': 'Fahrzeug hinzufügen',
'Add Vehicle Type': 'Fahrzeugtyp hinzufügen',
'Add Volunteer Availability': 'Verfügbarkeit von Freiwilligen hinzufügen',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Fügen Sie ein Referenzdokument z. B. eine Datei, URL oder einen Ansprechpartner zur Überprüfung dieser Daten ein. Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt.',
'Add a Volunteer': 'Einen Freiwilligen hinzufügen',
'Add a new certificate to the catalog.': 'Hinzufügen eines neuen Zertifikats zum Katalog',
'Add a new competency rating to the catalog.': 'Neue Kompetenzeinstufung zum Katalog hinzufügen',
'Add a new course to the catalog.': 'Neuen Kurs zum Katalog hinzufügen',
'Add a new job role to the catalog.': 'Neue Tätigkeit zum Katalog hinzufügen',
'Add a new skill provision to the catalog.': 'Neue Bereitstellung einer Fähigkeit zum Katalog hinzufügen',
'Add a new skill to the catalog.': 'Neue Fähigkeit zum Katalog hinzufügen',
'Add a new skill type to the catalog.': 'Neue Fähigkeitsart zum Katalog hinzufügen.',
'Add new Group': 'Neue Gruppe hinzufügen',
'Add new Individual': 'Neues Individuum hinzufügen',
'Add new project.': 'Neues Projekt hinzufügen.',
'Add staff members': 'Mitarbeiter hinzufügen',
'Add strings manually': 'Texte händisch hinzufügen',
'Add to a Team': 'Zu einem Team hinzufügen',
'Add to Bundle': 'Zu Paket hinzufügen',
'Add to budget': 'Zum Budget hinzufügen',
'Add volunteers': 'Freiwillige hinzufügen',
'Add': 'Hinzufügen',
'Add/Edit/Remove Layers': 'Hinzufügen/Bearbeiten/Entfernen von Kartenebenen',
'Added to Group': 'Zur Gruppe hinzugefügt',
'Added to Team': 'Zum Team hinzugefügt',
'Additional Beds / 24hrs': 'Zusätzliche Betten / 24 Std.',
'Address Details': 'Details zur Adresse',
'Address Type': 'Typ der Adresse',
'Address added': 'Adresse hinzugefügt',
'Address deleted': 'Adresse gelöscht',
'Address updated': 'Adresse aktualisiert',
'Address': 'Adresse',
'Addresses': 'Adressen',
'Adequate food and water available': 'Angemessene Nahrung und Wasser verfügbar',
'Adequate': 'Angemessen',
'Adjust Stock Levels': 'Lagerbestand anpassen',
'Adjust Stock': 'Lagerbestand anpassen',
'Admin': 'Administration',
'Admin Email': 'Email Administrator ',
'Admin Name': 'Name Administrator',
'Admin Tel': 'Telefonnummer Administrator',
'Administration': 'Administration',
'Administrative support cost': 'Kosten für administrative Unterstützung',
'Admissions/24hrs': 'Einlass / 24 Stunden',
'Adolescent (12-20)': 'Heranwachsende (12-20)',
'Adolescent participating in coping activities': 'Jugendliche, die an Bewältigungsaktivitäten teilnehmen',
'Adopted Child': 'Adoptiertes Kind',
'Adult (21-50)': 'Erwachsene (21-50)',
'Adult ICU': 'Erwachsene ICU',
'Adult Psychiatric': 'Erwachsener - psychiatrisch auffällig',
'Adult female': 'Erwachsener - weiblich',
'Adult male': 'Erwachsener - männlich',
'Adults in prisons': 'Erwachsene in Gefängnissen',
'Advanced': 'Erweitert',
'Advanced Javascript Layers': 'Advanced Javascript Layers',
'Advisory': 'Beratend',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nach einem Klick auf den Button, wird ein Satz von gekoppelten Elemente nacheinander gezeigt werden. Bitte wählen Sie diejenige Lösung aus jedem Paar, die sie gegenüber der anderen bevorzugen.',
'Age': 'Alter',
'Age Group': 'Altersgruppe',
'Age group does not match actual age.': 'Altersgruppe passt nicht zum tatsächlichen Alter.',
'Age group': 'Altersgruppe',
'Aggravating factors': 'Erschwerende Faktoren',
'Aggregate': 'Zusammenstellung',
'Agriculture': 'Landwirtschaft',
'Air Transport Service': 'Lufttransportsservice',
'Aircraft Crash': 'Flugzeugabsturz',
'Aircraft Hijacking': 'Flugzeugentführung',
'Aircraft Maximum Size': 'Maximale Größe des Flugzeugs',
'Airports': 'Flughäfen',
'Airport Closure': 'Flughafenschließung',
'Airspace Closure': 'Luftraumsperrung',
'Alcohol': 'Alkohol',
'All Activities': 'Alle Aktivitäten',
'All Inbound & Outbound Messages are stored here': 'Alle eingehenden und abgehenden Nachrichten werden hier gespeichert',
'All Resources': 'Alle Ressourcen',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Alle von der Sahana Software Foundation bereitgestellten Daten dieser Seite sind unter der Creative Commons Attribution licence lizenziert. Es stammen jedoch nicht alle Daten von hier. Bitte beachten Sie das Quellen-Feld des jeweiligen Eintrags.',
'All': 'Alles',
'All Records': 'Alle Datensätze',
'Allocate Group': 'Gruppe zuweisen',
'Allowance': 'Taschengeld',
'Allowances': 'Taschengelder',
'Allowed to push': 'Darf pushen',
'Allows a Budget to be drawn up': 'Ermöglicht ein Budget aufzustellen.',
'Allows authorized users to control which layers are available to the situation map.': 'Erlaubt berechtigten Benutzern zu steuern, welche Kartenebenen auf der Lagekarte verfügbar sind.',
'Alternative Item Details': 'Details zum alternativen Artikel',
'Alternative Item added': 'Alternativer Artikel hinzugefügt.',
'Alternative Item deleted': 'Alternativer Artikel gelöscht',
'Alternative Item updated': 'Alternativer Artikel aktualisiert',
'Alternative Item': 'Alternativer Artikel',
'Alternative Items': 'Alternative Artikel',
'Alternative places for studying': 'Alternative Orte für das Studium',
'Ambulance Service': 'Ambulanter Krankendienst',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Es kann eine Beurteilungsvorlage zur Erstellung einer Katastrophenbeurteilung ausgewählt werden. Innerhalb der Katastrophenbeurteilung können Antworten gesammmelt und Ergebnisse in Form von Tabellen, Graphiken und Karten erzeugt werden.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Ein Aufnahmesystem, ein Warenhausmanagementsystem, Warenlieferungsverfolgung, Versorgungskettenmanagement, Beschaffung und andere Anlagen-und Verwaltungsfunktionen.',
'An item which can be used in place of another item': 'Ein Artikel, der anstatt eines anderen Artikels verwendet werden kann',
'Analysis of Completed Surveys': 'Analyse von abgeschlossenen Umfragen',
'Animal Die Off': 'Tiere Sterben',
'Animal Feed': 'Tierfutter',
'Anthropology': 'Anthropologie',
'Antibiotics available': 'Antibiotika verfügbar',
'Antibiotics needed per 24h': 'Menge an Antibiotika die pro 24h benötigt wird',
'Apparent Age': 'Offensichtliches Alter',
'Apparent Gender': 'Offensichtliches Geschlecht',
'Application': 'Anwendung',
'Application Deadline': 'Anwendung Frist',
'Application Permissions': 'Anwendungsberechtigungen',
'Appointments': 'Termine',
'Appointment Type': 'Terminart',
'Approve': 'Bestätigen',
'Approved': 'Bestätigt',
'Approver': 'Bestätigende Stelle',
'Archived Cases': 'Archivierte Fälle',
'Arctic Outflow': 'Arktischer Abfluss',
'Areas inspected': 'Untersuchte Gebiete',
'Assessment Details': 'Details zur Beurteilung',
'Assessment Reported': 'Beurteilung gemeldet',
'Assessment Summaries': 'Zusammenfassungen der Beurteilung',
'Assessment Summary Details': 'Details zur Zusammenfassung der Beurteilung',
'Assessment Summary added': 'Zusammenfassung der Beurteilung hinzugefügt',
'Assessment Summary deleted': 'Zusammenfassung der Beurteilung gelöscht',
'Assessment Summary updated': 'Zusammenfassung der Beurteilung aktualisiert',
'Assessment added': 'Beurteilung hinzugefügt',
'Assessment admin level': 'Admin Ebene zur Beurteilung',
'Assessment deleted': 'Beurteilung gelöscht',
'Assessment timeline': 'Beurteilungszeitachse',
'Assessment updated': 'Beurteilung aktualisiert',
'Assessment': 'Beurteilung',
'Assessment Templates': 'Beurteilungsvorlage',
'Assessments Needs vs. Activities': 'Bedarf für Beurteilungen gegenüber den Aktivitäten',
'Assessments and Activities': 'Beurteilungen und Aktivitäten',
'Assessments': 'Beurteilungen',
'Assessor': 'Beurteilender',
'Asset Details': 'Details zur Anlage',
'Asset Log Details': 'Anlage Protokoll Details',
'Asset Log Empty': 'Anlage Protokoll leer',
'Asset Log Entry Added - Change Label': 'Anlage Protokolleintrag hinzugefügt - Beschriftung ändern',
'Asset Log Entry deleted': 'Anlage Protokolleintrag gelöscht',
'Asset Log Entry updated': 'Anlage Protokolleintrag aktualisiert',
'Asset Management': 'Anlageverwaltung',
'Asset Number': 'Anlagenummer',
'Asset added': 'Anlage hinzugefügt',
'Asset deleted': 'Anlage gelöscht',
'Asset removed': 'Anlage entfernt',
'Asset updated': 'Anlage aktualisiert',
'Asset': 'Anlage',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Anlagen sind Ressourcen, die nicht verbrauchbar sind aber zurück erwartet werden, daher müssen sie nachverfolgt werden.',
'Assets': 'Anlagen',
'Assign Group': 'Gruppe zuordnen',
'Assign Staff': 'Mitarbeiter zuordnen',
'Assign to Org.': 'Der Org. zuordnen',
'Assign to Organization': 'Der Organisation zuordnen',
'Assign to Person': 'Der Person zuordnen',
'Assign to Site': 'Dem Standort zuordnen',
'Assign': 'Zuordnen',
'Assign ': 'Zuordnung ',
'Assigned By': 'Zugeordnet von',
'Assigned To': 'Zugeordnet zu',
'Assigned to Organization': 'Zur Organisation zugeordnet',
'Assigned to Person': 'Zur Person zugeordnet',
'Assigned to Site': 'Zum Standort zugeordnet',
'Assigned to': 'Zugeordnet zu',
'Assigned': 'Zugeordnet',
'Asylum Application': 'Asylantrag',
'At/Visited Location (not virtual)': 'Vor Ort / in Augenschein genommener Ort (nicht virtuell)',
'Attachments': 'Anhänge',
'Attend to information sources as described in <instruction>': 'Informationsquellen beachten, wie in <instruction> beschrieben',
'Attribution': 'Namensnennung',
'Authentication Required': 'Anmeldung erforderlich',
'Author': 'Autor',
'Availability': 'Verfügbarkeit',
'Available Alternative Inventories': 'Verfügbare alternative Bestände',
'Available Beds': 'Verfügbare Betten',
'Available Inventories': 'Verfügbare Bestände',
'Available Messages': 'Verfügbare Nachrichten',
'Available Records': 'Verfügbare Datensätze',
'Available databases and tables': 'Verfügbare Datenbanken und Tabellen',
'Available for Location': 'Verfügbar für Ort',
'Available from': 'Verfügbar von',
'Available in Viewer?': 'Verfügbar in Lagedarstellung?',
'Available until': 'Verfügbar bis',
'Avalanche': 'Lawine',
'Average': 'Durchschnitt',
'Avoid the subject event as per the <instruction>': 'Das betreffende Ereignis gemäß <instruction> vermeiden',
'Awards': 'Auszeichnungen',
'Background Color for Text blocks': 'Hintergrundfarbe für Textblöcke',
'Background Color': 'Hintergrundfarbe',
'Baldness': 'Kahlköpfigkeit',
'BAMF Registration': 'BAMF Registrierung',
'Banana': 'Banane',
'Bank/micro finance': 'Bank/Mikro Finanzierung',
'Barge Capacity': 'Frachtschiffkapazitäten',
'Barricades are needed': 'Barrikaden sind erforderlich',
'Base Layer?': 'Basis Kartenebene?',
'Base Location': 'Basis Standort/Region',
'Base Site Set': 'Basisstandort definieren',
'Baseline Data': 'Referenzdatum Daten',
'Baseline Number of Beds': 'Referenzdatum Anzahl von Betten',
'Baseline Type Details': 'Referenzdatumstyp Details',
'Baseline Type added': 'Referenzdatumstyp hinzugefügt',
'Baseline Type deleted': 'Referenzdatumstyp gelöscht',
'Baseline Type updated': 'Referenzdatumstyp aktualisiert',
'Baseline Type': 'Referenzdatumstyp',
'Baseline Types': 'Referenzdatumstypen',
'Baseline added': 'Referenzdatum hinzugefügt',
'Baseline deleted': 'Referenzdatum gelöscht',
'Baseline number of beds of that type in this unit.': 'Referenzdatum Anzahl von Betten dieses Typs in dieser Einheit.',
'Baseline updated': 'Referenzdatum aktualisiert',
'Baselines Details': 'Referenzdaten Details',
'Baselines': 'Referenzdaten',
'Basic Assessment Reported': 'Grundlegende Beurteilung berichtet',
'Basic Assessment': 'Grundlegende Beurteilung',
'Basic Details': 'Grundlegende Details',
'Basic reports on the Shelter and drill-down by region': 'Grundlegende Berichte über Unterkunft und Drill-down nach Region',
'Baud rate to use for your modem - The default is safe for most cases': 'Baudrate für das Modem - der Standardwert ist in den meisten Fällen ausreichend',
'BEA Registration': 'BEA Registrierung',
'Beam': 'Träger',
'Bed Capacity per Unit': 'Bettenkapazität pro Einheit',
'Bed Capacity': 'Bettenkapazität',
'Bed Type': 'Bett-Typ',
'Bed type already registered': 'Bett-Typ bereits registriert',
'Below ground level': 'Unter dem Erdgeschoss',
'Beneficiaries': 'Begünstigte',
'Beneficiary': 'Begünstigter',
'Beneficiary Type': 'Typ des Begünstigten',
'Biological Hazard': 'Biologische Gefahr',
'Bin': 'Lagerbehälter',
'Biscuits': 'Kekse',
'Blizzard': 'Schneesturm',
'Blood Type (AB0)': 'Blutgruppe (ABO)',
'Blowing Snow': 'Schneewehen',
'Boat': 'Boot',
'Bodies found': 'Leichen gefunden',
'Bodies recovered': 'Leichen geborgen',
'Body Recovery Request': 'Leichenbergungsanforderung',
'Body Recovery Requests': 'Leichenbergungsanforderungen',
'Body': 'Body',
'Bomb Explosion': 'Bombenexplosion',
'Bomb Threat': 'Bombendrohung',
'Bomb': 'Bombe',
'Border Color for Text blocks': 'Rahmenfarbe für Textblöcke',
'Both': 'Beides',
'Brand Details': 'Details zur Marke',
'Brand added': 'Marke hinzugefügt',
'Brand deleted': 'Marke gelöscht',
'Brand updated': 'Marke aktualisiert',
'Brand': 'Marke',
'Brands': 'Marken',
'Bricks': 'Ziegelsteine',
'Bridge Closed': 'Brücke ist geschlossen',
'Bucket': 'Eimer',
'Budget Details': 'Details zum Budget',
'Budget Updated': 'Budget aktualisiert',
'Budget added': 'Budget hinzugefügt',
'Budget deleted': 'Budget gelöscht',
'Budget updated': 'Budget aktualisiert',
'Budget': 'Budget',
'Budgeting Module': 'Budget Modul',
'Buffer': 'Puffer',
'Bug': 'Programmfehler',
'Building Assessments': 'Gebäudebeurteilungen',
'Building Collapsed': 'Gebäude zusammengebrochen',
'Building Name': 'Name des Gebäudes',
'Building Safety Assessments': 'Bewertung Gebäudesicherheit',
'Building Short Name/Business Name': 'Gebäude Kurzname / Firmenname',
'Building or storey leaning': 'Gebäude- oder Stockwerkneigung',
'Built using the Template agreed by a group of NGOs working together as the': 'Erstellt unter Verwendung einer abgestimmten Vorlage einer Gruppe von NGOs unter dem Namen',
'Bulk Status Update': 'Massen-Statusaktualisierung',
'Bulk Uploader': 'Upload von Massendaten',
'Bundle Contents': 'Produktpaket Inhalt',
'Bundle Details': 'Produktpaket Details',
'Bundle Updated': 'Produktpaket aktualisiert',
'Bundle added': 'Produktpaket hinzugefügt',
'Bundle deleted': 'Produktpaket gelöscht',
'Bundle updated': 'Produktpaket aktualisiert',
'Bundle': 'Produktpaket',
'Bundles': 'Produktpakete',
'Burn ICU': 'Verbrennungseinheit',
'Burn': 'Verbrennung',
'Burned/charred': 'Verbrannt / verkohlt',
'By': 'Nach',
'By Einrichtung': 'Nach Einrichtung',
'By Facility': 'Nach Einrichtung',
'By Inventory': 'Nach Bestand',
'CBA Women': 'Frauen CBA',
'CSS file %s not writable - unable to apply theme!': 'CSS Datei %s nicht beschreibbar - Motiv kann nicht angewendet werden!',
'Calculate': 'Starte Berechnung',
'Camp Coordination/Management': 'Camp Koordinierung / Management',
'Camp Service Details': 'Details zu Camp Leistung',
'Camp Service added': 'Camp Leistung hinzugefügt',
'Camp Service deleted': 'Camp Leistung gelöscht',
'Camp Service updated': 'Leistung des Camps aktualisiert',
'Camp Services': 'Leistungen des Camps',
'Camp Type Details': 'Details zum Camp Typ',
'Camp Type added': 'Camp Typ hinzugefügt',
'Camp Type deleted': 'Camp Typ gelöscht',
'Camp Type updated': 'Camp Typ aktualisiert',
'Camp Type': 'Camp Typ',
'Camp Types and Services': 'Camp Typen und Leistungen',
'Camp Types': 'Camp Typen',
'Camp added': 'Camp hinzugefügt',
'Camp deleted': 'Camp gelöscht',
'Camp updated': 'Camp aktualisiert',
'Camp': 'Camp',
'Campaign ID': 'Kampagnen ID',
'Camps': 'Camps',
'Can only disable 1 record at a time!': 'Ein Datensatz kann nur einzeln deaktiviert werden!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Kann PoIs entweder aus einer OpenStreetMap Datei (.osm) oder einem Mirror lesen.',
'Cancel': 'Abbrechen',
'Cancel Log Entry': 'Protokolleintrag abbrechen',
'Cancel Shipment': 'Lieferung stornieren',
'Canceled': 'Abgebrochen',
'Cancelled': 'Abgesagt',
'Candidate Matches for Body %s': 'Mögliche Übereinstimmungen für Körper %s',
'Canned Fish': 'Fischkonserven',
'Cannot be empty': 'Darf nicht leer sein',
'Cannot disable your own account!': 'Eigenes Konto kann nicht deaktiviert werden.',
'Capacity': 'Maximale Kapazität',
'Capacity (Max Persons)': 'Kapazität (Maximale Zahl von Personen)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Erfassung von Informationen über Opfergruppen einer Katastrophe (Touristen, Fahrgäste, Familien, etc.)',
'Capture Information on each disaster victim': 'Erfassung von Informationen über jedes Opfer einer Katastrophe.',
'Capturing the projects each organization is providing and where': 'Erfassen der Projekte, die von jeder Organisation bereitgestellt werden und wo',
'Cardiology': 'Kardiologie',
'Cargo Pier Depth': 'Wassertiefe Frachtpier',
'Case Number': 'Fallnummer',
'Case Status': 'Fallstatus',
'Cases': 'Fälle',
'Cash': 'Bargeld',
'Cassava': 'Maniok',
'Casual Labor': 'Gelegenheitsarbeit',
'Casualties': 'Todesopfer',
'Catalog Details': 'Details zum Katalog',
'Catalog Item added': 'Katalog Eintrag hinzugefügt',
'Catalog Item deleted': 'Katalog Eintrag gelöscht',
'Catalog Item updated': 'Katalog Eintrag aktualisiert',
'Catalog Items': 'Katalog Einträge',
'Catalog added': 'Katalog hinzugefügt',
'Catalog deleted': 'Katalog gelöscht',
'Catalog updated': 'Katalog aktualisiert',
'Catalog': 'Katalog',
'Catalogs': 'Kataloge',
'Categories': 'Kategorien',
'Category': 'Kategorie',
'Ceilings, light fixtures': 'Decken, Beleuchtungskörper',
'Central point to record details on People': 'Zentrale Personenregistrierungsstelle',
'Certificate Catalog': 'Zertifikatskatalog',
'Certificate Details': 'Details zum Zertifikat',
'Certificate Status': 'Status des Zertifikats',
'Certificate added': 'Zertifikat hinzugefügt',
'Certificate deleted': 'Zertifikat gelöscht',
'Certificate updated': 'Zertifikat aktualisiert',
'Certificate': 'Zertifikat',
'Certificates': 'Zertifikate',
'Certification Details': 'Zertifizierungsdetails',
'Certification added': 'Zertifizierung hinzugefügt',
'Certification deleted': 'Zertifizierung gelöscht',
'Certification updated': 'Zertifizierung aktualisiert',
'Certification': 'Zertifizierung',
'Certifications': 'Zertifizierungen',
'Certifying Organization': 'Zertifizierende Organisation',
'Change Password': 'Passwort ändern',
'Channel': 'Kanal',
'Check-in date': 'Check-In Datum',
'Check-out date': 'Check-Out Datum',
'Check Request': 'Anfrage prüfen',
'Check for errors in the URL, maybe the address was mistyped.': 'Prüfen Sie auf Fehler in der URL, vielleicht wurde die Adresse falsch eingegeben.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Prüfen Sie ob die URL auf ein Verzeichnis anstelle einer Webseite verweist',
'Check outbox for the message status': 'Überprüfen sie den Status der Nachricht im Nachrichtenausgang',
'Check to delete': 'Anwahl zum Löschen',
'Check': 'Prüfen',
'Checked': 'Geprüft',
'Checklist created': 'Prüfliste erstellt',
'Checklist deleted': 'Prüfliste gelöscht',
'Checklist of Operations': 'Checkliste für Operationen',
'Checklist updated': 'Checkliste aktualisiert',
'Checklist': 'Prüfliste',
'Chemical Hazard': 'Chemische Gefahr',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemische, Biologische, Radiologische, Nukleare order höchst explosive Gefahr oder Angriff',
'Chicken': 'Huhn',
'Child (2-11)': 'Kind (2-11)',
'Child (< 18 yrs)': 'Kind (< 18 Jahre)',
'Child Abduction Emergency': 'Kindesentführung Notfall',
'Child headed households (<18 yrs)': 'Kindgeführte Haushalte (<18 Jahre)',
'Child': 'Kind',
'Children (2-5 years)': 'Kinder (2-5 Jahre)',
'Children (5-15 years)': 'Kinder (5-15 Jahre)',
'Children (< 2 years)': 'Kinder (< 2 Jahre)',
'Children in adult prisons': 'Kinder in Gefängnissen für Erwachsene',
'Children in boarding schools': 'Kinder in Internaten',
'Children in homes for disabled children': 'Kinder in Unterkünften für behinderte Kinder',
'Children in juvenile detention': 'Kinder in Jugendstrafheimen',
'Children in orphanages': 'Kinder in Waisenhäusern',
'Children living on their own (without adults)': 'Alleinlebende Kinder (ohne Erwachsene)',
'Children not enrolled in new school': 'Kinder, die nicht in der neuen Schule registriert sind',
'Children orphaned by the disaster': 'Durch die Katastrophe verwaiste Kinder',
'Children separated from their parents/caregivers': 'Von Ihren Eltern/Betreuern getrennte Kinder',
'Children that have been sent to safe places': 'Kinder die an sichere Orte gesendet wurden',
'Children who have disappeared since the disaster': 'Kinder, die seit der Katastrophe verschwunden sind',
'Chinese (Taiwan)': 'Chinesisch (Taiwan)',
'Cholera Treatment Capability': 'Cholera Behandlungsmöglichkeiten',
'Cholera Treatment Center': 'Cholera Behandlungscenter',
'Cholera Treatment': 'Cholera Behandlung',
'Cholera-Treatment-Center': 'Cholera-Behandlung-Center',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Wählen Sie eine neue Meldung basierend der neuen Bewertung und Teamurteil. Schwerwiegende Bedingungen, die das gesamte Gebäude betreffen sind der Grund für eine UNSICHER Markierung. Lokalisierte schwere und insgesamt moderate Bedingungen können möglicherweise eine eingeschränkte Verwendung erfordern. Platziere GEPRÜFT Plakat am Haupteingang Positionieren Sie alle anderen Schilder auf jeden wichtigen Eingang.',
'Church': 'Kirche',
'City': 'Ort/Stadt',
'City / Town / Village': 'Stadt / Ort / Dorf',
'Civil Emergency': 'Ziviler Notfall',
'Cladding, glazing': 'Verkleidung, Verglasung',
'Clear': 'Löschen',
'Clear filter': 'Filter zurücksetzen',
'Click on the link %(url)s to reset your password': 'Klicken sie auf den Link %(url)s um ihr Kennwort zurückzusetzen',
'Click on the link %(url)s to verify your email': 'Klicken sie auf den Link %(url)s zum Überprüfen ihrer EMail Adresse',
'Click where you want to open Streetview': 'Auswahl um Streetview zu öffnen',
'Clinical Laboratory': 'Klinisches Labor',
'Clinical Operations': 'Klinikbetrieb',
'Clinical Status': 'Klinischer Status',
'Closed': 'Geschlossen',
'Closed at': 'Geschlossen am',
'Clothing': 'Kleidung',
'Cluster Details': 'Details zum Cluster',
'Cluster Distance': 'Cluster Abstand',
'Cluster Subsector Details': 'Cluster Teilbereich Details',
'Cluster Subsector added': 'Cluster Teilbereich hinzugefügt',
'Cluster Subsector deleted': 'Cluster Teilbereich gelöscht',
'Cluster Subsector updated': 'Cluster Teilbereich aktualisiert',
'Cluster Subsector': 'Cluster Teilsektor',
'Cluster Subsectors': 'Cluster Teilsektoren',
'Cluster Threshold': 'Cluster Schwellwert',
'Cluster added': 'Cluster hinzugefügt',
'Cluster deleted': 'Cluster gelöscht',
'Cluster updated': 'Cluster aktualisiert',
'Cluster': 'Cluster',
'Cluster(s)': 'Cluster',
'Clusters': 'Cluster',
'Cold Wave': 'Kältewelle',
'Collapse, partial collapse, off foundation': 'Zusammengefallen, teilweise zusammengefallen, ohne Unterbau',
'Collective center': 'Kollektivcenter',
'Color for Underline of Subheadings': 'Farbe der Unterstreichungslinie von untergeordneten Überschriften',
'Color of Buttons when hovering': 'Farbe von Schaltflächen beim drüberstreichen',
'Color of bottom of Buttons when not pressed': 'Farbe der unteren Seite von Schaltflächen die nicht gedrückt sind',
'Color of bottom of Buttons when pressed': 'Farbe der unteren Seite von Schaltflächen beim Drücken von Tasten',
'Color of dropdown menus': 'Farbe des Dropdown-Menüs',
'Color of selected Input fields': 'Farbe der ausgewählten Eingabefelder',
'Color of selected menu items': 'Farbe ausgewählter Menüpunkte',
'Columns, pilasters, corbels': 'Säulen, Pfeiler, Konsolen',
'Combined Method': 'Kombinierte Methode',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Kommen Sie später noch einmal wieder. Jeder der diese Seite besucht hat derzeit wahrscheinlich das gleiche Problem wie Sie :-( .',
'Come back later.': 'Kommen Sie doch später noch einmal wieder :-( ',
'Comments': 'Kommentare',
'Comments permitted?': 'Kommentare zugelassen?',
'Commercial/Offices': 'Kommerziell / Büros',
'Commit Date': 'Datum der Einstellung',
'Commit from %s': 'Einstellung von %s',
'Commit': 'Zusage',
'Commit Status': 'Status der Zusage',
'Commiting a changed spreadsheet to the database': 'Ein verändertes Spreadsheet in der Datenbank einstellen.',
'Commitment Added': 'Zusage hinzugefügt',
'Commitment Canceled': 'Zusage abgebrochen',
'Commitment Details': 'Details zur Zusage',
'Commitment Item Details': 'Details zum zugesagten Artikel',
'Commitment Item added': 'Zugesagten Artikel hinzugefügt',
'Commitment Item deleted': 'Zugesagten Artikel gelöscht',
'Commitment Item updated': 'Zugesagten Artikel aktualisiert',
'Commitment Items': 'Zugesagte Artikel',
'Commitment Status': 'Status der Zusage',
'Commitment Updated': 'Zusage aktualisiert',
'Commitment': 'Zusage',
'Commitments': 'Zusagen',
'Committed By': 'Zugesagt durch',
'Committed': 'Zugesagt',
'Committed Items': 'Zugesagte Artikel',
'Committed Skills': 'Zugesagte Fähigkeiten',
'Committing Inventory': 'Zusageninventar',
'Communication problems': 'Kommunikationsprobleme',
'Community Health Center': 'Gesundheitszentrum der Gemeinschaft',
'Community Member': 'Mitglied der Gemeinschaft',
'Competencies': 'Kompetenzen',
'Competency Details': 'Details zu den Kompetenzen',
'Competency Rating Catalog': 'Kompetenzbewertungskatalog',
'Competency Rating Details': 'Details zur Kompetenzbewertung',
'Competency Rating added': 'Kompetenzbewertung hinzugefügt',
'Competency Rating deleted': 'Kompetenzbewertung gelöscht',
'Competency Rating updated': 'Kompetenzbewertung aktualisiert',
'Competency Ratings': 'Kompetenzbewertungen',
'Competency added': 'Kompetenz hinzugefügt',
'Competency deleted': 'Kompetenz gelöscht',
'Competency updated': 'Kompetenz aktualisiert',
'Competency': 'Kompetenz',
'Complete': 'Vollständig',
'Completed': 'Beendet',
'Complete Stock Adjustment': 'Anpassen des gesamten Bestandes',
'Completion Question': 'Abschlussfrage',
'Complexion': 'Gesichtsfarbe',
'Compose': 'Erstellen',
'Compromised': 'Gefährdet',
'Concrete frame': 'Betonrahmen',
'Concrete shear wall': 'Betonscherwand',
'Condition': 'Bedingung',
'Conduct a Disaster Assessment': 'Durchführung einer Katastrophenbeurteilung',
'Configuration': 'Konfiguration',
'Configurations': 'Konfigurationen',
'Configure Run-time Settings': 'Laufzeiteinstellungen konfigurieren',
'Confirm Shipment Received': 'Bestätigen der erhaltenen Lieferung',
'Confirmed': 'Bestätigt',
'Confirming Organization': 'Organisation bestätigen',
'Conflict Details': 'Details zum Konflikt',
'Conflict Resolution': 'Konfliktlösung',
'Connection': 'Verbindung',
'Connect Parser': 'Verbindungsparser',
'Consignment Note': 'Warenbegleitschein',
'Constraints Only': 'Nur Bedingungen',
'Consumable': 'Verbrauchsartikel',
'Contact Data': 'Kontakt Daten',
'Contact Details': 'Details zum Kontakt',
'Contact Info': 'Kontaktinformationen',
'Contact Information Added': 'Kontaktinformationen hinzugefügt',
'Contact Information Deleted': 'Kontaktinformationen gelöscht',
'Contact Information Updated': 'Kontakt Informationen aktualisiert',
'Contact Information': 'Kontaktinformationen',
'Contact Method': 'Kontaktmethode',
'Contact Name': 'Name des Ansprechpartners',
'Contact Person': 'Kontaktperson',
'Contact Person / Camp Owner': 'Kontaktperson / Camp-Betreiber',
'Contact Phone': 'Telefonnummer des Kontaktes',
'Contact details': 'Details zum Kontakt',
'Contact information added': 'Kontaktinformationen hinzugefügt',
'Contact information deleted': 'Kontaktinformationen gelöscht',
'Contact information updated': 'Kontaktinformationen aktualisiert',
'Contact Us': 'Kontaktieren Sie uns',
'Contact us': 'Kontaktieren Sie uns',
'Contact': 'Kontakt',
'Contacts': 'Kontakte',
'Content': 'Inhalt',
'Contents': 'Inhalte',
'Content Management': 'Content Management',
'Content Management System': 'Content Management System',
'Contract End Date': 'Ablaufzeit des Vertrags',
'Contributor': 'Mitwirkung',
'Conversion Tool': 'Umrechnungstool',
'Cooking NFIs': 'Kochen NFIs',
'Cooking Oil': 'Speiseöl',
'Coordinate Conversion': 'Koordinatentransformation',
'Coping Activities': 'Bewältigungsaktivitäten',
'Copy': 'Kopieren',
'Cost Type': 'Kostentyp',
'Cost per Megabyte': 'Kosten pro Megabyte',
'Cost per Minute': 'Kosten pro Minute',
'Count': 'Zahl',
'Country of Residence': 'Land des Wohnsitzes',
'Country': 'Land',
'County': 'Bezirk',
'County / District': 'Kreis / Bezirk',
'Course Catalog': 'Katalog der Kurse',
'Course Certificate Details': 'Details zum Kurszertifikat ',
'Course Certificate added': 'Kurszertifikat hinzugefügt',
'Course Certificate deleted': 'Kurszertifikat gelöscht',
'Course Certificate updated': 'Kurszertifikat aktualisiert',
'Course Certificates': 'Kurszertifikate',
'Course Details': 'Details zum Kurs',
'Course added': 'Kurs hinzugefügt',
'Course deleted': 'Kurs gelöscht',
'Course updated': 'Kurs aktualisiert',
'Course': 'Kurs',
'Create': 'Anlegen',
'Create & manage Distribution groups to receive Alerts': 'Erstellen und Verwalten von Verteilergruppen um Warnhinweise zu empfangen',
'Create Activity Report': 'Aktivitätsreport erstellen',
'Create Activity Type': 'Aktivitätstyp erstellen',
'Create Activity': 'Aktivität erstellen',
'Create Airport': 'Flughafen erstellen',
'Create Assessment': 'Beurteilung erstellen',
'Create Asset': 'Anlage erstellen',
'Create Bed Type': 'Bettentyp erstellen',
'Create Brand': 'Marke erstellen',
'Create Budget': 'Budget erstellen',
'Create Bundle': 'Produktpaket erstellen',
'Create Case': 'Fall erstellen',
'Create Catalog Item': 'Katalogeintrag erstellen',
'Create Catalog': 'Katalog erstellen',
'Create Certificate': 'Zertifikat erstellen',
'Create Checklist': 'Prüfliste erstellen',
'Create Cholera Treatment Capability Information': 'Fügen Sie Informationen zur Möglichkeit der Behandlung von Cholerafällen hinzu',
'Create Cluster Subsector': 'Cluster Teilbereich erstellen',
'Create Cluster': 'Cluster erstellen',
'Create Competency Rating': 'Kompetenzbewertung erstellen',
'Create Contact': 'Kontaktperson erstellen',
'Create Course': 'Kurs erstellen',
'Create Dead Body Report': 'Leichenbericht erstellen',
'Create Department': 'Abteilung erstellen',
'Create Event': 'Neues Ereignis erstellen',
'Create Event Type': 'Ereignistyp erstellen',
'Create Facility': 'Einrichtung erstellen',
'Create Facility Type': 'Einrichtungstyp erstellen',
'Create Feature Layer': 'Kartenebene für Objektart erstellen',
'Create Group Entry': 'Gruppeneintrag erstellen',
'Create Group': 'Gruppe erstellen',
'Create Heliport': 'Hubschrauberlandeplatz erstellen',
'Create Hospital': 'Krankenhaus erstellen',
'Create Identification Report': 'Identifizierungsbericht erstellen',
'Create Impact Assessment': 'Folgenabschätzung erstellen',
'Create Incident Report': 'Vorfallbericht erstellen',
'Create Incident Type': 'Vorfalltyp erstellen',
'Create Incident': 'Vorfall erstellen',
'Create Item Category': 'Element Kategorie erstellen',
'Create Item Pack': 'Artikelgruppe erstellen',
'Create Item': 'Neuen Artikel anlegen',
'Create Job Title': 'Berufsbezeichnung erstellen',
'Create Kit': 'Ausstattung (Kit) anlegen',
'Create Kitting': 'Kit zusammenstellen',
'Create Layer': 'Kartenebene anlegen',
'Create Location': 'Standort anlegen',
'Create Location Hierarchy': 'Standorthierarchie anlegen',
'Create Map Profile': 'Kartenkonfiguration anlegen',
'Create Map Style': 'Kartensymbolisierung erstellen',
'Create Marker': 'Marker/Symbol anlegen',
'Create Member': 'Mitglied erstellen',
'Create Membership Type': 'Mitgliedstyp erstellen',
'Create Mobile Impact Assessment': 'Erstellen Sie Mobile Folgenabschätzung',
'Create Office': 'Büro anlegen',
'Create Office Type': 'Bürotyp anlegen',
'Create Organization': 'Organisation anlegen',
'Create Organization Type': 'Organisationstyp anlegen',
'Create Personal Effects': 'Persönlicher Habe anlegen',
'Create PoI Type': 'PoI-Typ erstellen',
'Create Point of Interest': 'PoI erstellen',
'Create Post': 'POST erstellen',
'Create Program': 'Programm erstellen',
'Create Project': 'Projekt anlegen',
'Create Projection': 'Kartenprojektion anlegen',
'Create Rapid Assessment': 'Schnell-Beurteilung anlegen',
'Create Report': 'Bericht anlegen',
'Create Repository': 'Repository anlegen',
'Create Request': 'Anfrage anlegen',
'Create Request Template': 'Anfragevorlage anlegen',
'Create Resource': 'Ressource anlegen',
'Create River': 'Neuen Fluss anlegen',
'Create Role': 'Neue Rolle anlegen',
'Create Room': 'Neues Zimmer anlegen',
'Create Seaport': 'Seehafen erstellen',
'Create Scenario': 'Neues Szenario anlegen',
'Create Sector': 'Neuen Bereich anlegen',
'Create Series': 'Serie erstellen',
'Create Service Profile': 'Neues Leistungsprofil anlegen',
'Create Shelter Service': 'Neue Unterkunft anlegen',
'Create Shelter Type': 'Neue Art der Unterkunft anlegen',
'Create Shelter': 'Neue Unterkunft anlegen',
'Create Skill Type': 'Art der Qualifikation / Fähigkeit anlegen',
'Create Skill': 'Fähigkeiten / Qualifikationen anlegen',
'Create Staff Member': 'Neuen Mitarbeiter anlegen',
'Create Staff Type': 'Mitarbeitertyp erstellen',
'Create Status': 'Neuen Status anlegen',
'Create Supplier': 'Neuen Lieferanten anlegen',
'Create Task': 'Neue Aufgabe anlegen',
'Create Theme': 'Neues Thema anlegen',
'Create User': 'Neuen Benutzer anlegen',
'Create Training Event': 'Neuen Schulungskurs anlegen',
'Create Vehicle': 'Fahrzeug erstellen',
'Create Vehicle Type': 'Fahrzeugtyp erstellen',
'Create Volunteer': 'Neuen Freiwilligen anlegen',
'Create Volunteer Role': 'Freiwilligenrolle erstellen',
'Create Warehouse': 'Neues Warenlager anlegen',
'Create Warehouse Type': 'Warenlagertyp erstellen',
'Create a Person': 'Neue Person anlegen',
'Create a group entry in the registry.': 'Erstellen Sie eine neue Gruppe in der Registry.',
'Create, enter, and manage surveys.': 'Erstellen, Eingabe und Verwaltung von Umfragen.',
'Created By': 'Erstellt von',
'Created On': 'Erstellt am',
'Creation of Surveys': 'Erstellung von Umfragen',
'Credential Details': 'Details zur Qualifikation',
'Credential added': 'Qualifikation hinzugefügt',
'Credential deleted': 'Qualifikation gelöscht',
'Credential updated': 'Qualifikation aktualisiert',
'Credentialling Organization': 'Bescheinigende Organisation',
'Credentials': 'Qualifikationen',
'Credit Card': 'Kreditkarte',
'Crime': 'Kriminalität',
'Criteria': 'Kriterien',
'CTN': 'CTN',
'Currency': 'Währung',
'Current': 'Aktuell',
'Current Address': 'Aktuelle Adresse',
'Current Entries': 'Aktuelle Einträge',
'Current Group Members': 'Aktuelle Gruppenmitglieder',
'Current Home Address': 'Aktuelle Heimatadresse',
'Current Identities': 'Aktuelle Identitäten',
'Current Location': 'Aktueller Standort',
'Current Log Entries': 'Aktuelle Protokolleinträge',
'Current Memberships': 'Aktuelle Mitgliedschaften',
'Current Needs': 'Aktuelle Bedarfsmeldungen',
'Current Records': 'Aktuelle Datensätze',
'Current Registrations': 'Aktuellen Registrierungen',
'Current Status': 'Aktueller Status',
'Current Team Members': 'Aktuelle Team Mitglieder',
'Current Twitter account': 'Aktueller Benutzeraccount bei Twitter',
'Current community priorities': 'Aktuelle Priorisierung in der Community',
'Current general needs': 'Aktueller allgemeiner Bedarf',
'Current greatest needs of vulnerable groups': 'Wichtigste Bedürfnisse der gefährdeten Gruppen',
'Current health problems': 'Derzeitige Gesundheitsprobleme',
'Current number of patients': 'Aktuelle Anzahl von Patienten',
'Current problems, categories': 'Aktuelle Probleme, Kategorien',
'Current problems, details': 'Aktuelle Probleme, Details',
'Current request': 'Aktuelle Anfrage',
'Current response': 'Aktuelle Antwort',
'Current session': 'Aktuelle Sitzung',
'Currently no Certifications registered': 'Derzeit sind keine Zertifizierungen registriert',
'Currently no Competencies registered': 'Derzeit sind keine Kompetenzen registriert',
'Currently no Course Certificates registered': 'Derzeit sind keine Kurszertifikate registriert',
'Currently no Credentials registered': 'Derzeit sind keine Qualifikationen registriert',
'Currently no Missions registered': 'Derzeit sind keine Aufträge registriert',
'Currently no Skill Equivalences registered': 'Derzeit sind keine Fähigkeits-Vergleichbarkeiten registriert',
'Currently no Trainings registered': 'Derzeit keine Schulungen registriert',
'Currently no entries in the catalog': 'Derzeit keine Einträge im Katalog',
'Customs Capacity': 'Zollkapazität',
'Customs Warehousing Storage Capacity': 'Zollwarenlager Kapazität',
'DNA Profile': 'DNA Profil',
'DNA Profiling': 'DNS-Profiling',
'Dam Overflow': 'Dammüberlauf',
'Damage': 'Beschädigung',
'Dangerous Person': 'Gefährliche Person',
'Data uploaded': 'Daten hochgeladen',
'Data': 'Daten',
'Database': 'Datenbank',
'Date & Time': 'Datum und Zeit',
'Date Available': 'Verfügbar ab',
'Date Created': 'Erstellt am',
'Date Due': 'Fällig am',
'Date for Follow-up': 'Wiedervorlage am',
'Date Joined': 'Eintrittsdatum',
'Date Modified': 'Geändert am',
'Date Published': 'Veröffentlicht am',
'Date Question': 'Gefragt am',
'Date Received': 'Erhalten am',
'Date Released': 'Datum der Veröffentlichung',
'Date Requested': 'Angefordert am',
'Date Required': 'Benötigt am',
'Date Required Until': 'Benötigt bis',
'Date Needed By': 'Benötigt ab',
'Date Sent': 'Gesendet am',
'Date Taken': 'Verwendet am',
'Date Until': 'Datum bis',
'Date and Time': 'Datum und Zeit',
'Date and time this report relates to.': 'Datum und Uhrzeit auf die sich dieser Bericht bezieht.',
'Date of Birth': 'Geburtsdatum',
'Date of Latest Information on Beneficiaries Reached': 'Datum von aktuellen Informationen der Finanzhilfen erreicht',
'Date of Report': 'Datum des Berichts',
'Date resigned': 'Datum der Kündigung',
'Date': 'Datum',
'Date/Time of Find': 'Datum/Zeit des Fundes',
'Date/Time when found': 'Datum / Uhrzeit, wann festgestellt',
'Date/Time when last seen': 'Datum / Uhrzeit, wann zuletzt gesehen',
'Date/Time': 'Datum/Zeit',
'De-duplicate': 'Bestätige Duplikat',
'De-duplicator': 'Duplikate entfernen',
'Dead Body Details': 'Details zur Leiche ',
'Dead Body Reports': 'Leichenbericht',
'Dead Body': 'Leiche',
'Dead body report added': 'Leichenbericht hinzugefügt',
'Dead body report deleted': 'Leichenbericht gelöscht',
'Dead body report updated': 'Leichenbericht aktualisiert',
'Deaths in the past 24h': 'Tote der letzten 24h',
'Deaths/24hrs': 'Todesfälle/24std',
'Decimal Degrees': 'Dezimalgrade',
'Decision': 'Entscheidung',
'Decomposed': 'Zerlegt',
'Default Base layer?': 'Standard Hintergrundkartenebene?',
'Default Location': 'Standard Gebiet/Standort',
'Default Height of the map window.': 'Standardhöhe des Kartenfensters',
'Default Map': 'Standard-Kartenfenster',
'Default Marker': 'Standardsymbol',
'Default Width of the map window.': 'Standardbreite des Kartenfensters.',
'Default map question': 'Standard Kartenfrage',
'Default?': 'Standard?',
'Default synchronization policy': 'Standard-Synchronisationsverfahren',
'Defecation area for animals': 'Kotbereich für Tiere',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definieren Sie Szenarien für die Zuordnung der entsprechenden Ressourcen (Menschen, Anlagen und Einrichtungen).',
'Defines the icon used for display of features on handheld GPS.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf mobilen GPS-Geräten verwendet wird.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf der interaktiven Karte sowie für die KML Exporte verwendet wird.',
'Defines the marker used for display & the attributes visible in the popup.': 'Definiert das Symbol, das für die Anzeige und die Attribute im Popup-Fenster verwendet wird.',
'Degrees must be a number between -180 and 180': 'Grad muss eine Zahl zwischen -180 und 180 sein.',
'Delete Alternative Item': 'Alternativen Artikel löschen',
'Delete Assessment Summary': 'Zusammenfassung der Beurteilung löschen',
'Delete Assessment': 'Beurteilung löschen',
'Delete Asset Log Entry': 'Löschen des Protokolleintrags der Anlage',
'Delete Asset': 'Anlage löschen',
'Delete Baseline Type': 'Lösche Typ des Referenzdatums',
'Delete Baseline': 'Referenzdatum löschen',
'Delete Brand': 'Lösche Marke',
'Delete Budget': 'Lösche Budget',
'Delete Bundle': 'Produktpaket löschen',
'Delete Catalog Item': 'Lösche Katalogeintrag',
'Delete Catalog': 'Katalog löschen',
'Delete Certificate': 'Zertifikat löschen',
'Delete Certification': 'Zertifizierung löschen',
'Delete Cluster Subsector': 'Cluster Teilbereich löschen',
'Delete Cluster': 'Cluster löschen',
'Delete Commitment Item': 'Zugesagten Artikel löschen',
'Delete Commitment': 'Zusage löschen',
'Delete Competency Rating': 'Kompetenzbewertung löschen',
'Delete Competency': 'Kompetenz löschen',
'Delete Contact Information': 'Kontaktinformation löschen',
'Delete Course Certificate': 'Lösche Kurszertifikat',
'Delete Course': 'Lösche Kurs',
'Delete Credential': 'Qualifikation löschen',
'Delete Document': 'Dokument löschen',
'Delete Donor': 'Spender löschen',
'Delete Entry': 'Eintrag löschen',
'Delete Event Type': 'Ereignistyp löschen',
'Delete Facility Type': 'Anlagentyp löschen',
'Delete Feature Layer': 'Lösche Objekt Kartenebene',
'Delete Group': 'Gruppe löschen',
'Delete Hospital': 'Krankenhaus löschen',
'Delete Image': 'Grafik löschen',
'Delete Impact Type': 'Löschen des Auswirkungstyps',
'Delete Impact': 'Auswirkung löschen',
'Delete Incident Report': 'Vorfallbericht löschen',
'Delete Item Category': 'Artikel Kategorie löschen',
'Delete Item Pack': 'Artikelgruppe löschen',
'Delete Item': 'Eintrag löschen',
'Delete Job Role': 'Tätigkeit löschen',
'Delete Key': 'Schlüssel löschen',
'Delete Kit': 'Ausstattung (Kit) löschen',
'Delete Layer': 'Ebene löschen',
'Delete Level 1 Assessment': 'Stufe 1 Beurteilung löschen',
'Delete Level 2 Assessment': 'Stufe 2 Beurteilung löschen',
'Delete Location': 'Standort löschen',
'Delete Map Profile': 'Kartenkonfiguration löschen',
'Delete Marker': 'Marker/Symbol löschen',
'Delete Membership': 'Mitgliedschaft löschen',
'Delete Message': 'Nachricht löschen',
'Delete Mission': 'Auftrag löschen',
'Delete Need Type': 'Anforderungstyp löschen',
'Delete Need': 'Anforderung löschen',
'Delete Office': 'Büro löschen',
'Delete Office Type': 'Bürotyp löschen',
'Delete Organization': 'Organisation löschen',
'Delete Organization Type': 'Organisationstyp löschen',
'Delete Peer': 'Peer löschen',
'Delete Person': 'Benutzer löschen',
'Delete Photo': 'Foto löschen',
'Delete Population Statistic': 'Bevölkerungsstatistik löschen',
'Delete Position': 'Position löschen',
'Delete Project': 'Projekt löschen',
'Delete Projection': 'Koordinatensystemprojektion löschen',
'Delete Rapid Assessment': 'Schnell-Beurteilung löschen',
'Delete Received Item': 'Erhaltenen Artikel löschen',
'Delete Received Shipment': 'Erhaltene Lieferung löschen',
'Delete Record': 'Datensatz löschen',
'Delete Report': 'Bericht löschen',
'Delete Request Item': 'Lösche das Anfrageelement',
'Delete Request': 'Lösche die Anfrage',
'Delete Resource': 'Lösche die Ressource',
'Delete Room': 'Raum löschen',
'Delete Scenario': 'Szenario löschen',
'Delete Section': 'Lösche Abschnitt',
'Delete Sector': 'Lösche Bereich',
'Delete Sent Item': 'Lösche gesendeten Artikel',
'Delete Sent Shipment': 'Lösche gesendete Lieferung',
'Delete Service Profile': 'Service-Profil löschen',
'Delete Setting': 'Einstellung löschen',
'Delete Skill Equivalence': 'Fähigkeits-Vergleichbarkeit löschen',
'Delete Skill Provision': 'Fähigkeits-Bereitstellung löschen',
'Delete Skill Type': 'Löschen des Typs der Befähigung',
'Delete Skill': 'Befähigung löschen',
'Delete Staff Type': 'Mitarbeitertyp löschen',
'Delete Status': 'Status löschen',
'Delete Subscription': 'Abonnement löschen',
'Delete Subsector': 'Teilbereich löschen',
'Delete Survey Answer': 'Umfrage - Antwort Löschen',
'Delete Survey Question': 'Umfrage - Frage löschen',
'Delete Survey Series': 'Umfrage Serie löschen',
'Delete Survey Template': 'Umfrage Vorlage löschen',
'Delete Training': 'Schulung löschen',
'Delete Unit': 'Einheit löschen',
'Delete User': 'Benutzer löschen',
'Delete Volunteer': 'Freiwilligen löschen',
'Delete Warehouse': 'Warenlager löschen',
'Delete from Server?': 'Vom Server löschen?',
'Delete': 'Löschen',
'Deliver To': 'Liefern an',
'Delphi Decision Maker': 'Delphi Entscheidungsträger',
'Demographic': 'Demografisch',
'Demonstrations': 'Vorführungen',
'Dental Examination': 'Zahnärztliche Prüfung',
'Dental Profile': 'Zahnärztliches Profil',
'Department / Unit': 'Abteilung / Einheit',
'Department Catalog': 'Abteilungskatalog',
'Dependent Person': 'Abhängige Person',
'Describe the condition of the roads to your hospital.': 'Beschreiben Sie den Zustand der Strassen zu Ihrem Krankenhaus.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Beschreiben Sie den Arbeitsablauf der sich auf diesen Eintrag bezieht (z. B. "ärztliche Untersuchung")',
'Description of Contacts': 'Beschreibung der Kontakte',
'Description of defecation area': 'Beschreibung der Sanitäranlagen',
'Description of drinking water source': 'Beschreibung der Herkunft des Trinkwassers',
'Description of sanitary water source': 'Beschreibung der Herkunft des Sanitärwassers',
'Description of water source before the disaster': 'Beschreibung der Herkunft des Wassers vor der Katastrophe',
'Description': 'Beschreibung',
'Desire to remain with family': 'Wunsch bei der Familie zu bleiben',
'Destination': 'Ziel',
'Destroyed': 'Zerstört',
'Details field is required!': 'Detailfeld ist erforderlich!',
'Dialysis': 'Dialyse',
'Diaphragms, horizontal bracing': 'Membranen, horizontal stützen',
'Diarrhea': 'Durchfall',
'Dignitary Visit': 'Besuch des Würdenträgers',
'Direction': 'Richtung',
'Disable': 'Deaktivieren',
'Disabled participating in coping activities': 'Behinderte beteiligen sich an Bewältigungsaktivitäten',
'Disabled': 'Deaktiviert',
'Disabled?': 'Behindert?',
'Disappeared': 'Untergetaucht',
'Disaster Assessments': 'Katastrophenbeurteilungen',
'Disaster Victim Identification': 'Katastrophen Opferidentifikation',
'Disaster Victim Registry': 'Katastrophen Opferverzeichnis',
'Disaster clean-up/repairs': 'Katastrophen Reinigung/Reparaturen',
'Discharge (cusecs)': 'Ausfluss',
'Discharges/24hrs': 'Abfluss/24 Stunden',
'Discussion Forum on item': 'Diskussionsforum über Eintrag',
'Discussion Forum': 'Diskussionsforum',
'Disease vectors': 'Krankheitsvektoren',
'Dispensary': 'Ambulatorium',
'Displaced Populations': 'Heimatlose Bevölkerung',
'Displaced': 'Vertriebenen',
'Display Polygons?': 'Anzeige Polygone?',
'Display Routes?': 'Anzeige Routen?',
'Display Tracks?': 'Anzeige Wege?',
'Display Waypoints?': 'Anzeige Wegpunkte?',
'Distance between defecation area and water source': 'Distanz zwischen Sanitärbereich und Wasserquelle',
'Distance from %s:': 'Abstand von %s:',
'Distance(Kms)': 'Distanz (km)',
'Distribution groups': 'Verteilergruppen',
'Distribution': 'Verteilung',
'District': 'Bezirk',
'Rural District / District': 'Landkreis / Kreis',
'Do you really want to delete these records?': 'Sollen diese Datensätze wirklich gelöscht werden?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Möchten Sie diese erhaltene Lieferung stornieren? Die Artikel werden aus dem Bestand entfernt werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Möchten Sie diese abgeschickte Sendung wirklich stornieren? Die Artikel werden an die Bestandserfassung zurückgegeben werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to receive this shipment?': 'Wollen Sie die Lieferung empfangen?',
'Do you want to send these Committed items?': 'Wollen Sie die zugesagten Artikel schicken?',
'Do you want to send this shipment?': 'Wollen Sie diese Lieferung abschicken?',
'Document Details': 'Details zum Dokument',
'Document Scan': 'Dokument Scannen',
'Document added': 'Dokument hinzugefügt',
'Document deleted': 'Dokument gelöscht',
'Document updated': 'Dokument aktualisiert',
'Documents and Photos': 'Dokumente und Fotos',
'Documents': 'Dokumente',
'Does this facility provide a cholera treatment center?': 'Verfügt diese Einrichtung über ein Behandlungscenter für Cholera?',
'Doing nothing (no structured activity)': 'Untätig (keine strukturierte Aktivität)',
'Dollars': 'Dollar',
'Domain': 'Domäne',
'Domestic chores': 'Hausarbeit',
'Donated': 'Gespendet',
'Donating Organization': 'Spendende Organisationen',
'Donation': 'Spende',
'Donations': 'Spenden',
'Donation Certificate': 'Spendenzertifikat',
'Donations Needed': 'Spenden benötigt',
'Donation Phone #': 'Spender Telefon #',
'Donor Details': 'Details zum Spender',
'Donor added': 'Spender hinzugefügt',
'Donor deleted': 'Spender gelöscht',
'Donor updated': 'Spender aktualisiert',
'Donor': 'Spender',
'Donors Report': 'Bericht zu Spendern',
'Donors': 'Spender',
'Door frame': 'Türrahmen',
'Download PDF': 'PDF herunterladen',
'Download Template': 'Vorlage herunterladen',
'Draft': 'Entwurf',
'Drainage': 'Abfluß',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Aufstellung eines Budgets für Mitarbeiter und Ausrüstung über mehrere Standorte',
'Drill Down by Group': 'Recherche nach Gruppe',
'Drill Down by Incident': 'Recherche nach Vorfall',
'Drill Down by Shelter': 'Recherche nach Unterkunft',
'Drivers': 'Fahrer',
'Driver Phone Number': 'Telefonnummer des Fahrers',
'Driving License': 'Führerschein',
'Drought': 'Dürre',
'Drop-off Location for Goods?': 'Sammelstelle für Sachspenden?',
'Drugs': 'Drogen',
'Dry Dock': 'Trockendock',
'Due Follow-ups': 'Fällige Wiedervorlagen',
'Dug Well': 'Schachtbrunnen',
'Duplicate?': 'Duplikat?',
'Dust Storm': 'Staubsturm',
'Dwelling': 'Wohnstätte',
'EMS Reason': 'EMS Grund',
'ER Status Reason': 'Status Notaufnahme Grund',
'ER Status': 'Status Notaufnahme',
'Early Recovery': 'Frühe Besserung / Bergung',
'Earthquake': 'Erdbeben',
'EasyOpt Number': 'EasyOpt Nummer',
'Edit Activity': 'Aktivität bearbeiten',
'Edit Address': 'Adresse bearbeiten',
'Edit Alternative Item': 'Alternativen Artikel bearbeiten',
'Edit Application': 'Anwendung bearbeiten',
'Edit Appointment': 'Termin bearbeiten',
'Edit Assessment Summary': 'Zusammenfassung fuer die Beurteilung bearbeiten',
'Edit Assessment': 'Beurteilung bearbeiten',
'Edit Asset Log Entry': 'Protokolleintrag der Anlage bearbeiten',
'Edit Asset': 'Anlage bearbeiten',
'Edit Baseline Type': 'Bearbeiten des Typs des Referenzdatums',
'Edit Baseline': 'Referenzdatum bearbeiten',
'Edit Brand': 'Marke bearbeiten',
'Edit Budget': 'Budget bearbeiten',
'Edit Bundle': 'Produktpaket bearbeiten',
'Edit Camp Service': 'Camp Leistung bearbeiten',
'Edit Camp Type': 'Camptyp bearbeiten',
'Edit Camp': 'Camp bearbeiten',
'Edit Catalog Item': 'Katalogeintrag bearbeiten',
'Edit Catalog': 'Katalog bearbeiten',
'Edit Certificate': 'Zertifikat bearbeiten',
'Edit Certification': 'Zertifizierung bearbeiten',
'Edit Cluster Subsector': 'Cluster Teilbereich bearbeiten',
'Edit Cluster': 'Cluster bearbeiten',
'Edit Commitment Item': 'Zugesagten Artikel bearbeiten',
'Edit Commitment': 'Zusage bearbeiten',
'Edit Competency Rating': 'Kompetenzbewertung bearbeiten',
'Edit Competency': 'Kompetenz bearbeiten',
'Edit Contact Information': 'Kontaktinformation bearbeiten',
'Edit Contact': 'Kontakt bearbeiten',
'Edit Contents': 'Inhalt bearbeiten',
'Edit Course Certificate': 'Kurszertifikat bearbeiten',
'Edit Course': 'Kurs bearbeiten',
'Edit Credential': 'Qualifikation bearbeiten',
'Edit Dead Body Details': 'Leichendetails bearbeiten',
'Edit Description': 'Beschreibung bearbeiten',
'Edit Details': 'Details bearbeiten',
'Edit Disaster Victims': 'Katastrophenopfer bearbeiten',
'Edit Document': 'Dokument bearbeiten',
'Edit Donor': 'Spender bearbeiten',
'Edit Email Settings': 'Email Einstellungen bearbeiten',
'Edit Entry': 'Eintrag bearbeiten',
'Edit Event': 'Ereignis bearbeiten',
'Edit Event Type': 'Ereignistyp bearbeiten',
'Edit Facility': 'Einrichtung bearbeiten',
'Edit Facility Type': 'Einrichtungstyp bearbeiten',
'Edit Feature Layer': 'Objektlayer bearbeiten',
'Edit Flood Report': 'Flut Bericht Bearbeiten',
'Edit Gateway Settings': 'Gateway-Einstellungen bearbeiten',
'Edit Group': 'Gruppe bearbeiten',
'Edit Hospital': 'Krankenhaus bearbeiten',
'Edit Human Resource': 'Personelle Ressource bearbeiten',
'Edit Identification Report': 'Identifizierungsbericht bearbeiten',
'Edit Identity': 'Identität bearbeiten',
'Edit Image Details': 'Bild Details bearbeiten',
'Edit Impact Type': 'Typ der Auswirkung bearbeiten',
'Edit Impact': 'Auswirkungen bearbeiten',
'Edit Incident Report': 'Vorfallsbericht bearbeiten',
'Edit Inventory Item': 'Artikel des Bestands bearbeiten',
'Edit Item Category': 'Kategorie des Artikel bearbeiten',
'Edit Item Pack': 'Artikelgruppe bearbeiten',
'Edit Item': 'Artikel bearbeiten',
'Edit Job Role': 'Tätigkeit bearbeiten',
'Edit Key': 'Schlüssel bearbeiten',
'Edit Kit': 'Ausstattung (Kit) bearbeiten',
'Edit Layer': 'Kartenebene bearbeiten',
'Edit Level %d Locations?': 'Bearbeiten von Level %d Standorten?',
'Edit Level 1 Assessment': 'Stufe 1 Beurteilung bearbeiten',
'Edit Level 2 Assessment': 'Stufe 2 Beurteilung bearbeiten',
'Edit Location': 'Standort (Position) bearbeiten',
'Edit Log Entry': 'Protokolleintrag bearbeiten',
'Edit Map Profile': 'Kartenkonfiguration bearbeiten',
'Edit Map Services': 'Kartendienste bearbeiten',
'Edit Marker': 'Marker/Symbol bearbeiten',
'Edit Membership': 'Mitgliedschaft bearbeiten',
'Edit Message': 'Nachricht bearbeiten',
'Edit Messaging Settings': 'Messaging-Einstellungen bearbeiten',
'Edit Mission': 'Auftrag bearbeiten',
'Edit Modem Settings': 'Modem Settings bearbeiten',
'Edit Need Type': 'Bedarfstyp bearbeiten',
'Edit Need': 'Bedarf bearbeiten',
'Edit Office': 'Büro bearbeiten',
'Edit Options': 'Optionen bearbeiten',
'Edit Organization': 'Organisation bearbeiten',
'Edit Parameters': 'Parameter bearbeiten',
'Edit Peer Details': 'Details zu Peer bearbeiten',
'Edit Person Details': 'Details zur Person bearbeiten',
'Edit Personal Effects Details': 'Details zur persönlichen Habe bearbeiten',
'Edit Photo': 'Foto bearbeiten',
'Edit Population Statistic': 'Bevölkerungsstatistik bearbeiten',
'Edit Position': 'Position bearbeiten',
'Edit Problem': 'Problem bearbeiten',
'Edit Project': 'Projekt bearbeiten',
'Edit Projection': 'Kartenprojektion bearbeiten',
'Edit Rapid Assessment': 'Schnell-Beurteilung bearbeiten',
'Edit Received Item': 'Erhaltenen Artikel bearbeiten',
'Edit Received Shipment': 'Erhaltene Lieferung bearbeiten',
'Edit Record': 'Datensatz bearbeiten',
'Edit Registration Details': 'Details zur Registrierung bearbeiten',
'Edit Registration': 'Registrierung bearbeiten',
'Edit Request Item': 'Anfrage zu Artikel bearbeiten',
'Edit Request': 'Anfrage bearbeiten',
'Edit Resource': 'Ressource bearbeiten',
'Edit River': 'Fluss bearbeiten',
'Edit Role': 'Rolle bearbeiten',
'Edit Room': 'Raum bearbeiten',
'Edit Scenario': 'Szenario bearbeiten',
'Edit Sector': 'Bereich bearbeiten',
'Edit Sent Item': 'Gesendeten Artikel bearbeiten',
'Edit Setting': 'Einstellung bearbeiten',
'Edit Settings': 'Einstellungen bearbeiten',
'Edit Shelter Service': 'Unterkunft Leistung bearbeiten',
'Edit Shelter Type': 'Typ der Unterkunft bearbeiten',
'Edit Shelter': 'Unterkunft bearbeiten',
'Edit Skill Equivalence': 'Fähigkeits-Vergleichbarkeit bearbeiten',
'Edit Skill Provision': 'Fähigkeits-Bereitstellung bearbeiten',
'Edit Skill Type': 'Typ der Fähigkeit bearbeiten',
'Edit Skill': 'Fähigkeit bearbeiten',
'Edit Solution': 'Lösung bearbeiten',
'Edit Staff Type': 'Typ von Mitarbeitern bearbeiten',
'Edit Subscription': 'Abonnement bearbeiten',
'Edit Subsector': 'Teilbereich bearbeiten',
'Edit Survey Answer': 'Umfrage - Antwort bearbeiten',
'Edit Survey Question': 'Umfrage - Frage bearbeiten',
'Edit Survey Series': 'Umfrage - Serie bearbeiten',
'Edit Survey Template': 'Umfrage Vorlage bearbeiten',
'Edit Task': 'Aufgabe bearbeiten',
'Edit Team': 'Team bearbeiten',
'Edit Theme': 'Thema bearbeiten',
'Edit Themes': 'Themen bearbeiten',
'Edit Ticket': 'Ticket bearbeiten',
'Edit Track': 'Route bearbeiten',
'Edit Training': 'Schulung bearbeiten',
'Edit Tropo Settings': 'Tropo Einstellungen bearbeiten',
'Edit User': 'Benutzer bearbeiten',
'Edit Volunteer Availability': 'Verfügbarkeit von Freiwilligem bearbeiten',
'Edit Volunteer Details': 'Details zu Freiwilligem bearbeiten',
'Edit Warehouse': 'Warenlager bearbeiten',
'Edit current record': 'Aktuellen Datensatz bearbeiten',
'Edit message': 'Nachricht bearbeiten',
'Edit': 'Bearbeiten',
'Editable?': 'Bearbeitbar?',
'Education materials received': 'Ausbildungsmaterialien erhalten',
'Education materials, source': 'Herkunft der Ausbildungsmaterialien',
'Education': 'Ausbildung/Schulung',
'Effects Inventory': 'Auswirkungsbestandliste',
'Eggs': 'Eier',
'Either a shelter or a location must be specified': 'Es muss entweder eine Unterkunft oder ein Standort angegeben werden',
'Either file upload or document URL required.': 'Es ist entweder ein Dateiupload oder ein URL erforderlich',
'Either file upload or image URL required.': 'Es ist entweder ein Dateiupload oder eine Bild-URL erforderlich',
'Elderly person headed households (>60 yrs)': 'Von älteren Menschen (>60 Jahren) geführte Haushalte',
'Electrical': 'elektrisch',
'Electrical, gas, sewerage, water, hazmats': 'Elektrik, Gas, Abwasser, Wasser, Gefahrgut',
'Elevated': 'Erhöht',
'Elevation': 'Höhe',
'Elevators': 'Aufzüge',
'Eligible for Allowance': 'Berechtigt für Taschengeld',
'Email Address': 'E-Mail-Adresse',
'Email Channels (Inbound)': 'E-Mail Kanäle (eingehend)',
'Email InBox': 'E-Mail Eingang',
'Email Settings': 'E-Mail-Einstellungen',
'Email settings updated': 'E-Mail-Einstellungen aktualisiert',
'Email': 'E-Mail',
'Embalming': 'Einbalsamierung',
'Embassy': 'Botschaft',
'Emergencies': 'Notfälle',
'Emergency': 'Notfall',
'Emergency Capacity Building project': 'Notfall-Kompetenzbildungsprojekt',
'Emergency Contacts': 'Notfallkontakte',
'Emergency Department': 'Notfall-Abteilung',
'Emergency Shelter': 'Notunterkunft',
'Emergency Support Facility': 'Notfall-Unterstützungseinrichtung',
'Emergency Support Service': 'Notfall-Unterstützungsdienst',
'Emergency Telecommunications': 'Notfall-Telekommunikation',
'Enable/Disable Layers': 'Layer aktivieren/deaktivieren',
'Enabled': 'Aktiviert',
'Enabled?': 'Aktiviert?',
'End Date': 'Enddatum',
'End date should be after start date': 'Enddatum muss nach dem Startdatum liegen',
'End date': 'Enddatum',
'End of Period': 'Ende des Zeitraums',
'Enter a GPS Coord': 'Geben Sie eine GPS Koordinate ein',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Geben Sie einen Namen für die Tabelle, die Sie hochladen an (obligatorisch).',
'Enter a new support request.': 'Geben Sie eine neue Unterstützungsanfrage ein.',
'Enter a unique label!': 'Geben Sie eine eindeutige Bezeichnung ein!',
'Enter a valid date before': 'Geben Sie zuvor eine gültiges Datum ein',
'Enter a valid email': 'Geben Sie eine gültige E-Mail-Adresse ein',
'Enter a valid future date': 'Geben Sie ein gültiges, zukünftiges Datum ein',
'Enter some characters to bring up a list of possible matches': 'Geben Sie einige Zeichen ein um eine Liste möglicher Übereinstimmungen anzuzeigen',
'Enter some characters to bring up a list of possible matches.': 'Geben Sie einige Zeichen ein um eine Liste von möglichen Übereinstimmungen anzuzeigen.',
'Enter tags separated by commas.': 'Geben Sie die Tags mit Komma getrennt ein.',
'Enter the same password as above': 'Wiederholen Sie das Kennwort von oben',
'Entered': 'Eingegeben',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Die Eingabe einer Telefonnummer ist freiwillig, sie erlaubt Ihnen aber SMS-Nachrichten zu abonnieren und zu empfangen.',
'Entry deleted': 'Eintrag gelöscht',
'Environment': 'Umgebung/Umwelt',
'Equipment': 'Ausrüstung',
'Error Tickets': 'Fehlertickets',
'Error encountered while applying the theme.': 'Bei der Anwendung des Themas ist ein Fehler aufgetreten.',
'Error in message': 'Fehler in der Nachricht',
"Error logs for '%(app)s'": 'Fehlerprotokolle für "%(app)s"',
'Errors': 'Fehler',
'ESRI Shapefile': 'ESRI Shapefile',
'Essential Staff': 'Unverzichtbarer Mitarbeiter',
'Est. Delivery Date': 'Geschätztes Lieferdatum',
'Estimated # of households who are affected by the emergency': 'Geschätzte Anzahl von Haushalten, die vom Notfall betroffen sind',
'Estimated # of people who are affected by the emergency': 'Geschätzte Anzahl von Menschen, die vom Notfall betroffen sind',
'Estimated Overall Building Damage': 'Geschätzter allgemeiner Gebäudeschaden',
'Estimated Population': 'Geschätzte Bevölkerungszahl',
'Estimated total number of people in institutions': 'Geschätzte Gesamtzahl von Menschen in Einrichtungen',
'Estimated Delivery Date': 'Voraus. Liefertermin',
'Euros': 'Euro',
'Evacuating': 'Evakuieren',
'Evacuees Capacity (Day and Night)': 'Evakuierungspotential (Tag und Nacht)',
'Evacuees Capacity (Night only)': 'Evakuierungspotential (nur Nacht)',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Informationen in dieser Nachricht bewerten. (Dieser Wert sollte NICHT in öffentlichen Warnung verwendet werden.)',
'Event Details': 'Details zum Ereignis',
'Event Type': 'Ereignistyp',
'Event Types': 'Ereignistypen',
'Event added': 'Ereignis hinzugefügt',
'Event deleted': 'Ereignis gelöscht',
'Event updated': 'Ereignis aktualisiert',
'Event': 'Ereignis',
'Events': 'Ereignisse',
'Example': 'Beispiel',
'Exceeded': 'Überschritten',
'Excellent': 'Ausgezeichnet',
'Exclude contents': 'Inhalte ausschließen',
'Excreta disposal': 'Entsorgung von Exkrementen',
'Execute a pre-planned activity identified in <instruction>': 'Ausführen einer vorausgeplanten Aktivität, identifiziert in <instruction>',
'Exercise': 'Übung',
'Exercise?': 'Übung?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Übungen bedeuten, dass alle Anzeigen eine Wassermarke & alle Benachrichtigungen ein Präfix haben.',
'Existing Placard Type': 'Vorhandener Plakattyp',
'Existing food stocks': 'Vorhandener Lebensmittelvorrat',
'Existing location cannot be converted into a group.': 'Vorhandener Standort kann nicht in eine Gruppe transformiert werden.',
'Exits': 'Ausgänge',
'Experience': 'Erfahrung',
'Expiration Date': 'Ablaufdatum',
'Expiration Report': 'Ablaufbericht',
'Expired?': 'Abgelaufen?',
'Expiring Staff Contracts Report': 'Berichte zu ablaufenden Mitarbeiterverträgen',
'Expiry Date': 'Ablaufdatum',
'Expiry (month)': 'Ablauf (Monat)',
'Expiry (months)': 'Ablauf (Monate)',
'Explosive Hazard': 'Explosionsgefahr',
'Export as': 'Exportieren als',
'Export Data': 'Daten exportieren',
'Export Database as CSV': 'Datenbank als CSV exportieren',
'Export in GPX format': 'Als GPX Format exportieren',
'Export in KML format': 'Als KML Format exportieren',
'Export in OSM format': 'Als OSM Format exportieren',
'Export in PDF format': 'In PDF Format exportieren',
'Export in RSS format': 'In RSS Format exportieren',
'Export in XLS format': 'In XLS Format exportieren',
'Exterior Only': 'Nur Externe',
'Exterior and Interior': 'Externe und Interne',
'Eye Color': 'Augenfarbe',
'Facebook Channels': 'Facebook Kanäle',
'Facial hair, color': 'Gesichtsbehaarung, Farbe',
'Facial hair, type': 'Gesichtsbehaarung, Art',
'Facial hear, length': 'Gesichtsbehaarung, Länge',
'Facility': 'Einrichtung',
'Facilities': 'Einrichtungen',
'Facility Details': 'Details zur Einrichtung',
'Facility Operations': 'Einrichtungsmanagement',
'Facility Status': 'Status der Einrichtung',
'Facility Type': 'Einrichtungstyp',
'Facility Types': 'Einrichtungstypen',
'Facility added': 'Einrichtung hinzugefügt',
'Facility or Location': 'Einrichtung oder Standort',
'Facility removed': 'Einrichtung entfernt',
'Facility updated': 'Einrichtung aktualisiert',
'Facility': 'Einrichtung',
'Fail': 'Fehlgeschlagen',
'Failed!': 'Fehlgeschlagen!',
'Fair': 'Mäßig',
'Falling Object Hazard': 'Gefahr durch herabstürzende Objekte',
'Families/HH': 'Familien/HH',
'Family tarpaulins received': 'Familien hat Planen erhalten',
'Family tarpaulins, source': 'Herkunft der Planen für Familie',
'Family': 'Familie',
'Family Members': 'Familienmitglieder',
'Family Reunification': 'Familienzusammenführung',
'Family/friends': 'Familie/Freunde',
'Farmland/fishing material assistance, Rank': 'Ackerland/Materialhilfe für Fischerei, Rang',
'Fatalities': 'Verstorbene',
'Father': 'Vater',
'Feature Layer added': 'Objekt-Layer hinzugefügt',
'Feature Layer deleted': 'Objekt-Layer gelöscht',
'Feature Layer updated': 'Objekt-Layer aktualisiert',
'Feature Layers': 'Objekt-Ebenen',
'Feature Namespace': 'Namespace des Objekts',
'Feature Request': 'Objekt-Anfrage',
'Feature Type': 'Objektart',
'Features Include': 'Beinhaltete Objekte',
'Federal State': 'Bundesland',
'Feeds': 'Newsfeeds',
'Female headed households': 'Weiblich geführte Haushalte',
'Female': 'Weiblich',
'Few': 'Wenige',
'Field Hospital': 'Feldlazarett',
'Field': 'Feld',
'File': 'Datei',
'Fill in Latitude': 'Geben Sie den Breitengrad ein',
'Fill in Longitude': 'Geben Sie den Längengrad ein',
'Filter Options': 'Filteroptionen',
'Filter by Tag': 'Nach Tag filtern',
'Filter by Location': 'Nach Standort filtern',
'Filter by Organization': 'Nach Organisation filtern',
'Filter by Date': 'Nach Datum filtern',
'Filter Field': 'Filter Feld',
'Filter Tweets by the date they were tweeted on': 'Filtere Tweets nach dem Datum der Sendung',
'Filter Tweets by who tweeted them': 'Filtere Tweets nach sendender Person',
'Filter Value': 'Filter Wert',
'Find Dead Body Report': 'Suche Leichenbericht',
'Find Hospital': 'Krankenhaus finden',
'Find Person Record': 'Personendatensatz finden',
'Find Volunteers': 'Freiwillige finden',
'Find a Person Record': 'Suche einen Personendatensatz',
'Find': 'Suchen',
'Fingerprint': 'Fingerabdruck',
'Fingerprinting': 'Fingerabdrücke machen',
'Fingerprints': 'Fingerabdrücke',
'Finished Jobs': 'Erledigte Jobs',
'Fire suppression and rescue': 'Feuer - Eindämmung und Rettung',
'Fire': 'Feuer',
'First': 'Erste',
'First Name': 'Vorname',
'First name': 'Vorname',
'Fishing': 'Fischerei',
'Flags': 'Flaggen',
'Flash Flood': 'Sturzflut',
'Flash Freeze': 'Schockfrost',
'Flexible Impact Assessments': 'Flexible Folgenabschätzungen',
'Flood Alerts show water levels in various parts of the country': 'Flut Alarme zeigen Wasserstände in verschiedenen Teilen des Landes.',
'Flood Alerts': 'Flut Alarme',
'Flood Depth': 'Fluthöhe',
'Flood Report Details': 'Details zum Flutbericht',
'Flood Report added': 'Flutbericht hinzugefügt',
'Flood Report deleted': 'Flutbericht gelöscht',
'Flood Report updated': 'Flutbericht aktualisiert',
'Flood Report': 'Flutbericht',
'Flood Reports': 'Flutberichte',
'Flood': 'Flut',
'Flow Status': 'Status des Ablaufs',
'fluent': 'fließend',
'Fog': 'Nebel',
'Folder': 'Ordner',
'Follow up': 'Wiedervorlage',
'Follow-up required': 'Wiedervorlage erforderlich',
'Food Supply': 'Lebensmittelversorgung',
'Food assistance': 'Lebensmittel Hilfe',
'Food': 'Lebensmittel',
'Footer file %s missing!': 'Fußzeile Datei %s fehlt!',
'Footer': 'Fußzeile',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Für ein Land wäre dies der ISO2-Code, für eine Stadt wäre es der Flughafen Code.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Für jeden Sync-Partner gibt es einen standard Sync Job, der nach einem vordefiniertem Zeitintervall ausgeführt wird. Sie können auch mehrere Sync Jobs festlegen welche nach ihren Anforderungen entsprechend ausgeführt werden. Klicken Sie auf den Link rechts um zu beginnen.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Für erweiterte Sicherheit empfiehlt sich die Eingabe eines Benutzernamens und Passworts. Bitte benachrichtigen Sie die Administratoren der anderen Geräte in Ihrem Unternehmen damit diese die Zugangsdaten unter dem Punkt Synchronization -> Sync-Partner einrichten.',
'For live help from the Sahana community on using this application, go to': 'Für direkte Hilfe von der Sahana Community zur Anwendung dieses Programmes, gehen Sie zu',
'For messages that support alert network internal functions': 'Für Nachrichten, die Netzwerkswarnungen interner Funktionen unterstützen',
'For more details on the Sahana Eden system, see the': 'Weitere Informationen zum Sahana Eden System finden Sie unter',
'For more information, see': 'Weitere Informationen finden Sie unter',
'For': 'Für',
'Forest Fire': 'Waldbrand',
'Formal camp': 'Offizielles Camp',
'Forms': 'Formulare',
'Found': 'Gefunden',
'Foundations': 'Stiftungen',
'Freezing Drizzle': 'Gefrierender Nieselregen',
'Freezing Rain': 'Gefrierender Regen',
'Freezing Spray': 'Gefrierende Gischt',
'French': 'Französisch',
'Friday': 'Freitag',
'From Adress': 'Herkunftsadresse',
'From Address': 'Herkunftsadresse',
'From Facility': 'Von Einrichtung',
'From Inventory': 'Aus dem Bestand',
'From Location': 'Vom Standort',
'From Organization': 'Von der Organisation',
'From': 'Von',
'From ': 'Von ',
'Fulfil. Status': 'Status der Bedarfsdeckung',
'Fulfill Status': 'Status der Bedarfsdeckung',
'Fulfillment Status': 'Auftragserfüllungsstatus',
'Full beard': 'Vollbart',
'Full': 'vollständig, voll, ganz',
'Fullscreen Map': 'Großbild Karte',
'Functions available': 'Verfügbare Funktionen',
'Funding': 'Finanzierung',
'Funding Organization': 'Finanzierende Organisation',
'Funeral': 'Beerdigung',
'Further Action Recommended': 'Weitere Aktivität empfohlen',
'GIS Reports of Shelter': 'GIS-Berichte der Unterkünfte',
'GIS integration to view location details of the Shelter': 'GIS-Integration um Details zum Standort der Unterkunft zu erhalten',
"Google Earth's Keyhole Markup Language": "Google Earth's Keyhole Markup Language",
'GPS Marker': 'GPS Markierung/Symbol',
'GPS Track File': 'GPS Track Datei',
'GPS Track': 'GPS Track',
'GPX Track': 'GPX Track',
'GPS eXchange format': 'GPS Geräte Austauschformat',
'Gap Analysis Map': 'Karte zur Lückenanalyse',
'Gap Analysis Report': 'Bericht zur Lückenanalyse',
'Gap Analysis': 'Lückenanalyse',
'Gap Map': 'Lückenkarte',
'Gap Report': 'Bericht über Lücken',
'Gateway Settings': 'Gateway-Einstellungen',
'Gateway settings updated': 'Gateway-Einstellungen aktualisiert',
'Gateway': 'Gateway',
'Gender': 'Geschlecht',
'General Comment': 'Allgemeine Bemerkung',
'General Medical/Surgical': 'Allgemein - Medizinisch/Chirurgisch',
'General emergency and public safety': 'Allgemein - Notfall und öffentliche Sicherheit',
'General information on demographics': 'Allgemein - Informationen zur Demographie',
'General': 'Allgemein',
'Geocode': 'Geocodierung',
'Geocoder Selection': 'Geocoder Auswahl',
'Geometry Name': 'Name der Geometrie',
'Geophysical (inc. landslide)': 'Geophysikalisch (inc. Erdrutsch)',
'Geotechnical Hazards': 'Geotechnische Gefahren',
'Geotechnical': 'Geotechnisch',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Das Modul Geraldo steht innerhalb der aktiven Python Umgebung nicht zur Verfügung - für die PDF-Ausgabe muss es nachinstalliert werden.',
'German': 'Deutsch',
'Get incoming recovery requests as RSS feed': 'Empfangen von eingehenden Bergungsanforderungen als RSS-Feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Kurze Beschreibung des Bildes, z. B. was ist wo auf dem Bild zu sehen ist (nicht verpflichtend).',
'Give information about where and when you have seen them': 'Geben Sie Information wo und wann Sie sie gesehen haben',
'Global Messaging Settings': 'Globale Nachrichteneinstellungen',
'Go to Request': 'Zur Anfrage',
'Go': 'Los',
'Goatee': 'Spitzbart',
'Good Condition': 'Guter Zustand',
'Good': 'Gut',
'Goods Received Note': 'Warenempfangsbestätigung',
'Government UID': 'Regierungs-UID',
'Government building': 'Regierungsgebäude',
'Government District': 'Regierungsbezirk',
'Government': 'Regierung',
'Grade': 'Klasse',
'Greek': 'Griechisch',
'Green': 'Grün',
'GRN': 'GRN',
'GRN Number': 'GRN Nummer',
'Ground movement, fissures': 'Untergrundbewegung, Risse',
'Ground movement, settlement, slips': 'Untergrundbewegung, Bodensenkung, Abrutsche',
'Group Description': 'Gruppenbeschreibung',
'Group Details': 'Gruppendetails',
'Group Head': 'Gruppenleiter',
'Group Member added': 'Gruppenmitglied hinzugefügt',
'Group Members': 'Gruppenmitglieder',
'Group Memberships': 'Gruppenzugehörigkeiten',
'Group Name': 'Gruppenname',
'Group Size Day': 'Gruppengröße Tag',
'Group Size Night': 'Gruppengröße Nacht',
'Group Title': 'Gruppentitel',
'Group Type': 'Gruppentyp',
'Group added': 'Gruppe hinzugefügt',
'Group deleted': 'Gruppe gelöscht',
'Group description': 'Gruppenbeschreibung',
'Group updated': 'Gruppe aktualisiert',
'Group': 'Gruppe',
'Grouped by': 'Gruppiert nach',
'Groups removed': 'Gruppen entfernt',
'Groups': 'Gruppen',
'GU Done': 'GU erledigt',
'Guest': 'Gast',
'HR Manager': 'Personalmanager',
'Hail': 'Hagel',
'Hair Color': 'Haarfarbe',
'Hair Length': 'Haarlänge',
'Hair Style': 'Haarschnitt',
'Has data from this Reference Document been entered into Sahana?': 'Wurden Daten von diesem Referenzdokument in Sahana eingetragen?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Wurde das Zertifikat für den Empfang der Lieferung an den Absender übergeben?',
'Has the GRN (Goods Received Note) been completed?': 'Wurde die Warenempfangsmeldung (GRN) ausgefüllt?',
'Hazard Pay': 'Gefahrenzulage',
'Hazardous Material': 'Gefahrgut',
'Hazardous Road Conditions': 'Gefährliche Straßenverhältnisse',
'Header Background': 'Hintergrund der Kopfzeile',
'Header background file %s missing!': 'Hintergrund der Kopfzeile Datei %s fehlt!',
'Headquarters': 'Hauptquartiere',
'Head of Family': 'Familienoberhaupt',
'Health care assistance, Rank': 'Unterstützung Gesundsheitspflege, Rang',
'Health center with beds': 'Gesundheitszentrum mit Betten',
'Health center without beds': 'Gesundheitszentrum ohne Betten',
'Health center': 'Gesundheitszentrum',
'Health services status': 'Status des Gesundheitswesens',
'Health': 'Gesundheit',
'Healthcare Worker': 'Arbeiter im Gesundheitswesen',
'Heat Wave': 'Hitzewelle',
'Heat and Humidity': 'Wärme und Feuchtigkeit',
'Height': 'Höhe',
'Height (cm)': 'Höhe (cm)',
'Height (m)': 'Höhe (m)',
'Height': 'Höhe',
'Heliports': 'Hubschrauberlandeplätze',
'Help': 'Hilfe',
'Help Wanted': 'Hilfe benötigt',
'Helps to monitor status of hospitals': 'Hilfe um den Status von Krankenhäusern zu überwachen',
'Helps to report and search for missing persons': 'Hilfe beim Melden von und bei der Suche nach vermissten Personen',
'Here are the solution items related to the problem.': 'Hier sind die mit diesem Problem verbundenen Lösungselemente.',
'Heritage Listed': 'Erbe aufgelistet',
'Hide': 'Verstecken',
'Hierarchy': 'Hierarchie',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierachiestufe 0 Name (d.h. Land)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierachiestufe 1 Name (z. B. Land oder Provinz / Gebiet)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierachiestufe 2 Name (z. B. Bezirk)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierachiestufe 3 Name (z. B. Ort / Stadt / Dorf)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierachiestufe 4 Name (z.B. Nachbarschaft)',
'Hierarchy Level 5 Name': 'Hierarchie Stufe 5 Name',
'High Tide Depth': 'Tiefe bei maximaler Tide',
'High Water': 'Hochwasser',
'High': 'Hoch',
'Highest Priority Open Requests': 'Offene Anfragen höchster Priorität',
'History': 'Geschichte',
'Hit the back button on your browser to try again.': 'Verwenden Sie die Back Schaltfläche ihres Browsers um es erneut zu versuchen.',
'Holiday Address': 'Urlaubsadresse',
'Home Address': 'Heimatsadresse',
'Home Country': 'Land des Wohnsitzes',
'Home Crime': 'Häusliche Kriminalität',
'Home': 'Startseite',
'Hospital Details': 'Details zum Krankenhaus',
'Hospital Status Report': 'Statusbericht zum Krankenhaus',
'Hospital information added': 'Krankenhausinformationen hinzugefügt',
'Hospital information deleted': 'Krankenhausinformationen gelöscht',
'Hospital information updated': 'Krankenhausinformationen aktualisiert',
'Hospital status assessment.': 'Beurteilung des Zustands des Krankenhauses.',
'Hospital': 'Krankenhaus',
'Hospitals': 'Krankenhäuser',
'Hour': 'Stunde',
'Hours': 'Stunden',
'Hours by': 'Stunden gem.',
'Hours by Program Import': 'Stunden gem. Programm Import',
'Hours by Program Report': 'Stunden nach Programmbericht',
'Hours by Role Import': 'Stunden gem. Rollen Import',
'Hours by Role Report': 'Stunden nach Rollenbericht',
'Household kits received': 'Haushaltsbausätze (-kits) erhalten',
'Household kits, source': 'Herkunft der Haushaltbausätze (-kits)',
'Housing Unit': 'Gebäude',
'How does it work?': 'Wie funktioniert das?',
'How is this person affected by the disaster? (Select all that apply)': 'Wie ist diese Person von der Katastrophe betroffen? (Wählen Sie alles Zutreffende aus)',
'How long will the food last?': 'Wie lange werden die Lebensmittel reichen?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Men (18 yrs+) are Dead due to the crisis': 'Wieviele Männer (18 Jahre+) sind durch die Krise umgekommen',
'How many Men (18 yrs+) are Injured due to the crisis': 'Wie viele Männer (18 + Jahre) wurden wegen der Krise verletzt',
'How many Men (18 yrs+) are Missing due to the crisis': 'Wie viele Männer (18 + Jahre) sind aufgrund der Krise verschollen',
'How many Women (18 yrs+) are Dead due to the crisis': 'Wieviele Frauen (18+ Jahre) sind durch die Krise umgekommen',
'How many Women (18 yrs+) are Injured due to the crisis': 'Wieviele Frauen (18+ Jahre) wurden wegen der Krise verletzt',
'How many Women (18 yrs+) are Missing due to the crisis': 'Wie viele Frauen (18 Jahre und älter) sind aufgrund der Krise verschollen',
'How many days will the supplies last?': 'Wie viele Tage werden die Waren reichen?',
'How many new cases have been admitted to this facility in the past 24h?': 'Wie viele neue Fälle wurden während der letzten 24 Stunden dieser Einrichtung zugewiesen?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Wie viele der Patienten mit dieser Krankheit sind in den letzten 24 Stunden in dieser Einrichtung gestorben?',
'How many patients with the disease are currently hospitalized at this facility?': 'Wieviele Patienten mit dieser Krankheit sind momentan in dieser Einrichtung in Behandlung?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Wie viele Details sind sichtbar. Eine hohe Zoom-Stufe bedeutet viele Details, aber keine gute Übersicht. Eine niedrige Zoom-Stufe führt zu einer guten Übersicht, es fehlen aber die Details.',
'Hub': 'Zentrum',
'Human Resource Details': 'Details zur Personalressource',
'Human Resource Management': 'Management der Personalressourcen',
'Human Resource added': 'Personalressource hinzugefügt',
'Human Resource removed': 'Personalressource entfernt',
'Human Resource updated': 'Personalressource aktualisiert',
'Human Resource': 'Personalressource',
'Human Resources': 'Personalressourcen',
'Humanitarian NGO': 'Humanitäre NGO',
'Humanitarian Use': 'Humanitäre Zwecke',
'Hurricane Force Wind': 'Wind in Hurrikanstärke',
'Hurricane': 'Wirbelsturm',
'Hygiene kits received': 'Hygienekits empfangen',
'Hygiene kits, source': 'Herkunft der Hygienekits',
'Hygiene practice': 'Hygienepraxis',
'Hygiene problems': 'Hygieneprobleme',
'I am available in the following area(s)': 'Ich stehe in folgenden Bereichen zur Verfügung',
'IATA': 'IATA',
'ICAO': 'ICAO',
'ID Tag Number': 'Identifikations-Etikett-Nummer',
'ID Tag': 'Identifikationsetikett',
'ID Type': 'ID-Typ',
'Ice Pressure': 'Eisdruck',
'Iceberg': 'Eisberg',
'Identification Report': 'Identifizierungsbericht',
'Identification Reports': 'Identifizierungsberichte',
'Identification Status': 'Status der Identifizierung',
'Identification': 'Identifizierung',
'Identified as': 'Identifiziert als',
'Identified by': 'Identifiziert durch',
'Identity Details': 'Details zur Identität',
'Identity added': 'Identität hinzugefügt',
'Identity deleted': 'Identität gelöscht',
'Identity updated': 'Identität aktualisiert',
'Identity': 'Identität',
'If a ticket was issued then please provide the Ticket ID.': 'Wenn ein Ticket ausgestellt wurde, bitte die Ticket-ID angeben.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Wenn ein Benutzer sicherstellt, dass er oder sie eine Email-Adresse in dieser Domäne besitzt, wird das Approver Feld dazu verwendet, um zu bestimmen ob und von wem weitere Genehmigungen erforderlich sind.',
'If it is a URL leading to HTML, then this will downloaded.': 'Handelt es sich um eine URL zu einer HTML Seite, dann wird diese heruntergeladen.',
'If neither are defined, then the Default Marker is used.': 'Wenn nichts davon definiert wurde, wird der Standard Marker (Symbol) verwendet.',
'If no marker defined then the system default marker is used': 'Wenn keine Markierung (Symbolisierung) definiert ist dann wird die im System festgelegte Standardmarkierung verwendet',
'If no, specify why': 'Wenn nein, geben Sie bitte einen Grund dafür an',
'If none are selected, then all are searched.': 'Wird keine ausgewählt, werden alle durchsucht.',
'If the location is a geographic area, then state at what level here.': 'Wenn der Ort ein geographisches Gebiet ist, geben Sie bitte eine entsprechende Stufe an',
'If the request type is "Other", please enter request details here.': 'Wenn der Anfragetyp "Andere" ist, geben Sie bitte hier weitere Details zur Anfrage ein.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer mit der gleichen Domainadresse automatisch als Mitarbeiter dieser Organisation zugeordnet.',
'If this is set to True then mails will be deleted from the server after downloading.': "Wenn dies auf 'Wahr' gesetzt ist, dann werden die Mails nach dem Herunterladen vom Server gelöscht.",
'If this record should be restricted then select which role is required to access the record here.': 'Wenn der Zugriff auf diesen Datensatz beschränkt werden soll, wählen Sie hier die Rolle aus, die für den Zugriff erforderlich ist.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Wenn dieser Eintrag beschränkt werden soll, dann wählen Sie hier aus, welche Rolle(n) für den Zugriff auf den Eintrag berechtigt sind.',
'If yes, specify what and by whom': 'Wenn ja, geben Sie an, was und von wem',
'If yes, which and how': 'Wenn ja, welche und wie',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt damit die Daten verifiziert werden können.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Wenn sie die Geonames ID des Standortes wissen, dann können Sie diese hier eingeben.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Wenn sie die OSM ID dieser des Standortes wissen, dann können Sie diese hier eingeben.',
'If you need to add a new document then you can click here to attach one.': 'Wenn Sie ein neues Dokument hinzufügen wollen, dann können Sie hier klicken, um eines anzufügen.',
'If you want several values, then separate with': 'Wenn Sie mehrere Werte möchten, dann trennen Sie diese mit',
'If you would like to help, then please': 'Wenn Sie helfen möchten, dann bitte',
'Ignore Errors?': 'Fehler ignorieren?',
'Illegal Immigrant': 'Illegaler Einwanderer',
'Illiterate': 'Analphabet',
'illiterate': 'Analphabet',
'Image Details': 'Details zum Bild',
'Image Tags': 'Tags für Bild',
'Image Type': 'Typ des Bilds',
'Image Upload': 'Bild hochladen',
'Image added': 'Bild hinzugefügt',
'Image deleted': 'Bild gelöscht',
'Image updated': 'Bild aktualisiert',
'Image': 'Bild',
'Imagery': 'Bilddaten',
'Images': 'Bilder',
'Impact Assessments': 'Folgenabschätzung',
'Impact Details': 'Details zur Folge/Auswirkung',
'Impact Type Details': 'Details zum Typ der Auswirkung',
'Impact Type added': 'Typ der Auswirkung hinzugefügt',
'Impact Type deleted': 'Typ der Auswirkung gelöscht',
'Impact Type updated': 'Typ der Auswirkung aktualisiert',
'Impact Type': 'Auswirkungsarten',
'Impact Types': 'Auswirkungsarten',
'Impact added': 'Auswirkung hinzugefügt',
'Impact deleted': 'Auswirkung gelöscht',
'Impact updated': 'Auswirkung aktualisiert',
'Impacts': 'Auswirkungen',
'Import & Export Data': 'Import & Export von Daten',
'Import Catalog Items': 'Importiere Katalogartikel',
'Import Data': 'Import von Daten',
'Import Event Types': 'Importiere Ereignistypen',
'Import File': 'Datei importieren',
'Import Heliports': 'Hubschrauberlandeplätze importieren',
'Import Incident Types': 'Ereignistypen importieren',
'Import Locations': 'Gebiete/Standorte importieren',
'Import Projects': 'Projekte importieren',
'Import Staff': 'Mitarbeiter importieren',
'Import Suppliers': 'Lieferanten importieren',
'Import Training Participants': 'Kursteilnehmer importieren',
'Import Users': 'Import von Benutzern',
'Import Volunteers': 'Freiwillige importieren',
'Import Warehouse Stock': 'Warenlagerbestand importieren',
'Import Warehouses': 'Warenlager importieren',
'Import and Export': 'Import und Export',
'Import from CSV': 'Import einer CSV-Datei',
'Import from OpenStreetMap': 'Import aus OpenStreetMap',
'Import from Ushahidi Instance': 'Import aus Ushahidi Instanz',
'Import Hours': 'Import Stundenliste',
'Import if Master': 'Import wenn Master',
'Import multiple tables as CSV': 'Mehrere Tabellen als CSV importieren',
'Import Participant List': 'Import Teilnehmerliste',
'Import Template Layout': 'Import Vorlagenlayout',
'Import Templates': 'Import Vorlagen',
'Import': 'Import',
'Important': 'Wichtig',
'Importantly where there are no aid services being provided': 'Bedeutsam wo keine Hilfsleistungen angeboten werden',
'Importing data from spreadsheets': 'Importieren von Daten aus Tabellendokumenten',
'Improper decontamination': 'Unzureichende Dekontamination',
'Improper handling of dead bodies': 'Unzureichende Behandlung von Leichen',
'In Catalogs': 'In Katalogen',
'In Inventories': 'In den Beständen',
'In Process': 'In Bearbeitung',
'In Progress': 'In Arbeit',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Beim Aufbau des Fensters wird die Karte maximiert um das Fenster auszufüllen, daher ist es nicht notwendig hier einen grossen Wert festzulegen.',
'Inbound Mail Settings': 'Eingehende Mail-Einstellungen',
'InBox': 'Eingang',
'Incident Categories': 'Kategorien für Vorfälle ',
'Incident Report Details': 'Details zum Vorfall-Bericht',
'Incident Report added': 'Vorfall-Bericht hinzugefügt',
'Incident Report deleted': 'Vorfall-Bericht gelöscht',
'Incident Report updated': 'Vorfall-Bericht aktualisiert',
'Incident Report': 'Vorfall-Bericht',
'Incident Reporting System': 'Vorfall-Berichtsystem',
'Incident Reporting': 'Vorfall-Berichtswesen',
'Incident Reports': 'Vorfall-Berichte',
'Incident': 'Vorfall',
'Incidents': 'Vorfälle',
'Incident Type': 'Vorfallstyp',
'Incident Types': 'Typen von Vorfällen',
'Incident Timeline': 'Zeitplan der Ereignisse',
'Incoming Shipment canceled': 'Eingehende Sendung abgebrochen',
'Incoming Shipment updated': 'Eingehende Sendung aktualisiert',
'Incoming': 'Eingehend',
'Incomplete': 'Unvollständig',
'Individuals': 'Einzelpersonen',
'Indirect support cost HQ': 'Indirekte Unterstützungskosten Hauptquartier',
'Industrial Crime': 'Industrielle Kriminalität',
'Industrial': 'Industriell',
'Industry Fire': 'Industriefeuer',
'Infant (0-1)': 'Säugling (0-1)',
'Infectious Disease (Hazardous Material)': 'Ansteckende Krankheit (gefährliches Material)',
'Infectious Disease': 'Ansteckende Krankheit',
'Infectious Diseases': 'Infektionskrankheiten',
'Infestation': 'Befall',
'Informal Leader': 'Informeller Leiter',
'Informal camp': 'Informelles Camp',
'Information gaps': 'Informationslücken',
'Infusion catheters available': 'Infusionskatheter verfügbar',
'Infusion catheters need per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusion catheters needed per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusions available': 'Infusionen verfügbar',
'Infusions needed per 24h': 'Benötigte Infusionen pro 24h',
'Initials': 'Namenskürzel',
'Inspected': 'Geprüft',
'Inspection Date': 'Prüfdatum',
'Inspection date and time': 'Datum und Uhrzeit der Überprüfung',
'Inspection time': 'Zeit der Überprüfung',
'Inspector ID': 'Prüfer-ID',
'Instant Porridge': 'Hafer Fertigbrei',
'Institution': 'Institution',
'Instructor': 'Ausbilder',
'Insufficient vars: Need module, resource, jresource, instance': 'Unzureichende vars: Benötige module, resource, jresource, instance',
'Insufficient': 'Nicht ausreichend',
'Intake Items': 'Annahme Güter',
'Intergovernmental Organization': 'Zwischenstaatliche Organisation',
'Interior walls, partitions': 'Innere Wände, Partitionen',
'Internal Resources': 'Interne Ressourcen',
'Internal Resource': 'Interne Ressource',
'Internal Shipment': 'Interne Lieferung',
'Internal State': 'Interner Zustand',
'International NGO': 'Internationale NGO',
'International Organization': 'Internationale Organisation',
'interpreter required': 'Dolmetscher erforderlich',
'Interview taking place at': 'Ort des Interviews',
'inv Home Page': 'inv Homepage',
'Invalid Query': 'Ungültige Abfrage',
'Invalid request!': 'Ungültige Anfrage!',
'Invalid ticket': 'Ungültiges Ticket',
'Invalid': 'Ungültig / Invalide',
'Inventories': 'Bestände',
'Inventory': 'Bestand',
'Inventory Item Details': 'Details zu einzelnem Bestandsartikel',
'Inventory Item updated': 'Bestandsartikel aktualisiert',
'Inventory Item': 'Bestandsartikel',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Bestandsartikel umfassen sowohl Verbrauchsmaterialien als auch solche die am Bestimmungsort in Anlagen umgewandelt werden.',
'Inventory Items': 'Bestandsartikel',
'Inventory Management': 'Lagerbestandsverwaltung',
'Inventory of Effects': 'Bestand von Vermögenswerten',
'Is editing level L%d locations allowed?': 'Ist die Bearbeitung von Level L%d Standorten zulässig?',
'Is it safe to collect water?': 'Ist es sicher Wasser zu sammeln?',
'Is this a strict hierarchy?': 'Ist dies eine strenge Hierarchie?',
'Issuing Authority': 'Ausstellende Behörde',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Es erfasst nicht nur die Orte wo sie aktiv sind, sondern erfasst auch Informationen über den Umfang der Projekte die sie im jeweiligen Gebiet durchführen.',
'Item Added to Shipment': 'Artikel der Lieferung hinzugefügt',
'Item Catalog Details': 'Details zum Artikelkatalog',
'Item Categories': 'Artikelkategorien',
'Item Category Details': ' Details zur Artikelkategorie',
'Item Category added': 'Artikelkategorie hinzugefügt',
'Item Category deleted': 'Artikelkategorie gelöscht',
'Item Category updated': 'Artikelkategorie aktualisiert',
'Item Category': 'Artikelkategorie',
'Item Details': 'Details zum Artikel',
'Item Pack Details': 'Details zum Artikelpaket ',
'Item Pack added': 'Artikelpaket hinzugefügt',
'Item Pack deleted': 'Artikelpaket gelöscht',
'Item Pack updated': 'Artikelpaket aktualisiert',
'Item Packs': 'Artikelpaket',
'Item Tracking Status': 'Artikel Verfolgungsstatus',
'Item/Description': 'Artikel/Beschreibung',
'Items/Description': 'Artikel/Beschreibung',
'Item added to Inventory': 'Artikel zum Bestand hinzugefügt',
'Item added to shipment': 'Artikel der Lieferung hinzugefügt',
'Item added': 'Artikel hinzugefügt',
'Item already in Bundle!': 'Artikel bereits in Produktpaket!',
'Item already in Kit!': 'Artikel bereits in Ausstattung (Kit)!',
'Item already in budget!': 'Artikel bereits im Budget!',
'Item deleted': 'Artikel gelöscht',
'Item removed from Inventory': 'Artikel aus dem Bestand entfernt',
'Item updated': 'Artikel aktualisiert',
'Item': 'Artikel',
'Items in Category are Vehicles': 'Artikel in dieser Kategorie sind Fahrzeuge',
'Items in Category can be Assets': 'Artikel in der Kategorie können als Anlagen verwendet werden',
'Items': 'Artikel',
'Japanese': 'Japanisch',
'Jerry can': 'Kanister',
'Jew': 'Jude',
'Jewish': 'Jüdisch',
'Job Role Catalog': 'Katalog für Tätigkeiten',
'Job Role Details': 'Details zur Tätigkeit',
'Job Role added': 'Tätigkeit hinzugefügt',
'Job Role deleted': 'Tätigkeit entfernt',
'Job Role updated': 'Tätigkeit aktualisiert',
'Job Role': 'Tätigkeit',
'Job Roles': 'Tätigkeiten',
'Job Title': 'Berufsbezeichnung',
'Job Title Catalog': 'Katalog der Berufsbezeichnungen',
'Journal Entry Details': 'Details zum Journaleintrag',
'Journal entry added': 'Journaleintrag hinzugefügt',
'Journal entry deleted': 'Journaleintrag gelöscht',
'Journal entry updated': 'Journaleintrag aktualisiert',
'Key Details': 'Details zum Schlüssel',
'Key added': 'Schlüssel hinzugefügt',
'Key deleted': 'Schlüssel gelöscht',
'Key updated': 'Schlüssel aktualisiert',
'Key': 'Schlüssel',
'Keys': 'Schlüssel',
'Kit Contents': 'Inhalt der Ausstattung (Kit)',
'Kit Details': 'Details zur Ausstattung (Kit)',
'Kit Updated': 'Ausstattung (Kit) aktualisiert',
'Kit added': 'Ausstattung (Kit) hinzugefügt',
'Kit deleted': 'Ausstattung (Kit) gelöscht',
'Kit updated': 'Ausstattung (Kit) aktualisiert',
'Kits': 'Ausstattungen (Kits)',
'Kit': 'Ausstattung (Kit)',
'Kit?': 'Ausstattung (Kit)?',
'Kitting': 'Ausstattung zusammenstellen',
'Known Identities': 'Bekannte Identitäten',
'Known incidents of violence against women/girls': 'Bekannte Fälle von Gewalt gegen Frauen/Mädchen',
'Known incidents of violence since disaster': 'Bekannte Fälle von Gewalt seit der Katastrophe',
'LICENSE': 'LIZENZ',
'Lack of material': 'Mangel an Material',
'Lack of school uniform': 'Fehlende Schuluniformen',
'Lack of supplies at school': 'Fehlende Vorräte an der Schule',
'Lack of transport to school': 'Fehlende Transportmöglichkeiten zur Schule',
'Lactating women': 'Stillende Frauen',
'Lahar': 'Mure',
'Landslide': 'Erdrutsch',
'Language': 'Sprache',
'Language / Communication Mode': 'Sprache / Verständigungsmodus',
'Last Downloaded': 'Zuletzt heruntergeladen',
'Last Name': 'Nachname',
'Last Pull': 'Letzter Pull',
'Last Push': 'Letzter Push',
'Last known location': 'Letzte bekannte Position',
'Last synchronization time': 'Zeitpunkt der letzten Synchronisierung',
'Last updated by': 'Letzte Aktualisierung durch',
'Last updated on': 'Letzte Aktualisierung am',
'Last updated': 'Letzte Aktualisierung',
'Last': 'Letzte',
'Latest Information': 'Aktuelle Informationen',
'Latitude & Longitude': 'Breitengrad und Längengrad',
'Latitude is North-South (Up-Down).': 'Breitengrad ist Nord-Süd (Oben-Unten).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Der Breitengrad ist Null am Äquator, Positiv auf der nördlichen und negativ auf der südlichen Erdhalbkugel.',
'Latitude of Map Center': 'Breitengrad der Kartenmitte',
'Latitude of far northern end of the region of interest.': 'Nördlichster Breitengrad der betroffenen Region',
'Latitude of far southern end of the region of interest.': 'Südlichster Breitengrad der betroffenen Region',
'Latitude should be between': 'Breite muss zwischen',
'Latitude': 'Breitengrad',
'Latrines': 'Toiletten',
'Law enforcement, military, homeland and local/private security': 'Executive, Militär und andere lokale/private Sicherheitsagenturen',
'Layer Poperties': 'Kartenebenen anpassen',
'Layer added': 'Layer hinzugefügt',
'Layer deleted': 'Layer gelöscht',
'Layer updated': 'Layer aktualisiert',
'Layer': 'Kartenebene',
'Layers updated': 'Kartenebenen aktualisiert',
'Layers': 'Kartenebenen',
'Leader': 'Anführer',
'Lead Implementer': 'Hauptimplementierer',
'Legally Departed': 'Legal abgereist',
'Legend Format': 'Format der Legende',
'Legend': 'Legende',
'Length (m)': 'Länge (m)',
'Less Options': 'Weniger Optionen',
'Level of Award': 'Stufe der Auszeichnung',
'Level 1 Assessment Details': 'Stufe 1 Beurteilung - Details',
'Level 1 Assessment added': 'Stufe 1 Beurteilung hinzugefügt',
'Level 1 Assessment deleted': 'Stufe 1 Beurteilung entfernt',
'Level 1 Assessment updated': 'Stufe 1 Beurteilung aktualisiert',
'Level 1 Assessments': 'Stufe 1 Beurteilungen',
'Level 1': 'Stufe 1',
'Level 2 Assessment Details': 'Stufe 2 Beurteilung - Details',
'Level 2 Assessment added': 'Stufe 2 Beurteilung hinzugefügt',
'Level 2 Assessment deleted': 'Stufe 2 Beurteilung entfernt',
'Level 2 Assessment updated': 'Stufe 2 Beurteilung aktualisiert',
'Level 2 Assessments': 'Stufe 2 Beurteilungen',
'Level 2 or detailed engineering evaluation recommended': 'Stufe 2 oder detaillierte technische Evaluierung empfohlen',
'Level 2': 'Stufe 2',
'Level 3': 'Stufe 3',
'Level': 'Stufe',
'Library support not available for OpenID': 'OpenID wird von Bibliothek nicht unterstützt',
'License Plate': 'Nummernschild',
'LineString': 'LineString',
'Link to this result': 'Link zu dieser Liste',
'List / Add Baseline Types': 'Arten von Referenzdaten auflisten / hinzufügen',
'List / Add Impact Types': 'Arten von Auswirkungen auflisten / hinzufügen',
'List / Add Services': 'Leistungen auflisten / hinzufügen',
'List / Add Types': 'Typen auflisten / hinzufügen',
'List Activities': 'Aktivitäten auflisten',
'List All Assets': 'Alle Anlagen auflisten',
'List All Catalog Items': 'Auflisten aller Artikel aus dem Katalog',
'List All Commitments': 'Auflisten aller Zusagen',
'List All Entries': 'Alle Einträgen auflisten',
'List All Item Categories': 'Auflisten aller Artikelkategorien',
'List All Memberships': 'Alle Mitgliedschaften auflisten',
'List All Organization Approvers & Whitelists': 'Zeige alle Organisationsbestätiger & Whitelists',
'List All Received Shipments': 'Auflisten aller empfangenen Lieferungen',
'List All Records': 'Auflisten aller Datensätze',
'List All Requested Items': 'Auflisten aller angefragten Artikel',
'List All Requests': 'Auflisten aller Anfragen',
'List All Roles': 'Zeige alle Rollen',
'List All Sent Shipments': 'Liste aller gesendeten Lieferungen',
'List All Users': 'Zeige alle Nutzer',
'List All Vehicles': 'Liste aller Fahrzeuge',
'List All': 'Alle auflisten',
'List Alternative Items': 'Liste alternativer Artikel',
'List Assessment Summaries': 'Zusammenfassungen der Beurteilungen auflisten',
'List Assessments': 'Beurteilungen auflisten',
'List Assets': 'Anlagen auflisten',
'List Availability': 'Liste Verfügbarkeit',
'List Baseline Types': 'Liste der Typen von Referenzdaten',
'List Baselines': 'Liste der Referenzdaten',
'List Brands': 'Marken auflisten',
'List Budgets': 'Budgets auflisten',
'List Bundles': 'Produktpakete auflisten',
'List Camp Services': 'Liste der Leistungen im Camp',
'List Camp Types': 'Liste Typen von Camps',
'List Camps': 'Liste Camps',
'List Catalog Items': 'Katalogelemente auflisten',
'List Catalogs': 'Liste Kataloge',
'List Certificates': 'Liste Zertifikate',
'List Certifications': 'Liste Zertifizierungen',
'List Checklists': 'Checklisten Auflisten',
'List Cluster Subsectors': 'Cluster Teilbereiche Auflisten',
'List Clusters': 'Cluster Auflisten',
'List Commitment Items': 'Liste zugesagter Artikel',
'List Commitments': 'Liste Zusagen',
'List Competencies': 'Liste Kompetenzen',
'List Competency Ratings': 'Liste Kompetenzrating',
'List Conflicts': 'Liste Konflikte',
'List Contact Information': 'Liste Kontaktinformationen',
'List Contacts': 'Liste Kontakte',
'List Course Certificates': 'Liste Kurszertifikate',
'List Courses': 'Liste Kurse',
'List Credentials': 'Liste von Qualifikationen',
'List Current': 'Aktuelle Liste',
'List Documents': 'Liste Dokumente',
'List Donors': 'Liste Spender',
'List Events': 'Liste Ereignisse',
'List Facilities': 'Liste Einrichtungen',
'List Feature Layers': 'Liste Objekt-Layer',
'List Flood Reports': 'Liste Flutberichte',
'List Groups': 'Liste Gruppen',
'List Groups/View Members': 'Liste Gruppen/Anzeige der Mitglieder',
'List Hospitals': 'Liste Krankenhäuser',
'List Human Resources': 'Liste der personellen Ressourcen',
'List Identities': 'Identitäten auflisten',
'List Images': 'Bilder auflisten',
'List Impact Assessments': 'Folgenabschätzung auflisten',
'List Impact Types': 'Auswirkungsarten auflisten',
'List Impacts': 'Auswirkungen auflisten',
'List Incident Reports': 'Vorfallberichte auflisten',
'List Item Categories': 'Liste Artikelkategorien',
'List Item Packs': 'Liste der Artikelpakete',
'List Items in Inventory': 'Liste der Artikel im Bestand',
'List Items': 'Liste der Artikel',
'List Job Roles': 'Liste der Tätigkeiten',
'List Keys': 'Schlüssel auflisten',
'List Kits': 'Liste Ausstattungen (Kits)',
'List Layers': 'Liste Layer',
'List Level 1 Assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 1 assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 2 Assessments': 'Liste Stufe 2 Beurteilungen',
'List Level 2 assessments': 'Liste Stufe 2 Beurteilungen',
'List Locations': 'Standorte auflisten',
'List Log Entries': 'Protokolleinträge auflisten',
'List Map Profiles': 'Liste der Kartenkonfigurationen',
'List Markers': 'Marker/Symbole auflisten',
'List Members': 'Mitglieder auflisten',
'List Memberships': 'Mitgliedschaften auflisten',
'List Messages': 'Nachrichten auflisten',
'List Missing Persons': 'Vermisste Personen auflisten',
'List Missions': 'Liste Aufträge',
'List Need Types': 'Bedarftypen auflisten',
'List Needs': 'Bedarf auflisten',
'List Offices': 'Liste der Büros',
'List Organizations': 'Liste der Organisationen',
'List Peers': 'Liste der Peers',
'List Personal Effects': 'Liste der persönlichen Habe',
'List Persons': 'Liste der Personen',
'List Photos': 'Liste der Bilder',
'List Population Statistics': 'Liste Bevölkerungsstatistiken',
'List Positions': 'Liste der Positionen',
'List Problems': 'Liste der Probleme',
'List Projections': 'Liste der Kartenprojektionen',
'List Projects': 'Liste Projekte',
'List Rapid Assessments': 'Liste Schnell-Beurteilungen',
'List Recurring Requests': 'Liste wiederkehrender Anfragen',
'List Received Items': 'Liste empfangene Artikel',
'List Received Shipments': 'Liste empfangene Lieferungen',
'List Records': 'Liste Datensätze',
'List Registrations': 'Liste Registrierungen',
'List Reports': 'Liste Berichte',
'List Request Items': 'Angefragte Artikel auflisten',
'List Requests': 'Anfragen auflisten',
'List Resources': 'Ressourcen auflisten',
'List Rivers': 'Flüsse auflisten',
'List Roles': 'Rollen auflisten',
'List Rooms': 'Liste Räume',
'List Scenarios': 'Liste Szenarien',
'List Sections': 'Abschnitte auflisten',
'List Sectors': 'Bereiche auflisten',
'List Sent Items': 'Gesendete Artikel auflisten',
'List Sent Shipments': 'Liste verschickte Lieferungen',
'List Service Profiles': 'Leistungsprofile auflisten',
'List Settings': 'Einstellungen auflisten',
'List Shelter Services': 'Leistungen der Unterkunft auflisten',
'List Shelter Types': 'Typen der Unterkunft auflisten',
'List Shelters': 'Unterkünfte auflisten',
'List Site Needs': 'Alle Bedarfe',
'List Skill Equivalences': 'Liste Fähigkeits-Vergleichbarkeiten',
'List Skill Provisions': 'Fähigkeits-Bereitstellungen auflisten',
'List Skill Types': 'Liste der Typen von Fähigkeiten',
'List Skills': 'Liste Fähigkeiten',
'List Solutions': 'Liste Lösungen',
'List Staff Types': 'Mitarbeitertypen auflisten',
'List Status': 'Status auflisten',
'List Subscriptions': 'Abonnements anzeigen',
'List Subsectors': 'Teilbereiche auflisten',
'List Support Requests': 'Liste der Anfragen nach Unterstützung',
'List Survey Answers': 'Liste Umfrage-Antworten',
'List Survey Questions': 'Liste Umfrage-Fragen',
'List Survey Series': 'Liste Umfrage-Serien',
'List Survey Templates': 'Liste Umfrage-Vorlagen',
'List Tasks': 'Aufgaben auflisten',
'List Teams': 'Teams auflisten',
'List Themes': 'Themen auflisten',
'List Tickets': 'Tickets auflisten',
'List Tracks': 'Tracks auflisten',
'List Trainings': 'Schulungen/Ausbildung auflisten',
'List Units': 'Einheiten auflisten',
'List Users': 'Liste Benutzer',
'List Warehouses': 'Liste Warenlager',
'List all': 'Alle auflisten',
'List available Scenarios': 'Liste verfügbarer Szenarien',
'List of Items': 'Liste der Artikel',
'List of Missing Persons': 'Liste der vermissten Personen',
'List of Peers': 'Liste der Peers',
'List of Reports': 'Liste der Berichte',
'List of Requests': 'Liste der Anfragen',
'List of Spreadsheets uploaded': 'Liste der hochgeladenen Tabellen',
'List of Spreadsheets': 'Liste der Tabellen',
'List of Volunteers for this skill set': 'Liste der Freiwilligen für dieses Fachgebiet',
'List of Volunteers': 'Liste der Freiwilligen',
'List of addresses': 'Liste der Adressen',
'List unidentified': 'Nicht identifizierte Objekte auflisten',
'List': 'Liste',
'List/Add': 'Auflisten/Hinzufügen',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Liste "Wer macht was & wo". Ermöglicht Hilfsorganizationen, ihre Aktivitäten zu koordinieren',
'Literacy': 'Schriftkundigkeit',
'literate': 'schriftkundig',
'Live Help': 'Aktuelle Hilfe',
'Livelihood': 'Lebensgrundlage',
'Load Cleaned Data into Database': 'Bereinigte Daten in die Datenbank laden',
'Load Raw File into Grid': 'Unformatierte Datei ins Grid laden',
'Loading': 'Wird geladen',
'Loading Equipment': 'Be-/Entladeaustattung',
'Local Name': 'Lokaler Name',
'Local Names': 'Lokale Namen',
'Location 1': 'Standort 1',
'Location 2': 'Standort 2',
'Location Detail': 'Details zum Gebiet/Standort',
'Location Details': 'Standortdetails',
'Location Hierarchies': 'Standort-Hierachien',
'Location Hierarchy Level 0 Name': 'Standort-Hierachie Level 0 Name',
'Location Hierarchy Level 1 Name': 'Standort-Hierachie Level 1 Name',
'Location Hierarchy Level 2 Name': 'Standort-Hierachie Level 2 Name',
'Location Hierarchy Level 3 Name': 'Standort-Hierarchie Level 3 Name',
'Location Hierarchy Level 4 Name': 'Standort-Hierarchie Level 4 Name',
'Location Hierarchy Level 5 Name': 'Standort-Hierarchie Level 5 Name',
'Location added': 'Standort hinzugefügt',
'Location deleted': 'Standort gelöscht',
'Location group cannot be a parent.': 'Standortgruppe kann kein übergeordnetes Element sein.',
'Location group cannot have a parent.': 'Standortgruppe kann kein übergeordnetes Element haben.',
'Location groups can be used in the Regions menu.': 'Standortgruppen können im Gebietsmenu verwendet werden.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Standortgruppen können genutzt werden, um die Ergebnisse auf der Karte und in den Suchergebnissen zu filtern.',
'Location updated': 'Standort aktualisiert',
'Location': 'Standort',
'Locations of this level need to have a parent of level': 'Standorte dieser Ebene müssen ein übergeordnetes Element der folgenden Ebene haben',
'Locations': 'Standorte',
'Lockdown': 'Sperrung',
'Log Entry Details': 'Details zum Protokolleintrag',
'Log entry added': 'Protokolleintrag hinzugefügt',
'Log entry deleted': 'Protokolleintrag gelöscht',
'Log entry updated': 'Protokolleintrag aktualisiert',
'Log': 'Protokoll',
'Logged By': 'Protokolliert durch',
'Logged in': 'Eingeloggt',
'Logged out': 'Ausgeloggt',
'Login': 'Anmeldung',
'Logistics Management System': 'Logistik Managementsystem',
'Logistics': 'Logistik',
'Logo file %s missing!': 'Datei mit Logo %s fehlt!',
'Logout': 'Abmelden',
'Long Name': 'Langschriftlicher Name',
'Long Text': 'Langer Text',
'Longitude is West - East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).',
'Longitude is West-East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (GMT) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (GMT) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.',
'Longitude of Map Center': 'Geographische Länge des Kartenmittelpunktes',
'Longitude of far eastern end of the region of interest.': 'Geographische Länge des östlichen Endes des Interessensgebietes.',
'Longitude of far western end of the region of interest.': 'Geographische Länge des westlichen Endes des Interessensgebietes.',
'Longitude should be between': 'Die Geographische Länge soll in folgendem Bereich liegen',
'Longitude': 'Geographische Länge',
'Looting': 'Plünderung',
'Lost Password': 'Kennwort vergessen',
'Lost': 'Verloren',
'Low': 'Niedrig',
'Low Tide Depth': 'Tiefe bei minimaler Tide',
'Magnetic Storm': 'Magnetischer Sturm',
'Mail': 'Post',
'Main Facility': 'Haupteinrichtung',
'Major Damage': 'Großer Schaden',
'Major expenses': 'Hauptausgaben',
'Major outward damage': 'Größter nach außen gerichteter Schaden',
'Major': 'Maßgeblich',
'Make Commitment': 'Eine Zusage machen',
'Make New Commitment': 'Neue Zusage machen',
'Make Request': 'Anfrage erstellen',
'Make Supplies Request': 'Artikelanfrage stellen',
'Make preparations per the <instruction>': 'Vorbereitungen treffen für <instruction>',
'Male': 'Männlich',
'Manage Layers in Catalog': 'Kartenebenen im Katalog verwalten',
'Manage Relief Item Catalogue': 'Katalog der Unterstützungselemente verwalten',
'Manage Users & Roles': 'Benutzer- und Rollenverwaltung',
'Manage Warehouses/Sites': 'Warenlager/Orte verwalten',
'Manage Your Facilities': 'Eigene Einrichtungen verwalten',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Verwaltung der Anfragen nach Vorräten, Anlagen, Mitarbeitern oder anderen Ressourcen. Vergleich mit den Beständen, wo Vorräte angefordert werden',
'Manage requests of hospitals for assistance.': 'Verwaltung der Anfragen von Krankenhäusern nach Unterstützung.',
'Manage volunteers by capturing their skills, availability and allocation': 'Verwaltung der Freiwilligen Helfer anhand ihrer Fähigkeiten, Verfügbarkeit und Zuordnung.',
'Managing Office': 'Verwaltungsbüro',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Verpflichtend. Beim GeoServer, ist das der Name des Layers. In den WFS Capabilities entspricht es dem Namen des FeatureType (ohne namespace - Teil hinter dem Doppelpunkt!).',
'Mandatory. The URL to access the service.': 'Verpflichtend. Die access URL des Dienstes.',
'Manual Synchronization': 'Manuelle Synchronisation',
'Manual': 'Anleitung',
'Many': 'Viele',
'Map Center Latitude': 'Geographische Breite des Kartenmittelpunkt',
'Map Center Longitude': 'Geographische Länge des Kartenmittelpunkt',
'Map Profile Details': 'Details zur Kartenkonfiguration ',
'Map Profile added': 'Kartenkonfiguration hinzugefügt',
'Map Profile deleted': 'Kartenkonfiguration gelöscht',
'Map Profile removed': 'Kartenkonfiguration entfernt',
'Map Profile updated': 'Kartenkonfiguration aktualisiert',
'Map Profile': 'Kartenkonfiguration',
'Map Profiles': 'Kartenkonfigurationen',
'Map Height': 'Höhe des Kartenfensters',
'Map Service Catalog': 'Karten Service-Katalog',
'Map Settings': 'Karteneinstellungen',
'Map Styles': 'Kartensymbolisierungen',
'Map Viewing Client': 'Kartenviewer',
'Map Width': 'Breite des Kartenfensters',
'Map Zoom': 'Kartenvergrößerung',
'Map of Hospitals': 'Karte der Krankenhäuser',
'Map of Offices': 'Karte der Büros',
'Map of Requests': 'Karte der Anfragen',
'Map of Vehicles': 'Karte der Fahrzeuge',
'Map': 'Karte',
'Marine Security': 'Hafensicherheit',
'Marital Status': 'Familienstand',
'Marker Details': 'Details zum Marker/Symbol',
'Marker added': 'Marker/Symbol hinzugefügt',
'Mark as duplicate': 'Markiere als Duplikat',
'Marker deleted': 'Marker/Symbol gelöscht',
'Marker updated': 'Marker/Symbol aktualisiert',
'Marker': 'Marker/Symbol',
'Markers': 'Marker/Symbole',
'Master Message Log to process incoming reports & requests': 'Haupt-Nachrichtenprotokoll um eingehende Berichte und Anfragen zu bearbeiten',
'Master Message Log': 'Haupt-Nachrichtenprotokoll',
'Match Percentage': 'Grad der Übereinstimmung',
'Match Requests': 'Passende Anfrage',
'Match percentage indicates the % match between these two records': 'Der Grad der Übereinstimmung gibt die prozentuale Übereinstimmung zwischen zwei Datensätzen an',
'Match?': 'Übereinstimmung?',
'Matching Catalog Items': 'Übereinstimmende Katalogelemente',
'Matching Items': 'Übereinstimmende Artikel',
'Matching Records': 'Übereinstimmende Datensätze',
'Maximum Extent': 'Maximale Ausdehnung',
'Maximum Location Latitude': 'Maximale Geographische Breite des Gebietes',
'Maximum Location Longitude': 'Maximale Geographische Länge des Gebietes',
'Max Height': 'Max Höhe',
'Medical': 'Medizin',
'Medical and public health': 'Medizinische Betreuung und öffentliches Gesundheitswesen',
'Medium': 'Mittel',
'Megabytes per Month': 'Megabytes pro Monat',
'Member removed from Group': 'Mitglied aus Gruppe entfernt',
'Members': 'Mitglieder',
'Membership Details': 'Details zur Mitgliedschaft',
'Membership Fee': 'Mitgliedsbeitrag',
'Membership Paid': 'Kostenpflichtige Mitgliedschaft',
'Membership Types': 'Mitgliedschaftstypen',
'Membership updated': 'Mitgliedschaft aktualisiert',
'Membership': 'Mitgliedschaft',
'Memberships': 'Mitgliedschaften',
'Message Details': 'Details zur Nachricht',
'Message Log': 'Nachrichtenprotokoll',
'Message Variable': 'Nachrichtenvariable',
'Message added': 'Nachricht hinzugefügt',
'Message deleted': 'Nachricht gelöscht',
'Message updated': 'Nachricht aktualisiert',
'Message variable': 'Nachrichtenvariable',
'Message': 'Nachricht',
'Messages': 'Nachrichten',
'Messaging settings updated': 'Einstellungen zur Nachrichtenübertragung aktualisiert',
'Messaging': 'Nachrichtenübertragung',
'Measure Length: Click the points along the path & end with a double-click': 'Längenmessung: Punkte entlang eines Verlaufs anklicken und mit Doppelklick abschließen',
'Meteorite': 'Meteorit',
'Meteorological (inc. flood)': 'Meteorologisch (auch Flut)',
'Method used': 'Verwendete Methode',
'Middle Name': 'Zweiter Vorname',
'Migrants or ethnic minorities': 'Migranten oder ethnische Minderheiten',
'Military': 'Militär',
'Military Grid Reference System PDFs': 'Military Grid Reference System PDFs',
'Minimum Location Latitude': 'Minimale Geographische Breite des Gebietes',
'Minimum Location Longitude': 'Minimale Geographische Länge des Gebietes',
'Minimum shift time is 6 hours': 'Minimum Dienstzeit ist sechs Stunden.',
'Minor Damage': 'Kleinere Schäden',
'Minor/None': 'Gering / Keine',
'Minorities participating in coping activities': 'Minderheiten beteiligen sich an Bewältigungsaktivitäten / Krisenbewältigungsaktivitäten',
'Minutes must be a number between 0 and 60': 'Minuten muss eine Zahl zwischen 0 und 60 sein',
'Minutes per Month': 'Minuten pro Monat',
'Minutes should be a number greater than 0 and less than 60': 'Minuten muss eine Zahl größer als 0 und kleiner als 60 sein',
'Miscellaneous': 'Verschiedenes',
'Missed': 'Verpasst',
'Missing Person Details': 'Nähere Angaben zur vermissten Person',
'Missing Person Registry': 'Register der vermissten Personen',
'Missing Person': 'Vermisste Person',
'Missing Persons Registry': 'Register der vermissten Personen',
'Missing Persons Report': 'Bericht über vermisste Personen',
'Missing Persons': 'Vermisste Personen',
'Missing Report': 'Bericht über Vermisste',
'Missing Senior Citizen': 'Vermisster älterer Bürger',
'Missing Vulnerable Person': 'Vermisste gefährdete Person',
'Missing': 'Fehlend',
'Mission Record': 'Auftragsbericht',
'Mission added': 'Auftrag hinzugefügt',
'Mission deleted': 'Auftrag gelöscht',
'Mission updated': 'Auftrag aktualisiert',
'Missions': 'Aufträge',
'Mobile Basic Assessment': 'Mobile Grundlegende Beurteilung',
'Mobile Commons Channels': 'Mobile Commons Kanäle',
'Mobile Phone': 'Mobiltelefon',
'Mobile': 'Handy',
'Mode': 'Modus',
'Model/Type': 'Modell/Typ',
'Modem Settings': 'Modemeinstellungen',
'Modem settings updated': 'Modemeinstellungen aktualisiert',
'Moderate': 'Moderat',
'Modify Information on groups and individuals': 'Anpassen der Information über Gruppen und Einzelpersonen',
'Modifying data in spreadsheet before importing it to the database': 'Anpassen von Daten in der Tabelle vor dem Import in die Datenbank',
'Module provides access to information on current Flood Levels.': 'Modul bietet Zugriff auf Information zum aktuellen Stand der Flut',
'Module': 'Modul',
'Monday': 'Montag',
'Monetization Report': 'Monetarisierungsbericht',
'Monitoring Frequency': 'Monitoring Frequenz',
'Monthly Cost': 'Monatliche Kosten',
'Monthly Salary': 'Monatliches Gehalt',
'Month': 'Monat',
'Monthly': 'Monatlich',
'Months': 'Monate',
'More': 'Mehr',
'More Options': 'Mehr Optionen',
'Morgue Status': 'Status der Leichenhalle',
'Morgue Units Available': 'Leichenhallenplätze verfügbar',
'Mosque': 'Moschee',
'Mother': 'Mutter',
'Motorcycle': 'Motorrad',
'Moustache': 'Schnurrbart',
'MultiPolygon': 'MultiPolygon',
'Multiple Matches': 'Mehrere Übereinstimmungen',
'Multiple': 'Mehrere',
'Muslim': 'Moslem',
'Must a location have a parent location?': 'Muss ein Standort einen übergeordneten Standort haben?',
'My Current function': 'Meine aktuelle Funktion',
'My Tasks': 'Meine Aufgaben',
'My Open Tasks': 'Meine unerledigten Aufgaben',
'N/A': 'Nicht zutreffend',
'NO': 'NEIN',
'NZSEE Level 1': 'NZSEE Stufe 1',
'NZSEE Level 2': 'NZSEE Stufe 2',
'Name and/or ID': 'Name und/oder ID',
'Name of Award': 'Name der Auszeichnung',
'Name of Driver': 'Name des Fahrers',
'Name of Institute': 'Name der Institution',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und die für den Hintergrund des Headers benutzt werden soll.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und für das obere linke Bild verwendet werden soll.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name der Datei (& optionales Unterverzeichnis) die sich in views befindet und für die Fußzeile verwendet werden soll.',
'Name of the person in local language and script (optional).': 'Name der Person in lokaler Sprache und Schreibweise (optional).',
'Name': 'Name',
'Name, Org and/or ID': 'Name, Org und/oder ID',
'Names can be added in multiple languages': 'Namen können in mehreren Sprachen hinzugefügt werden',
'National ID Card': 'Nationaler Identitätsnachweis',
'National NGO': 'Nationale NGO',
'Nationality of the person.': 'Nationalität der Person.',
'Nationality': 'Nationalität',
'native': 'Muttersprache',
'Nautical Accident': 'See-Unfall',
'Nautical Hijacking': 'See-Entführung',
'Need Details': 'Details zum Bedarf',
'Need Type Details': 'Details zum Bedarfstyp',
'Need Type added': 'Bedarfstyp hinzugefügt',
'Need Type deleted': 'Bedarfstyp gelöscht',
'Need Type updated': 'Bedarfstyp aktualisiert',
'Need Type': 'Bedarfstyp',
'Need Types': 'Bedarfstypen',
'Need added': 'Bedarf hinzugefügt',
'Need deleted': 'Bedarf gelöscht',
'Need to be logged-in to be able to submit assessments': 'Sie müssen eingeloggt sein um Beurteilungen zu veröffentlichen',
'Need to configure Twitter Authentication': 'Die Twitter Authentifizierungsdaten müssen konfiguriert sein',
'Need to specify a Budget!': 'Sie müssen ein Budget angegeben!',
'Need to specify a Kit!': 'Müssen Sie eine Ausstattung (Kit) angeben!',
'Need to specify a Resource!': 'Sie müssen eine Ressource angeben.',
'Need to specify a bundle!': 'Sie müssen ein Produktpaket angeben!',
'Need to specify a group!': 'Sie müssen einen Gruppe angeben!',
'Need to specify a location to search for.': 'Sie müssen ein Gebiet/Position für die Suche angeben.',
'Need to specify a role!': 'Sie müssen eine Rolle definieren!',
'Need to specify a table!': 'Sie müssen einen Tabellennamen angeben!',
'Need to specify a user!': 'Ein Benutzer muss angegeben werden!',
'Need updated': 'Bedarf aktualisiert',
'Needs Details': 'Details zum Bedarf',
'Needs Maintenance': 'Braucht Wartung',
'Needs to reduce vulnerability to violence': 'Handlungsbedarf um die Anfälligkeit für Gewalt zu verringern',
'Need': 'Bedarf',
'Needs': 'Bedarf',
'Neighborhood': 'Nachbarschaft',
'Neighbouring building hazard': 'Risiko durch benachbarte Gebäude',
'Neonatal ICU': 'Neugeborenen ICU',
'Neonatology': 'Neonatologie',
'Network': 'Netzwerk',
'Neurology': 'Neurologie',
'New Assessment reported from': 'Neue Beurteilung erstellt durch',
'New Certificate': 'Neues Zertifikat',
'New Checklist': 'Neue Prüfliste',
'New Entry': 'Neuer Eintrag',
'New Event': 'Neues Ereignis',
'New Item Category': 'Neue Kategorie für Artikel',
'New Job Role': 'Neue Tätigkeit',
'New Location Group': 'Neue Standortgruppe',
'New Location': 'Neuer Standort/Gebiet',
'New Peer': 'Neuer Peer',
'New Record': 'Neuer Datensatz',
'New Request': 'Neue Anfrage',
'New Role': 'Neue Rolle',
'New Scenario': 'Neues Szenario',
'New Skill': 'Neue Fähigkeit',
'New Solution Choice': 'Neue Lösungswahl',
'New Staff Member': 'Neue Mitarbeiter',
'New Stock Count': 'Neue Anzahl des Lagerbestands',
'New Support Request': 'Neue Unterstützunganfrage',
'New Synchronization Peer': 'Neuer Synchronisations Peer',
'New Team': 'Neues Team',
'New Training Course': 'Neuer Schulungskurs',
'New Volunteer': 'Neuer Freiwilliger',
'New cases in the past 24h': 'Neue Fälle in den letzten 24h',
'New': 'Neu',
'Next': 'Nächste',
'No': 'Nein',
'No Activities Found': 'Keine Aktivitäten gefunden',
'No Alternative Items currently registered': 'Zurzeit sind keine alternativen Artikel registriert',
'No Assessment Summaries currently registered': 'Zurzeit sind keine Beurteilungszusammenfassungen registriert',
'No Assessments currently registered': 'Zurzeit sind keine Beurteilungen registriert.',
'No Assets currently registered in this event': 'Zurzeit sind keine Anlagen zu diesem Ereignis registriert',
'No Assets currently registered in this scenario': 'Zurzeit sind keine Anlagen zu diesem Szenario registriert',
'No Assets currently registered': 'Zurzeit sind keine Anlagen registriert',
'No Baseline Types currently registered': 'Zurzeit sind keine Referenzdatumstypen registriert',
'No Baselines currently registered': 'Zurzeit sind keine Referenzdaten registriert',
'No Brands currently registered': 'Zurzeit sind keine Marken registriert',
'No Budgets currently registered': 'Zurzeit sind keine Budgets registriert',
'No Bundles currently registered': 'Zurzeit sind keine Produktpakete registriert',
'No Camp Services currently registered': 'Zurzeit sind keine Camp-Leistungen registriert',
'No Camp Types currently registered': 'Zurzeit sind keine Typen von Camps registriert',
'No Camps currently registered': 'Zurzeit sind keine Camps registriert',
'No Catalog Items currently registered': 'Zurzeit sind keine Katalogeinträge registriert',
'No Catalogs currently registered': 'Zurzeit sind keine Kataloge registriert',
'No Checklist available': 'Zurzeit sind keine Checklisten verfügbar',
'No Cluster Subsectors currently registered': 'Zurzeit sind keine Cluster Teilbereiche registriert',
'No Clusters currently registered': 'Zurzeit sind keine Cluster registriert',
'No Commitment Items currently registered': 'Zurzeit sind keine zugesagten Artikel registriert',
'No Commitments': 'Zurzeit sind keine Zusagen registriert',
'No Credentials currently set': 'Derzeit keine Berechtigungen hinterlegt',
'No Details currently registered': 'Zurzeit sind keine Details registriert',
'No Documents found': 'Keine Dokumente gefunden',
'No Donors currently registered': 'Zurzeit sind keine Spender registriert',
'No Events currently registered': 'Zurzeit sind keine Ereignisse registriert',
'No Facilities currently registered in this event': 'Für dieses Ereignis ist zurzeit keine Einrichtung registriert',
'No Facilities currently registered in this scenario': 'Für dieses Szenario ist zurzeit keine Einrichtung registriert.',
'No Feature Layers currently defined': 'Zurzeit sind keine Objekt-Layer definiert',
'No Flood Reports currently registered': 'Zurzeit sind keine Flutberichte registriert',
'No Groups currently defined': 'Zurzeit sind keine Gruppen definiert',
'No Groups currently registered': 'Zurzeit sind keine Gruppen registriert',
'No Hospitals currently registered': 'Zurzeit sind keine Krankenhäuser registriert',
'No Human Resources currently registered in this event': 'Für dieses Ereignis sind zurzeit keine personellen Ressourcen registriert.',
'No Human Resources currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine personellen Ressourcen registriert.',
'No Identification Report Available': 'Kein Identifizierungsbericht verfügbar',
'No Identities currently registered': 'Zurzeit sind keine Identitäten registriert',
'No Image': 'Kein Bild',
'No Images currently registered': 'Zurzeit sind keine Bilder registriert',
'No Impact Types currently registered': 'Zurzeit sind keine Auswirkungsarten registriert',
'No Impacts currently registered': 'Zurzeit sind keine Auswirkungen registriert',
'No Incident Reports currently registered': 'Zurzeit sind keine Vorfallberichte registriert',
'No Incoming Shipments': 'Keine eingehenden Lieferungen',
'No Item Categories currently registered': 'Zurzeit sind keine Artikelkategorien registriert',
'No Item Packs currently registered': 'Zurzeit sind keine Artikelpakete registriert',
'No Items currently registered in this Inventory': 'Für diesen Bestand sind zurzeit keine Artikel registriert',
'No Items currently registered': 'Zurzeit sind keine Artikel registriert',
'No Keys currently defined': 'Zurzeit sind keine Schlüssel definiert',
'No Kits currently registered': 'Zurzeit sind keine Ausstattungen (Kits) definiert',
'No Level 1 Assessments currently registered': 'Zurzeit keine Stufe 1 Beurteilungen registriert',
'No Level 2 Assessments currently registered': 'Zurzeit keine Stufe 2 Beurteilungen registriert',
'No Locations currently available': 'Keine Standorte/Gebiete verfügbar',
'No Locations currently registered': 'Zurzeit sind keine Standorte/Gebiete registriert',
'No Map Profiles currently defined': 'Zurzeit sind keine Kartenkonfigurationen definiert',
'No Map Profiles currently registered in this event': 'Für dieses Ereignis sind zurzeit keine Kartenkonfigurationen registriert',
'No Map Profiles currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine Kartenkonfigurationen registriert',
'No Markers currently available': 'Zurzeit sind keine Marker/Symbole verfügbar',
'No Match': 'Keine Übereinstimmung',
'No Matching Catalog Items': 'Keine passenden Katalogelemente',
'No Matching Items': 'Keine passenden Artikel',
'No Matching Records': 'Keine passenden Datensätze',
'No Members currently registered': 'Zurzeit sind keine Mitglieder registriert',
'No Memberships currently defined': 'Zurzeit sind keine Mitgliedschaften definiert',
'No Messages currently in Outbox': 'Zurzeit sind keine Nachrichten im Postausgang',
'No Need Types currently registered': 'Zurzeit sind keine Anforderungstypen registriert',
'No Needs currently registered': 'Zurzeit sind keine Anforderungen registriert',
'No Offices currently registered': 'Zurzeit sind keine Büros registriert',
'No Offices found!': 'Keine Büros gefunden!',
'No Organizations currently registered': 'Zurzeit sind keine Organisationen registriert',
'No options available': 'Keine Optionen verfügbar',
'No People currently registered in this camp': 'Zurzeit sind in diesem Camp keine Personen registriert',
'No People currently registered in this shelter': 'Zurzeit sind in dieser Unterkunft keine Personen registriert',
'No Persons currently registered': 'Zurzeit sind keine Personen registriert',
'No Persons currently reported missing': 'Zurzeit sind keine Personen vermisst gemeldet',
'No Persons found': 'Keine Personen gefunden',
'No Photos found': 'Keine Fotos gefunden',
'No Picture': 'Kein Bild',
'No Population Statistics currently registered': 'Zurzeit sind keine Bevölkerungsstatistiken registriert',
'No Presence Log Entries currently registered': 'Zurzeit gibt es keine Anwesenheitsprotokolleinträge',
'No Problems currently defined': 'Zurzeit sind keine Probleme definiert',
'No Projections currently defined': 'Zurzeit sind keine Kartenprojektionen definiert',
'No Projects currently registered': 'Zurzeit sind keine Projekte registriert',
'No Rapid Assessments currently registered': 'Zurzeit sind keine Schnell-Beurteilungen registriert',
'No Received Items currently registered': 'Zurzeit sind keine erhaltenen Lieferungen registriert',
'No Received Shipments': 'Keine erhaltene Lieferungen',
'No Records currently available': 'Zurzeit sind keine Datensätze registriert',
'No Request Items currently registered': 'Zurzeit sind keine angefragten Artikel registriert',
'No Requests': 'Keine Anfragen',
'No Rivers currently registered': 'Zurzeit sind keine Flüsse registriert',
'No Roles currently defined': 'Zurzeit sind keine Rollen definiert',
'No Rooms currently registered': 'Zurzeit sind keine Räume registriert',
'No Scenarios currently registered': 'Zurzeit sind keine Szenarios registriert',
'No Sections currently registered': 'Zurzeit sind keine Abschnitte registriert',
'No Sectors currently registered': 'Zurzeit sind keine Bereiche registriert',
'No Sent Items currently registered': 'Zurzeit sind keine gesendeten Artikel registriert',
'No Sent Shipments': 'Keine versandten Lieferungen',
'No Settings currently defined': 'Zurzeit sind keine Einstellungen definiert',
'No Shelter Services currently registered': 'Zurzeit sind keine Unterkunftsleistungen registriert',
'No Shelter Types currently registered': 'Zurzeit sind keine Unterkunfttypen registriert',
'No Shelters currently registered': 'Zurzeit sind keine Unterkünfte registriert',
'No Solutions currently defined': 'Zurzeit sind keine Lösungen definiert',
'No Staff Types currently registered': 'Zurzeit sind keine Mitarbeitertypen registriert',
'No Subscription available': 'Keine Abonnements verfügbar',
'No Subsectors currently registered': 'Zurzeit sind keine Teilbereiche registriert',
'No Support Requests currently registered': 'Zurzeit sind keine Unterstützungsanfragen registriert',
'No Survey Answers currently entered.': 'Zurzeit wurden noch keine Antworten auf Umfragen eingegeben.',
'No Survey Questions currently registered': 'Zurzeit wurden noch keine Umfragen-Fragen registriert. ',
'No Survey Series currently registered': 'Zurzeit wurden noch keine Umfragenserie registriert',
'No Survey Template currently registered': 'Zurzeit wurden noch keine Umfragen-Vorlage registriert',
'No Tasks with Location Data': 'Für dieses Gebiet/Standort liegen zurzeit keine Aufgaben vor',
'No Teams currently registered': 'Zurzeit wurden noch keine Teams registriert',
'No Themes currently defined': 'Zurzeit wurden noch keine Themen registriert',
'No Tickets currently registered': 'Zurzeit wurden noch keine Tickets registriert',
'No Tracks currently available': 'Zurzeit sind noch keine Tracks verfügbar',
'No Users currently registered': 'Zurzeit wurden noch keine Benutzer registriert',
'No Volunteers currently registered': 'Zurzeit sind noch keine Freiwilligen registriert',
'No Warehouses currently registered': 'Zurzeit sind noch keine Warenlager registriert',
'No access at all': 'Kein Zugriff',
'No access to this record!': 'Kein Zugriff auf diesen Datensatz!',
'No action recommended': 'Keine Aktion empfohlen',
'No conflicts logged': 'Keine Konflikte protokolliert',
'No contact information available': 'Keine Kontaktinformation verfügbar',
'No contacts currently registered': 'Zurzeit sind noch keine Kontakte registriert',
'No data available': 'Keine Daten verfügbar',
'No data in this table - cannot create PDF!': 'Keine Daten in dieser Tabelle - PDF kann nicht erstellt werden!',
'No databases in this application': 'Keine Datenbanken in dieser Anwendung',
'No dead body reports available': 'Keine Leichenberichte verfügbar',
'No entries found': 'Keine Einträge gefunden',
'No entries matching the query': 'Die Abfrage lieferte keine Einträge',
'No entry available': 'Kein Eintrag verfügbar',
'No location known for this person': 'Für diese Person ist kein Gebiet/Standort bekannt',
'No locations found for members of this team': 'Für Mitglieder dieses Teams ist kein Gebiet/Standort bekannt',
'No log entries matching the query': 'Die Abfrage lieferte keine Protokolleinträge',
'No messages in the system': 'Keine Nachrichten im System',
'No peers currently registered': 'Zurzeit sind keine Peers registriert',
'No pending registrations found': 'Keine anstehenden Registrierungen gefunden',
'No pending registrations matching the query': 'Die Abfrage lieferte keine anstehenden Registrierungen',
'No person record found for current user.': 'Kein Personendatensatz für den aktuellen Benutzer gefunden.',
'No problem group defined yet': 'Noch keine Problem-Gruppe definiert',
'No records found': 'Keine Datensätze gefunden',
'No records matching the query': 'Die Abfrage lieferte keine Datensätze',
'No reports available.': 'Keine Berichte verfügbar.',
'No reports currently available': 'Zurzeit sind keine Berichte verfügbar',
'No requests found': 'Keine Anfragen gefunden',
'No resources currently reported': 'Zurzeit sind keine Ressourcen gemeldet',
'No service profile available': 'Kein Leistungsprofil verfügbar',
'No skills currently set': 'Zurzeit sind keine Fähigkeiten festgelegt',
'No staff or volunteers currently registered': 'Zurzeit sind weder Mitarbeiter noch Freiwillige registriert',
'No status information available': 'Keine Statusinformation verfügbar',
'No synchronization': 'Keine Synchronisation',
'No tasks currently registered': 'Zurzeit sind keine Aufgaben registriert',
'No template found!': 'Keine Vorlage gefunden!',
'No units currently registered': 'Zurzeit sind keine Einheiten registriert',
'No volunteer availability registered': 'Zurzeit ist keine Verfügbarkeit von Freiwilligen registriert',
'Non-structural Hazards': 'Nicht-strukturelle Gefahren',
'None (no such record)': 'Nichts (kein entsprechender Datensatz)',
'None': '-',
'Noodles': 'Nudeln',
'Normal Address': 'Normale Adresse',
'Normal Job': 'Normaler Beruf',
'Not Applicable': 'Nicht zutreffend',
'Not Authorised!': 'Nicht berechtigt!',
'Not Possible': 'Nicht möglich',
'Not Set': 'Nicht festgelegt',
'Not Authorized': 'Nicht berechtigt',
'Not installed or incorrectly configured.': 'Nicht installiert oder nicht korrekt konfiguriert.',
'Not yet a Member of any Group': 'Bis jetzt kein Mitglied irgendeiner Gruppe',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Beachten Sie, dass diese Liste nur aktive Freiwillige zeigt. Um alle registrierten Personen im System zu sehen, suchen sie statt dessen auf diesem Bildschirm',
'Notice to Airmen': 'Hinweis für Flieger',
'Notify': 'Benachrichtigen',
'Number': 'Anzahl',
'Number of Barges': 'Zahl der Lastschiffe',
'Number of Columns': 'Anzahl der Spalten',
'Number of Patients': 'Anzahl der Patienten',
'Number of People Required': 'Anzahl der benötigten Personen',
'Number of Rows': 'Anzahl der Reihen',
'Number of Tugboats': 'Zahl der Schleppkähne',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Anzahl von zusätzlichen Betten dieses Typs, die voraussichtlich in den nächsten 24 Stunden in dieser Einheit zur Verfügung stehen werden.',
'Number of alternative places for studying': 'Anzahl von alternativen Orten zum studieren.',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Anzahl von verfügbaren/freien Betten dieses Typs in dieser Einheit zum Zeitpunkt des Berichtes.',
'Number of deaths during the past 24 hours.': 'Anzahl von Toten in den letzten 24 Stunden',
'Number of discharged patients during the past 24 hours.': 'Anzahl der entlassenen Patienten in den vergangen 24 Stunden',
'Number of doctors': 'Anzahl der Ärzte',
'Number of in-patients at the time of reporting.': 'Anzahl von in-Patienten zum Zeitpunkt der Berichterstellung',
'Number of newly admitted patients during the past 24 hours.': 'Anzahl der neu zugewiesenen Patienten innerhalb der letzten 24 Stunden',
'Number of non-medical staff': 'Anzahl des nicht-medizinischen Personals',
'Number of nurses': 'Anzahl der Krankenschwestern',
'Number of private schools': 'Anzahl der privaten Schulen',
'Number of public schools': 'Anzahl der öffentlichen Schulen',
'Number of religious schools': 'Anzahl der religiösen Schulen',
'Number of residential units not habitable': 'Anzahl der nicht bewohnbaren Wohneinheiten',
'Number of residential units': 'Anzahl der Wohneinheiten',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Anzahl der freien/verfügbaren Betten in diesem Krankenhaus. Automatisch aktualisiert aus täglichen Berichten.',
'Number of vacant/available units to which victims can be transported immediately.': 'Anzahl der freien/verfügbaren Einheiten zu denen die Opfer sofort transportiert werden können.',
'Number or Label on the identification tag this person is wearing (if any).': 'Nummer oder Beschriftung auf der Identifikationsmarke, die diese Person trägt (falls vorhanden).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Nummer oder Code, der den Fundort markiert, z. B. Flaggencode, Koordinaten, Standortnummer oder ähnliches (falls verfügbar)',
# NOTE(review): duplicate key — 'Number' also appears earlier in this dict with
# the value 'Anzahl'; in a Python dict literal this later value ('Nummer')
# silently wins. Confirm which translation is intended and remove the other.
'Number': 'Nummer',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Female & Aged 61+': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung über 61',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Male & Aged 61+': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung über 61',
'Nursery Beds': 'Krankenhausbetten',
'Nutrition problems': 'Ernährungsprobleme',
'Nutrition': 'Nahrung',
'Opportunities to Volunteer On-Site?': 'Möglichkeiten für Freiwillige vor Ort?',
'OR Reason': 'oder Grund',
'OR Status Reason': 'oder Statusgrund',
'OR Status': 'oder Status',
'Observer': 'Beobachter',
'Obsolete': 'Veraltet',
'Obstetrics/Gynecology': 'Geburtshilfe/Gynäkologie',
'Office Address': 'Büroadresse',
'Office Details': 'Bürodetails',
'Office Phone': 'Telefon im Büro',
'Office Type': 'Bürotyp',
'Office Types': 'Bürotypen',
'Office added': 'Büro hinzugefügt',
'Office deleted': 'Büro gelöscht',
'Office updated': 'Büro aktualisiert',
'Office': 'Büro',
'Offices & Warehouses': 'Büros & Warenlager',
'Offices': 'Büros',
'Offline Sync (from USB/File Backup)': 'Offline-Synchronisation (von USB/Dateisicherung)',
'Offline Sync': 'Offline-Synchronisation',
'Oil Terminal Depth': 'Tiefe des Ölterminals',
'Older people as primary caregivers of children': 'Ältere Menschen als primäre Pfleger von Kindern',
'Older people in care homes': 'Ältere Menschen in Pflegeheimen',
'Older people participating in coping activities': 'Ältere Menschen die sich an Krisenbewältigungsaktivitäten beteiligen',
'Older person (>60 yrs)': 'Ältere Personen (> 60 Jahre)',
'On by default? (only applicable to Overlays)': 'Standardmäßig an? (gilt nur für Overlays)',
'On by default?': 'Standardmäßig an?',
'On Hold': 'Abwarten',
'One Time Cost': 'Einmalige Kosten',
'One time cost': 'Einmalige Kosten',
'One-time costs': 'Einmalige Kosten',
'One-time': 'Einmalig',
'Oops! Something went wrong...': 'Hoppla! Etwas ging schief...',
'Oops! something went wrong on our side.': 'Hoppla! Etwas ging auf unserer Seite schief.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opazität (1 für opaque - undurchsichtig, 0 für vollständig transparent)',
'Opacity': 'Opazität (Undurchsichtigkeit)',
'Open area': 'Offener Bereich',
'Open recent': 'Kürzlich Bearbeitetes öffnen',
'Open': 'Öffnen',
'Opening Times': 'Öffnungszeiten',
'OpenStreetMap Tiles': 'OpenStreetMap Tiles',
'OpenWeatherMap data': 'OpenWeatherMap Daten',
'Operating Rooms': 'Betriebsräume',
'Optional link to an Incident which this Assessment was triggered by.': 'Optionaler Link zu einem Vorfall, der diese Beurteilung auslöste.',
'Optional': 'Optional',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. Wenn Sie die Darstellung der Objekte auf der Basis von Werten eines Attributs festlegen möchten, wählen sie das zu verwendende Attribut hier aus.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. Bei GeoServer, das ist die Arbeitsbereich Namespace-URI (nicht der Name!). Beim WFS "Capabilities", ist dies die Namensteil des FeatureTypes vor dem Doppelpunkt(:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. Der Name eines Elements dessen Inhalt eine URL zu einer Bilddatei die im Dialogfenster angezeigt werden soll.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. Name eines Elements, dessen Inhalt in Dialogfenstern angezeigt wird.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. Name des Schemas. Bei Geoserver wird das Format http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name verwendet.',
'Options': 'Optionen',
'Organization Details': 'Details zur Organisation',
'Organization Domains': 'Organisationsdomains',
'Organization Registry': 'Organisationsdatenbank',
'Organization Type': 'Organisationstyp',
'Organization Types': 'Organisationstypen',
'Organization added': 'Organisation hinzugefügt',
'Organization deleted': 'Organisation gelöscht',
'Organization updated': 'Organisation aktualisiert',
'Organization': 'Organisation',
'Organizations': 'Organisationen',
'Organization/Supplier': 'Organisation/Anbieter',
'Organized By': 'Organisiert durch',
'Origin of the separated children': 'Ursprung der getrennten Kinder',
'Origin': 'Ursprung',
'Other Address': 'Andere Adresse',
'Other (describe)': 'Andere (näher beschreiben)',
'Other (specify)': 'Sonstige (näher spezifizieren)',
'Other Evidence': 'Anderer Nachweis',
'Other Faucet/Piped Water': 'Andere Wasserrohre/-hähne',
'Other Isolation': 'Andere Isolierung',
'Other Name': 'Sonstiger Name',
'Other activities of boys 13-17yrs before disaster': 'Andere Aktivitäten von Jungen 13-17 Jahre vor der Katastrophe',
'Other activities of boys 13-17yrs': 'Andere Aktivitäten der Jungen 13-17 Jahre',
'Other activities of boys <12yrs before disaster': 'Andere Aktivitäten von Jungen <12 Jahre vor der Katastrophe',
'Other activities of boys <12yrs': 'Andere Aktivitäten von Jungen <12 Jahren',
'Other activities of girls 13-17yrs before disaster': 'Andere Aktivitäten von Mädchen 13-17 Jahre vor der Katastrophe',
'Other activities of girls 13-17yrs': 'Andere Aktivitäten von Mädchen 13-17 Jahre',
'Other activities of girls<12yrs before disaster': 'Andere Aktivitäten von Mädchen <12 Jahre vor der Katastrophe',
'Other activities of girls<12yrs': 'Andere Aktivitäten von Mädchen <12 Jahre',
'Other alternative infant nutrition in use': 'Andere alternative Kindernahrung die Verwendung findet.',
'Other alternative places for study': 'Andere alternative Orte zum Lernen',
'Other assistance needed': 'Andere Unterstützung benötigt',
'Other assistance, Rank': 'Andere Unterstützung, Rang',
'Other current health problems, adults': 'Andere aktuelle gesundheitliche Probleme, Erwachsene',
'Other current health problems, children': 'Andere aktuelle gesundheitliche Probleme, Kinder',
'Other events': 'Sonstige Ereignisse',
'Other factors affecting school attendance': 'Andere Faktoren mit Einfluss auf den Schulbesuch',
'Other major expenses': 'Andere große Ausgaben',
'Other non-food items': 'Andere non-food Posten',
'Other recommendations': 'Andere Empfehlungen',
'Other residential': 'Andere Bewohner/innen',
'Other school assistance received': 'Andere erhaltene Schulunterstützung',
'Other school assistance, details': 'Andere Schulhilfe, Einzelheiten',
'Other school assistance, source': 'Herkunft anderer Schulhilfen',
'Other settings can only be set by editing a file on the server': 'Andere Einstellungen können nur durch Bearbeiten einer Datei auf dem Server festgelegt werden',
'Other side dishes in stock': 'Andere Speisen auf Lager',
'Other types of water storage containers': 'Andere Arten von Wassertanks',
'Other ways to obtain food': 'Weitere Möglichkeiten um an Nahrungsmitteln zu gelangen',
'Other': 'Sonstige',
'Outbound Mail settings are configured in models/000_config.py.': 'Abgehende Mail-Einstellungen werden in der Datei models/000_config.py konfiguriert.',
'Outbox': 'Ausgang',
'Outgoing SMS Handler': 'SMS-Handler für ausgehende Informationen',
'Outgoing SMS handler': 'SMS-Handler für ausgehende Informationen',
'Overall Hazards': 'Gefahren insgesamt',
'Overhead falling hazard': 'Gefahr fallender Objekte',
'Overland Flow Flood': 'Überflutung',
'Owned By (Organization/Branch)': 'Gehört (Organisation/Niederlassung)',
'Owned Records': 'Eigene Datensätze',
'Owned Resources': 'Eigene Ressourcen',
'Ownership': 'Eigentum',
'Owning Organization': 'In Eigentum von',
'PIN number': 'PIN Nummer',
'PIN': 'PIN',
'PL Women': 'PL Frauen',
'Pack': 'Packung',
'Packs': 'Packungen',
'Paid': 'Bezahlt',
'Parameters': 'Parameter',
'Parapets, ornamentation': 'Geländer, Verzierung',
'Parent Office': 'Übergeordnetes Büro',
'Parent needs to be of the correct level': 'Übergeordnetes Element muss auf der richtigen Stufe sein',
'Parent needs to be set for locations of level': 'Ein übergeordnetes Element muss für Gebiete/Standorte dieser Stufe existieren',
'Parent needs to be set': 'Ein übergeordnetes Element muss definiert werden',
'Parent': 'Übergeordnetes Element',
'Parents/Caregivers missing children': 'Eltern/Pfleger vermissen Kinder',
'Parser Connections': 'Parser Verbindungen',
'Parsers': 'Parser',
'Partial': 'partiell',
'Participant': 'Teilnehmer',
'Pashto': 'Paschtu',
'Pass': 'Übergeben',
'Passport': 'Reisepass',
'Password': 'Passwort',
'Path': 'Pfad',
'Pathology': 'Pathologie',
'Patients': 'Patienten',
'Payload Height (m)': 'Ladekapazität Höhe (m)',
'Payload Length (m)': 'Ladekapazität Länge (m)',
'Payload Volume (m3)': 'Ladekapazität Volumen (m3)',
'Payload Weight (kg)': 'Ladekapazität Gewicht (kg)',
'Payload Width (m)': 'Ladekapazität Breite (m)',
'Pediatric ICU': 'Kinderklinik ICU',
'Pediatric Psychiatric': 'Kinderpsychiatrie',
'Pediatrics': 'Kinderheilkunde',
'Peer Details': 'Details zu Peers',
'Peer Registration Details': 'Details zur Peer-Registrierung',
'Peer Registration Request': 'Anfrage zu Peer-Registrierung',
'Peer Registration': 'Peer-Registrierung',
'Peer Type': 'Peer Typ',
'Peer UID': 'Peer UID',
'Peer added': 'Peer hinzugefügt',
'Peer deleted': 'Peer gelöscht',
'Peer not allowed to push': 'Peer ist nicht für das pushen von Daten zugelassen',
'Peer registration request added': 'Anfrage zu Peer-Registrierung hinzugefügt',
'Peer registration request deleted': 'Anfrage zu Peer-Registrierung gelöscht',
'Peer registration request updated': 'Anfrage zu Peer-Registrierung aktualisiert',
'Peer updated': 'Peer aktualisiert',
'Peer': 'Peer',
'Pending Requests': 'Anstehende Anfragen',
'Pending': 'Anstehend',
'People Needing Food': 'Personen die Nahrungsmittel brauchen',
'People Needing Shelter': 'Personen die Unterkünfte brauchen',
'People Needing Water': 'Personen die Wasser brauchen',
'People Reservation': 'Gruppe reservieren',
'People Registration': 'Person registrieren',
'People Trapped': 'Eingeschlossene Personen',
'People': 'Personen',
'Performance Rating': 'Ergebnisbeurteilung',
'Permanent Home Address': 'Dauerhafte Heimatadresse',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1 und Person 2 sind möglicherweise Duplikate',
'Person De-duplicator': 'Dubletten in Personen auflösen',
'Person Details': 'Details zur Person',
'Person Registry': 'Personendatenbank',
'Person added to Group': 'Person einer Gruppe hinzugefügt',
'Person added to Team': 'Person einem Team hinzugefügt',
'Person added': 'Person hinzugefügt',
'Person deleted': 'Person gelöscht',
'Person details updated': 'Details zur Person aktualisiert',
'Person interviewed': 'Person befragt',
'Person or OU': 'Person oder Organisationseinheit',
'Person who has actually seen the person/group.': 'Person, die kürzlich die Person/Gruppe gesehen hat',
'Person/Group': 'Person/Gruppe',
'Personal Data': 'Persönliche Daten',
'Personal Effects Details': 'Details zur persönlichen Habe',
'Personal Effects': 'Persönliche Habe',
'Personal Map': 'Persönliche Karte',
'Personal Profile': 'Persönliches Profil',
'Personal impact of disaster': 'Persönliche Auswirkung der Katastrophe',
'Persons in institutions': 'Personen in Institutionen',
'Persons with disability (mental)': 'Personen mit Behinderungen (psychischen)',
'Persons with disability (physical)': 'Personen mit Behinderungen (körperlichen)',
'Person': 'Person',
'Persons by Age Group': 'Personen nach Altersgruppen',
'Persons by Gender': 'Personen nach Geschlecht',
'Persons': 'Personen',
'Phone 1': 'Telefon 1',
'Phone 2': 'Telefon 2',
'Phone #': 'Telefon #',
'Phone': 'Telefon',
'Phone/Business': 'Telefon/Geschäftlich',
'Phone/Emergency': 'Telefon/Notfall',
'Phone/Exchange (Switchboard)': 'Telefon/Exchange (Hauptschalttafel)',
'Photo Details': 'Foto Details',
'Photo Taken?': 'Foto gemacht?',
'Photo added': 'Foto hinzugefügt',
'Photo deleted': 'Foto gelöscht',
'Photo updated': 'Foto aktualisiert',
'Photo': 'Foto',
'Photograph': 'Fotografie',
'Photos': 'Fotos',
'Physical Description': 'Physische Beschreibung',
'Physical Safety': 'Physische Sicherheit',
'Picture upload and finger print upload facility': 'Einrichtung um Foto und Fingerabdruck hochzuladen',
'Picture': 'Bild',
'Place of Recovery': 'Ort der Wiederherstellung',
'Place on Map': 'Auf Karte plazieren',
'Places for defecation': 'Plätze für Kotablagerung',
'Places the children have been sent to': 'Orte an die Kinder geschickt wurden',
'Planned': 'Geplant',
'Planned on': 'Geplant am',
'Planning': 'In Planung',
'Playing': 'Wiedergabe',
'Please correct all errors.': 'Korrigieren Sie bitte alle Fehler.',
'Please enter a first name': 'Bitte geben Sie den Vornamen ein',
'Please enter a site OR a location': 'Bitte geben Sie eine Stelle oder einen Standort/Gebiet an',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Bitte geben sie die ersten Buchstaben der Person/Gruppe ein um die Autovervollständigung zu starten.',
'Please enter the recipient': 'Bitte geben sie den Empfänger ein',
'Please fill this!': 'Bitte hier einfüllen!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Bitte geben Sie die URL der Seite auf die sie sich beziehen, eine Beschreibung dessen, was sie erwartet haben & was wirklich passiert ist.',
'Please report here where you are:': 'Bitte hier angeben, wo sie sich befinden:',
'Please select another level': 'Bitte wählen Sie eine andere Ebene',
'Please select': 'Treffen Sie eine Auswahl',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Bitte melden Sie sich unter Angabe Ihrer Mobilfunknummer an. Das erlaubt uns Ihnen Textnachrichten zu senden. Bitten verwenden Sie die internationale Nummer ein (Deutschland: 0049.... - ohne führende 0).',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Bitte geben Sie alle Probleme und Hindernisse bei der korrekten Behandlung der Krankheit an, im Detail (in Zahlen, falls zutreffend). Sie können auch Vorschläge machen wie die Situation verbessert werden kann.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Please use this field to record any additional information, including any Special Needs.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, einschließlich besonderer Anforderungen, zu hinterlegen.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, wie die Ushahidi Vorgangs-ID, zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Pledge Support': 'Zusage von Unterstützung',
'PO': 'PO',
'PO Number': 'PO Nummer',
'PoI Types': 'PoI Typen',
'POIS': 'PoIs',
'Point': 'Point',
'Points of Interest': 'Points of Interest',
'Poisoning': 'Vergiftung',
'Poisonous Gas': 'Gasvergiftung',
'Police': 'Polizei',
'Pollution and other environmental': 'Verschmutzung und andere Umwelt',
'Polygon reference of the rating unit': 'Polygonale Abgrenzung der Bewertungseinheit',
'Poor': 'Arm',
'Population Statistic Details': 'Details zur Bevölkerungsstatistik',
'Population Statistic added': 'Bevölkerungsstatistik hinzugefügt',
'Population Statistic deleted': 'Bevölkerungsstatistik gelöscht',
'Population Statistic updated': 'Bevölkerungsstatistik aktualisiert',
'Population Statistics': 'Bevölkerungsstatistiken',
'Population and number of households': 'Bevölkerungs- und Haushaltsanzahl',
'Population': 'Belegung',
'Popup Fields': 'Popup Felder',
'Popup Label': 'Popup Beschriftung',
'Porridge': 'Haferbrei',
'Port Closure': 'Hafenschließung',
'Port': 'Port',
'Portable App': 'Portable App',
'Position Catalog': 'Standpunktkatalog',
'Position added': 'Standpunkt hinzugefügt',
'Position deleted': 'Standpunkt gelöscht',
'Position updated': 'Standpunkt aktualisiert',
'Positions': 'Positionen',
'Postcode': 'PLZ',
'Posted on': 'Geposted auf',
'Posts can be either full pages, embedded within other pages or part of a series (for use as news items or blog posts)': 'Posts können entweder komplette Seiten, die in anderen Seiten eingebettet wurden oder Teile einer Serie sein (z.B. zur Nutzung als Newseintrag oder Blog Post)',
'Poultry restocking, Rank': 'Geflügel auffüllen, Rang',
'Poultry': 'Geflügel',
'Pounds': 'Pfund',
'Power Failure': 'Netzausfall',
'Power': 'Stromversorgung',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Pre-cast connections': 'Beton Verbindungen',
'Preferred Name': 'Bevorzugter Name',
'Pregnant women': 'Schwangere Frauen',
'Preliminary': 'Vorläufig',
'Presence Condition': 'Anwesenheitsbedingung',
'Presence Log': 'Anwesenheitsprotokollierung',
'Presence in the shelter': 'Anwesend in Unterkunft',
'Presence': 'Anwesenheit',
'Previous': 'Vorherige',
'Primary Occupancy': 'Primäre Belegung',
'Priority from 1 to 9. 1 is most preferred.': 'Priorität von 1 bis 9. 1 ist die am meisten bevorzugte.',
'Priority': 'Priorität',
'Privacy': 'Datenschutz',
'Private': 'Privat',
'Problem Administration': 'Verwaltung von Problemen',
'Problem Details': 'Problemdetails',
'Problem Group': 'Problemgruppe',
'Problem Title': 'Problemtitel',
'Problem added': 'Problem hinzugefügt',
'Problem connecting to twitter.com - please refresh': 'Verbindungsproblem zu twitter.com - bitte neu laden',
'Problem deleted': 'Problem gelöscht',
'Problem updated': 'Problem aktualisiert',
'Problem': 'Problem',
'Problems': 'Probleme',
'Procedure': 'Vorgehensweise',
'Process Received Shipment': 'Bearbeiten der erhaltenen Lieferung',
'Process Shipment to Send': 'Vorbereiten der Lieferung zum Versenden',
'Procurement & Logistics cost': 'Kosten für Beschaffung & Logistik',
'Profession': 'Beruf',
'Profile': 'Profil',
'Profile Details': 'Details zum Profil',
'Profile Picture?': 'Profilbild?',
'Program Hours (Month)': 'Programmstunden (Monat)',
'Program Hours (Year)': 'Programmstunden (Jahr)',
'Program': 'Programm',
'Programs': 'Programme',
'Proj4js definition': 'Proj4js Definition',
'Project Details': 'Details zum Projekt',
'Project Name': 'Name des Projekts',
'Project Status': 'Projektstatus',
'Project added': 'Projekt hinzugefügt',
'Project deleted': 'Projekt gelöscht',
'Project has no Lat/Lon': 'Projekt hat keine Geographische Koordinate (lat/lon)',
'Project updated': 'Projekt aktualisiert',
'Project': 'Projekt',
'Projection Details': 'Details zur Kartenprojektion',
'Projection added': 'Kartenprojektion hinzugefügt',
'Projection deleted': 'Kartenprojektion gelöscht',
'Projection updated': 'Kartenprojektion aktualisiert',
'Projection': 'Kartenprojektion',
'Projections': 'Kartenprojektionen',
'Projects': 'Projekte',
'Property reference in the council system': 'Anlage im Behördensystem',
'Proposed': 'Vorgeschlagen',
'Protected resource': 'Geschützte Ressource',
'Protection': 'Schutz',
'Provide Metadata for your media files': 'Stellen Sie Metadaten für Ihre Mediadateien zur Verfügung.',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Stellen Sie optional eine Skizze des gesamten Gebäudes oder der beschädigten Objekte zur Verfügung. Markieren Sie dabei die beschädigten Stellen.',
'Psychiatrics/Adult': 'Psychiatrie/Erwachsene',
'Psychiatrics/Pediatric': 'Psychiatrie/Kinder',
'Public Event': 'Öffentliches Ereignis',
'Public and private transportation': 'Öffentlicher und privater Transport',
'Public assembly': 'Öffentliche Versammlung',
'Public': 'Öffentlich',
'Publish': 'Veröffentlichen',
'Published On': 'Veröffentlicht am',
'Pull tickets from external feed': 'Tickets von externen Feeds laden',
'Purchase Date': 'Kaufdatum',
'Purchase Price': 'Kaufpreis',
'Purchase': 'Kauf',
'Purpose': 'Zweck',
'Push tickets to external system': 'Transferiere Tickets zu externen System',
'Pyroclastic Flow': 'Pyroklastischer Strom',
'Pyroclastic Surge': 'Pyroklastische Welle',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial-Modul ist innerhalb der aktiven Python Umgebung nicht verfügbar - dieses muss installiert werden um das Modem zu aktivieren.',
'Python needs the ReportLab module installed for PDF export': 'Python braucht das ReportLab-Modul für die PDF-Ausgabe. Dies ist derzeit nicht installiert!',
'Quality/Mode': 'Qualität/Modus',
'Quantity Committed': 'Menge bestätigt',
'Quantity Fulfilled': 'Menge erfüllt',
'Quantity range': 'Mengenumfang',
'Quantity Received': 'Erhaltene Menge',
'Quantity Returned': 'Zurückgegebene Menge',
'Quantity Sent': 'Gesendete Menge',
'Quantity in Transit': 'Menge in Transit',
'Quantity': 'Menge',
'Quarantine': 'Quarantäne',
'Queries': 'Abfragen',
'Query': 'Abfrage',
'Queryable?': 'Abfragbar?',
'RC frame with masonry infill': 'RC Rahmen mit Mauerwerkfüllung',
'RECORD A': 'DATENSATZ A',
'RECORD B': 'DATENSATZ B',
'Race': 'Rasse',
'Radio Callsign': 'Radio Rufzeichen',
'Radiological Hazard': 'Strahlungsgefahr',
'Radiology': 'Radiologie',
'Railway Accident': 'Eisenbahnunfall',
'Railway Hijacking': 'Eisenbahnentführung',
'Rain Fall': 'Regenfall',
'Rapid Assessment Details': 'Details zur Schnell-Beurteilung',
'Rapid Assessment added': 'Schnell-Beurteilung hinzugefügt',
'Rapid Assessment deleted': 'Schnell-Beurteilung gelöscht',
'Rapid Assessment updated': 'Schnell-Beurteilung aktualisiert',
'Rapid Assessment': 'Schnell-Beurteilung',
'Rapid Assessments & Flexible Impact Assessments': 'Schnell-Beurteilungen & flexible Abschätzungen der Auswirkungen',
'Rapid Assessments': 'Schnell-Beurteilungen',
'Rapid Close Lead': 'Schnell Führung schließen',
'Rapid Data Entry': 'Schnelle Dateneingabe',
'Raw Database access': 'Direkter Datenbankzugriff',
'Ready for Transfer': 'Transferbereit',
'Receive New Shipment': 'Neue Lieferung erhalten',
'Receive Shipment': 'Lieferung erhalten',
'Receive this shipment?': 'Lieferung erhalten?',
'Receive': 'Erhalten',
'Received By Person': 'Erhalten von einer Person',
'Received By': 'Erhalten von',
'Received Item Details': 'Details zum erhaltenen Artikel',
'Received Item deleted': 'Erhaltener Artikel gelöscht',
'Received Item updated': 'Erhaltener Artikel aktualisiert',
'Received Shipment Details': 'Details zur erhaltenen Lieferung',
'Received Shipment canceled and items removed from Inventory': 'Erhaltene Lieferung abgebrochen und Artikel aus dem Bestand entfernt',
'Received Shipment canceled': 'Erhaltene Lieferung abgebrochen',
'Received Shipment updated': 'Erhaltene Lieferung aktualisiert',
'Received Shipments': 'Erhaltene Lieferungen',
'Received': 'Erhalten',
'Received date': 'Eingangsdatum',
'Received/Incoming Shipments': 'Erhaltene/Eingehende Lieferungen',
'Receiving and Sending Items': 'Erhalten und Versenden von Artikeln',
'Recipient': 'Empfänger',
'Recipients': 'Empfänger',
'Recipient(s)': 'Empfänger',
'Recommendations for Repair and Reconstruction or Demolition': 'Empfehlungen für Reparatur und Wiederherstellung oder Abriß',
'Record Details': 'Details zum Datensatz',
'Record Saved': 'Datensatz gesichert',
'Record added': 'Datensatz hinzugefügt',
'Record any restriction on use or entry': 'Registrieren jeglicher Einschränkung bei der Nutzung oder Eintragung',
'Record deleted': 'Datensatz gelöscht',
'Record last updated': 'Datensatz zuletzt aktualisiert',
'Record not found!': 'Datensatz nicht gefunden!',
'Record not found': 'Datensatz nicht gefunden',
'Record updated': 'Datensatz aktualisiert',
'Record': 'Datensatz',
'Recording and Assigning Assets': 'Aufzeichnen und Zuweisen von Anlagen',
'Records': 'Datensätze',
'Recovery Request added': 'Bergungsanfrage hinzugefügt',
'Recovery Request deleted': 'Bergungsanfrage gelöscht',
'Recovery Request updated': 'Bergungsanfrage aktualisiert',
'Recovery Request': 'Bergungsanfrage',
'Recovery Requests': 'Bergungsanfragen',
'Recovery': 'Bergung',
'Recurring Cost': 'Wiederkehrende Kosten',
'Recurring Request?': 'Wiederkehrende Anfrage?',
'Recurring cost': 'Wiederkehrende Kosten',
'Recurring costs': 'Wiederkehrende Kosten',
'Recurring': 'Wiederkehrend',
'Red Cross / Red Crescent': 'Rotes Kreuz / Roter Halbmond',
'Red': 'Rot',
'Reference Document': 'Referenzdokument',
'Refresh Rate (seconds)': 'Aktualisierungsrate (Sekunden)',
'Refugees': 'Flüchtlinge',
'Refugee Support Database': 'Flüchtlingshilfe-Datenbank',
'Region': 'Regierungsbezirk',
'Region Location': 'Standort Region',
'Regional': 'Regional',
'Regions': 'Regionen',
'Register Person into this Camp': 'Registrieren der Person in dieses Camp',
'Register Person into this Shelter': 'Registrieren der Person in diese Unterkunft',
'Register Person': 'Registrieren einer Person',
'Register them as a volunteer': 'Als Freiwillige registrieren',
'Register': 'Registrieren',
'Registered People': 'Registrierte Personen',
'Registered users can': 'Registrierte Benutzer können',
'Registered on': 'Registriert am',
'Registration Date': 'Registrierungsdatum',
'Registration Details': 'Details zur Registrierung',
'Registration added': 'Registrierung hinzugefügt',
'Registration entry deleted': 'Anmeldungseintrag gelöscht',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Die Registrierung wartet noch auf die Genehmigung von der Qualifizierenden Stelle (%s) - bitte warten Sie bis Sie eine Bestätigung erhalten',
'Registration updated': 'Anmeldung aktualisiert',
'Registration': 'Registrierung',
'Rehabilitation/Long Term Care': 'Rehabilitation/Langfristige Pflege',
'Reinforced masonry': 'Mauerwerk verstärkt',
'Rejected': 'Zurückgewiesen',
'Relief Team': 'Unterstützungsteam',
'Relief': 'Unterstützung',
'Religious Leader': 'Religiöser Führer',
'Religious': 'Religiös',
'Relocate as instructed in the <instruction>': 'Verlagern wie in der <instruction> angewiesen',
'Remarks': 'Bemerkungen',
'Remove Asset from this event': 'Anlage von diesem Ereignis entfernen',
'Remove Asset from this scenario': 'Anlage von diesem Szenario entfernen',
'Remove Facility from this event': 'Einrichtung von diesem Ereignis entfernen',
'Remove Facility from this scenario': 'Einrichtung von diesem Szenario entfernen',
'Remove Human Resource from this event': 'Personelle Ressource von diesem Ereignis entfernen',
'Remove Human Resource from this scenario': 'Personelle Ressource von diesem Szenario entfernen',
'Remove Incident Type from this event': 'Vorfallstyp von diesem Ereignis entfernen',
'Remove Item from Inventory': 'Artikel aus Bestand entfernen',
'Remove Layer from Profile': 'Löschen der Kartenebene aus dem Profil',
'Remove Map Profile from this event': 'Kartenkonfiguration von diesem Ereignis entfernen',
'Remove Map Profile from this scenario': 'Kartenkonfiguration von diesem Szenario entfernen',
'Remove Person from Group': 'Person aus Gruppe entfernen',
'Remove Person from Team': 'Person aus Team entfernen',
'Remove existing data before import': 'Löschen der existierenden Daten vor dem Import',
'Remove this asset from this event': 'Diese Anlage vom Ereignis entfernen',
'Remove this asset from this scenario': 'Diese Anlage vom Szenario entfernen',
'Remove': 'Entfernen',
'Removed from Group': 'Aus Gruppe entfernt',
'Removed from Team': 'Aus Team entfernt',
'Repacked By': 'Umgepackt von',
'Repair': 'Reparieren',
'Repairs': 'Reparaturen',
'Repaired': 'Repariert',
'Repeat your password': 'Kennwort wiederholen',
'Replace if Master': 'Ersetzen wenn Master',
'Replace if Newer': 'Ersetze, falls neuer',
'Replace': 'Ersetzen',
'Report Another Assessment...': 'Melde andere Beurteilung...',
'Report Details': 'Details zum Bericht',
'Report Options': 'Optionen zum Bericht',
'Report Options': 'Optionen zum Bericht:',
'Report Types Include': 'Berichtstypen beinhalten',
'Report added': 'Bericht hinzugefügt',
'Report deleted': 'Bericht gelöscht',
'Report my location': 'Meinen Standort melden',
'Report of': 'Bericht von',
'Report the contributing factors for the current EMS status.': 'Melde die beitragenden Faktoren für den aktuellen EMS Status.',
'Report the contributing factors for the current OR status.': 'Melde die beitragenden Faktoren für den aktuellen OR Status.',
'Report them as found': 'Als gefunden melden',
'Report them missing': 'Als vermisst melden',
'Report updated': 'Bericht aktualisiert',
'Report': 'Bericht',
'Report To': 'Melden bei',
'Reported To': 'Gemeldet bei',
'Reporter Name': 'Name des Meldenden',
'Reporter': 'Meldender',
'Reporting on the projects in the region': 'Berichterstattung über die Projekte in der Region',
'Reports': 'Berichte',
'Repositories': 'Repositories',
'REQ': 'Anfrage',
'REQ Number': 'Anfragenummer',
'RSS Channels': 'RSS Kanäle',
'RSS Posts': 'RSS Posts',
'Request Added': 'Anfrage hinzugefügt',
'Request Canceled': 'Anfrage storniert',
'Request Details': 'Details zur Anfrage',
'Request Templates': 'Anfragevorlagen',
'Requested For Facility': 'Angefragt für Einrichtung',
'Request From': 'Anfrage von',
'Request Item Details': 'Details zur Anfrage nach Artikel',
'Request Item added': 'Anfrage nach Artikel hinzugefügt',
'Request Item deleted': 'Anfrage nach Artikel entfernt',
'Request Item from Available Inventory': 'Anfrage nach Artikel aus verfügbarem Bestand',
'Request Item updated': 'Anfrage nach Artikel aktualisiert',
'Request Item': 'Angefragter Artikel',
'Request Items': 'Angefragte Artikel',
'Request Status': 'Anfragestatus',
'Request Type': 'Anfragetyp',
'Request Updated': 'Anfrage aktualisiert',
'Request added': 'Anfrage hinzugefügt',
'Request deleted': 'Anfrage gelöscht',
'Request for Role Upgrade': 'Rollenupgrade anfordern',
'Request updated': 'Anfrage aktualisiert',
'Request': 'Anfrage',
'Requests': 'Anfragen',
'Request, Response & Session': 'Anfrage, Antwort & Sitzung',
'Requested By Facility': 'Angefragt von Einrichtung',
'Requested By': 'Angefragt durch',
'Requested From': 'Angefragt von',
'Requested Items': 'Angefragte Artikel',
'Requested Skills': 'Angefragte Fähigkeiten',
'Requested by': 'Angefragt durch',
'Requested on': 'Angefragt am',
'Requested': 'Angefragt',
'Requester': 'Anfragender',
'Requests Management': 'Anfragenverwaltung',
'Requests': 'Anfragen',
'Required Skills': 'Benötigte Fähigkeiten',
'Requires Login!': 'Anmeldung erforderlich!',
'Rescue and recovery': 'Rettung und Bergung (SAR)',
'Reset Password': 'Kennwort zurücksetzen',
'Reset': 'Zurücksetzen',
'Residents': 'Bewohner',
'Resolve Conflict': 'Konflikt lösen',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Das Verfolgen des Links lässt eine neue Anzeige erscheinen, die hilft, doppelte Einträge aufzulösen und die Datenbank zu aktualisieren.',
'Resolve': 'Auflösen',
'Resource Details': 'Details zur Ressource',
'Resource Inventory': 'Ressourcenbestand',
'Resource Type': 'Ressourcentyp',
'Resource added': 'Ressource hinzugefügt',
'Resource deleted': 'Ressource gelöscht',
'Resource updated': 'Ressource aktualisiert',
'Resource': 'Ressource',
'Resources': 'Ressourcen',
'Respiratory Infections': 'Atemwegsinfektionen',
'Response': 'Antwort',
'Restricted Access': 'Eingeschränkter Zugriff',
'Restricted Use': 'Eingeschränkte Verwendung',
'Result': 'Ergebnis',
'Results': 'Ergebnisse',
'Retail Crime': 'Einzelhandel Kriminalität',
'Retrieve Password': 'Kennwort abrufen',
'Return to Request': 'Zurück zur Anfrage',
'Return': 'Zurück',
'Returned From': 'Zurückgegeben von',
'Returned': 'Zurückgegeben',
'Review Incoming Shipment to Receive': 'Überprüfung der eingehenden Lieferung für die Annahme',
'Rice': 'Reis',
'Rich Text?': 'Rich Text?',
'Riot': 'Aufruhr',
'River Details': 'Details zum Fluss',
'River added': 'Fluss hinzugefügt',
'River deleted': 'Fluss gelöscht',
'River updated': 'Fluss aktualisiert',
'River': 'Fluss',
'Rivers': 'Flüsse',
'Road Accident': 'Verkehrsunfall',
'Road Closed': 'Straße gesperrt',
'Road Conditions': 'Zustand der Straßen',
'Road Delay': 'Verkehrsverzögerung',
'Road Hijacking': 'Straßenentführung',
'Road Usage Condition': 'Strassennutzungszustand',
'Role Details': 'Details zur Rolle',
'Role Name': 'Name der Rolle',
'Role Required': 'Erforderliche Rolle',
'Role Updated': 'Rolle aktualisiert',
'Role added': 'Rolle hinzugefügt',
'Role deleted': 'Rolle gelöscht',
'Role updated': 'Rolle aktualisiert',
'Role': 'Rolle',
'Role-based': 'Rollenbasiert',
'Roles Permitted': 'Zulässige Rollen',
'Roles': 'Rollen',
'Roll On Roll Off Berth': 'Fähranlegestelle',
'Roof tile': 'Dachziegel',
'Roofs, floors (vertical load)': 'Dächer, Böden (vertikale Belastung)',
'Room Details': 'Details zum Raum',
'Room added': 'Raum hinzugefügt',
'Room deleted': 'Raum gelöscht',
'Room updated': 'Raum aktualisiert',
'Room': 'Raum',
'Rooms': 'Räume',
'Rows in table': 'Zeilen in der Tabelle',
'Rows selected': 'Ausgewählte Zeilen',
'Run Interval': 'Intervall der Läufe',
'Runway Length (m)': 'Länge der Landebahn (m)',
'Runway Surface': 'Oberfläche der Landebahn',
'Runway Width (m)': 'Breite der Landebahn (m)',
'Running Cost': 'Laufzeitkosten',
'SMS Modem Channels': 'SMS Modem Kanäle',
'SMS Outbound Gateways': 'SMS Ausgangsgateways',
'SMS SMTP Channels': 'SMS SMTP Kanäle',
'SMS WebAPI Channels': 'SMS WebAPI Kanäle',
'Safe environment for vulnerable groups': 'Sichere Umgebung für gefährdete Gruppen',
'Safety Assessment Form': 'Formular für Sicherheitsbeurteilung',
'Safety of children and women affected by disaster?': 'Ist die Sicherheit von Kindern und Frauen durch die Katastrophe (resp. das Unglück) beeinträchtigt?',
'Sahana Blue': 'Sahana Blau',
'Sahana Community Chat': 'Sahana Gemeinschaft Chat',
'Sahana Eden <=> Other': 'Sahana Eden <=> Andere',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden - OpenSource Management-Plattform für humanitäre Notsituationen',
'Sahana Eden Website': 'Sahana Eden Internetseite',
'Sahana Steel': 'Sahana Stahl',
'Sahana access granted': 'Sahana Zugriff gewährt',
'Salted Fish': 'Gesalzener Fisch',
'Sanitation problems': 'Sanitäre Probleme',
'Satellite': 'Satellit',
'Saturday': 'Samstag',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Speichern: Standardmäßig Länge/Breite und Zoomfaktor',
'Save': 'Speichern',
'Saved.': 'Gespeichert.',
'Saved Filters': 'Gespeicherte Filter',
'Saving...': 'Wird gespeichert...',
'Scale of Results': 'Umfang der Ergebnisse',
'Scenario Details': 'Details zum Szenario',
'Scenario added': 'Szenario hinzugefügt',
'Scenario deleted': 'Szenario gelöscht',
'Scenario updated': 'Szenario aktualisiert',
'Scenario': 'Szenario',
'Scenarios': 'Szenarios',
'Schedule': 'Zeitplan',
'School Closure': 'Schulschließung',
'School Lockdown': 'Schule geschlossen',
'School Teacher': 'Schullehrer',
'School activities': 'Schulaktivitäten',
'School assistance': 'Schulunterstützung',
'School attendance': 'Schulbesuch',
'School destroyed': 'Schule zerstört',
'School heavily damaged': 'Schule stark beschädigt',
'School tents received': 'Schulzelte erhalten',
'School tents, source': 'Herkunft der Schulzelte',
'School used for other purpose': 'Schule wird für andere Zwecke verwendet',
'School': 'Schule',
'School/studying': 'Schule/lernen',
'Schools': 'Schulen',
'Seaports': 'Seehäfen',
'Search Activities': 'Suchaktivitäten',
'Search Activity Report': 'Bericht über Suchaktivitäten',
'Search Addresses': 'Suche nach Adressen',
'Search All Requested Items': 'Alle angefordeten Artikel durchsuchen',
'Search All Requested Skills': 'Alle angefragten Fähigkeiten durchsuchen',
'Search Alternative Items': 'Suche nach alternativen Artikeln',
'Search Assessment Summaries': 'Suche Beurteilungszusammenfassungen',
'Search Assessments': 'Suche Beurteilungen',
'Search Asset Log': 'Suche Anlageprotokoll',
'Search Assets': 'Suche Anlagen',
'Search Baseline Type': 'Referenzdatumstyp suchen',
'Search Baselines': 'Referenzdatum suchen',
'Search Brands': 'Marken suchen',
'Search Budgets': 'Budgets suchen',
'Search Bundles': 'Produktpakete suchen',
'Search Camp Services': 'Camp Leistungen suchen',
'Search Camp Types': 'Camp Typen suchen',
'Search Camps': 'Camps suchen',
'Search Catalog Items': 'Katalog Einträge suchen',
'Search Catalogs': 'Kataloge suchen',
'Search Certificates': 'Zertifikate suchen',
'Search Certifications': 'Zertifizierungen suchen',
'Search Checklists': 'Checklisten suchen',
'Search Cluster Subsectors': 'Cluster Teilbereiche suchen',
'Search Clusters': 'Cluster suchen',
'Search Commitment Items': 'Zugesagte Artikel suchen',
'Search Commitments': 'Zusagen suchen',
'Search Competencies': 'Kompetenzen suchen',
'Search Competency Ratings': 'Kompetenzeinstufungen suchen',
'Search Contact Information': 'Nach Kontaktinformationen suchen',
'Search Contacts': 'Nach Kontakten suchen',
'Search Course Certificates': 'Suchen nach Kurszertifikaten',
'Search Courses': 'Kurse suchen',
'Search Credentials': 'Qualifikationen suchen',
'Search Documents': 'Dokumente suchen',
'Search Donors': 'Spender suchen',
'Search Entries': 'Einträge suchen',
'Search Events': 'Ereignisse suchen',
'Search Facilities': 'Einrichtungen suchen',
'Search Feature Layers': 'Objekt-Ebenen suchen',
'Search Flood Reports': 'Flutberichte suchen',
'Search Groups': 'Gruppen suchen',
'Search Human Resources': 'Personelle Ressourcen suchen',
'Search Identity': 'Identität suchen',
'Search Images': 'Bilder suchen',
'Search Impact Type': 'Auswirkungstypen suchen',
'Search Impacts': 'Auswirkungen suchen',
'Search Incident Reports': 'Vorfallberichte suchen',
'Search Inventory Items': 'Bestandsartikel suchen',
'Search Inventory items': 'Bestandsartikel suchen',
'Search Item Categories': 'Artikelkategorien suchen',
'Search Item Packs': 'Artikelpakete suchen',
'Search Items': 'Artikel suchen',
'Search Job Roles': 'Tätigkeiten suchen',
'Search Keys': 'Schlüssel suchen',
'Search Kits': 'Ausstattungen (Kits) suchen',
'Search Layers': 'Kartenebenen suchen',
'Search Level 1 Assessments': 'Suche Stufe 1 Beurteilungen',
'Search Level 2 Assessments': 'Suche Stufe 2 Beurteilungen',
'Search Locations': 'Gebiet/Standort suchen',
'Search Log Entry': 'Protokolleintrag suchen',
'Search Map Profiles': 'Kartenkonfiguration suchen',
'Search Markers': 'Marker/Symbol suchen',
'Search Members': 'Mitglied suchen',
'Search Membership': 'Mitgliedschaft suchen',
'Search Missions': 'Aufträge suchen',
'Search Need Type': 'Anforderungstyp suchen',
'Search Needs': 'Anforderungen suchen',
'Search Offices': 'Büros suchen',
'Search Organizations': 'Organisationen suchen',
'Search Peer': 'Peer Suchen',
'Search Personal Effects': 'Persönliche Habe suchen',
'Search Persons': 'Personen suchen',
'Search Photos': 'Fotos suchen',
'Search Population Statistics': 'Bevölkerungsstatistiken suchen',
'Search Positions': 'Positionen suchen',
'Search Problems': 'Probleme suchen',
'Search Projections': 'Kartenprojektionen suchen',
'Search Projects': 'Projekte suchen',
'Search Queries': 'Suchabfragen',
'Search Rapid Assessments': 'Schnell-Beurteilung suchen',
'Search Received Items': 'Erhaltene Artikel suchen',
'Search Received Shipments': 'Erhaltene Lieferungen suchen',
'Search Records': 'Datensätze suchen',
'Search Registrations': 'Registrierungen suchen',
'Search Registration Request': 'Registrierungsanfragen suchen',
'Search Report': 'Berichte suchen',
'Search Request Items': 'Angefragte Artikel suchen',
'Search Request': 'Anfrage suchen',
'Search Requested Items': 'Angefragte Artikel suchen',
'Search Requests': 'Anfragen suchen',
'Search Resources': 'Ressourcen suchen',
'Search Rivers': 'Flüsse suchen',
'Search Roles': 'Rollen suchen',
'Search Rooms': 'Räume suchen',
'Search Scenarios': 'Szenarien suchen',
'Search Sections': 'Abschnitte suchen',
'Search Sectors': 'Bereiche suchen',
'Search Sent Items': 'Gesendete Artikel suchen',
'Search Sent Shipments': 'Gesendete Lieferungen suchen',
'Search Service Profiles': 'Leistungsprofile suchen',
'Search Settings': 'Sucheinstellungen',
'Search Shelter Services': 'Unterkunftsleistungen suchen',
'Search Shelter Types': 'Unterkunftsarten suchen',
'Search Shelters': 'Unterkünfte suchen',
'Search Shipped Items': 'Suche über gelieferte Artikel',
'Search Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten suchen',
'Search Skill Provisions': 'Fähigkeits-Bereitstellungen suchen',
'Search Skill Types': 'Fähigkeitstypen suchen',
'Search Skills': 'Fähigkeiten suchen',
'Search Solutions': 'Lösungen suchen',
'Search Staff Types': 'Mitarbeitertypen suchen',
'Search Staff or Volunteer': 'Suche Mitarbeiter oder Freiwillige',
'Search Status': 'Status suchen',
'Search Subscriptions': 'Abonnement suchen',
'Search Subsectors': 'Teilbereiche suchen',
'Search Support Requests': 'Unterstützungsanfragen suchen',
'Search Tasks': 'Aufgaben suchen',
'Search Teams': 'Teams suchen',
'Search Themes': 'Themen suchen',
'Search Tickets': 'Tickets suchen',
'Search Tracks': 'Tracks suchen',
'Search Training Participants': 'Suche Kursteilnehmer',
'Search Trainings': 'Schulung suchen',
'Search Twitter Tags': 'Twitter-Tags suchen',
'Search Units': 'Einheiten suchen',
'Search Users': 'Benutzer suchen',
'Search Volunteer Availability': 'Verfügbarkeit von Freiwilligen suchen',
'Search Volunteers': 'Freiwillige suchen',
'Search Warehouses': 'Warenlager suchen',
'Search and Edit Group': 'Suchen und Bearbeiten von Gruppen',
'Search and Edit Individual': 'Suchen und Bearbeiten von einzelnen Personen',
'Search by Skills': 'Suche nach Fähigkeiten',
'Search by skills': 'Suche nach Fähigkeiten',
'Search for Staff or Volunteers': 'Suche nach Mitarbeitern oder Freiwilligen',
'Search for a Location by name, including local names.': 'Suchen nach Standortnamen, einschließlich lokaler Namen.',
'Search for a Person': 'Suche nach einer Person',
'Search for a Project': 'Suche nach einem Projekt',
'Search for a shipment by looking for text in any field.': 'Suche nach einer Lieferung (Volltextsuche)',
'Search for a shipment received between these dates': 'Suche nach einer erhaltenen Lieferung im Zeitraum',
'Search for an Organization by name or acronym': 'Suche nach einer Organisation nach Namen oder Abkürzung',
'Search for an Organization by name or acronym.': 'Suche nach einer Organisation in Namen und Acronym.',
'Search for an asset by text.': 'Suche Anlage über Text.',
'Search for an item by category.': 'Suche Artikel nach Kategorie.',
'Search for an item by text.': 'Suche Artikel über Text.',
'Search for asset by country.': 'Suche Anlage nach Ländern.',
'Search for office by country.': 'Suche Büro nach Ländern.',
'Search for office by organization.': 'Suche Büro nach Organisation.',
'Search for office by text.': 'Suche Büro über Text',
'Search for Persons': 'Suche nach Personen',
'Search for warehouse by country.': 'Suche Warenlager nach Ländern',
'Search for warehouse by organization.': 'Suche Warenlager nach Organisation',
'Search for warehouse by text.': 'Suche Warenlager über Text',
'Search here for a person record in order to:': 'Hier nach einem Personendatensatz suchen, um zu:',
'Search location in Geonames': 'Ortssuche in Geonames',
'Search messages': 'Suche Nachrichten',
'Search': 'Suchen',
'Searching for different groups and individuals': 'Suche nach verschiedenen Gruppen und Einzelpersonen',
'Secondary Server (Optional)': 'Sekundärer Server (optional)',
'Seconds must be a number between 0 and 60': 'Sekunden müssen eine Zahl zwischen 0 und 60 sein',
'Section Details': 'Details zum Abschnitt',
'Section deleted': 'Abschnitt gelöscht',
'Section updated': 'Abschnitt aktualisiert',
'Sections': 'Abschnitte',
'Sector Details': 'Details zum Bereich ',
'Sector added': 'Bereich hinzugefügt',
'Sector deleted': 'Bereich gelöscht',
'Sector updated': 'Bereich aktualisiert',
'Sector': 'Bereich',
'Sector(s)': 'Bereich(e)',
'Sectors': 'Bereiche',
'Secure Storage Capacity': 'Sichere Lagerkapazität',
'Security Status': 'Sicherheitsstatus',
'Security problems': 'Sicherheitsprobleme',
'Security': 'Sicherheit',
'See All Entries': 'Siehe alle Einträge',
'See all': 'Alles anzeigen',
'See unassigned recovery requests': 'Siehe nicht zugeordnete Bergungsanfragen.',
'Select': 'Auswahl',
'Select All': 'Alles auswählen',
'Select Items from the Request': 'Wählen sie Artikel aus der Anfrage',
'Select Items from this Inventory': 'Wählen sie Artikel aus diesem Bestand',
'Select Land': 'Land auswählen',
'Select Modules for translation': 'Auswahl der Module zum Übersetzen',
'Select a location': 'Wählen Sie einen Ort aus',
'Select a question from the list': 'Wählen sie eine Frage aus der Liste aus',
'Select a range for the number of total beds': 'Wählen sie einen Bereich für die Gesamtanzahl von Betten',
'Select all that apply': 'Wählen Sie alles Zutreffende aus',
'Select an Organization to see a list of offices': 'Wählen Sie eine Organisation aus, um eine Liste der zugehörigen Büros anzuzeigen.',
'Select resources to import': 'Wählen Sie Ressourcen zum Importieren aus',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Wählen Sie die Overlays für die Beurteilungen und die zugehörigen Aktivitäten, um die Differenz zu identifizieren.',
'Select the person assigned to this role for this project.': 'Wählen Sie die Person, die mit dieser Rolle dem Projekt zugeordnet werden soll.',
'Select to show this configuration in the Regions menu.': "Auswahl, um diese Konfiguration im Menü 'Regionen' anzuzeigen.",
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Auswahl ob ein Modem, Tropo oder eine andere Schnittstelle zum Versand von SMS verwendet werden soll.',
'Send Alerts using Email &/or SMS': 'Senden von Alarmen unter Nutzung von E-Mail und/oder SMS',
'Send Commitment as Shipment': 'Zusage Lieferung zu senden',
'Send Message': 'Nachricht senden',
'Send New Shipment': 'Neue Lieferung senden',
'Send Notification': 'Benachrichtigung senden',
'Send Shipment': 'Lieferung senden',
'Send Task Notification': 'Auftragsbenachrichtigung senden',
'Send a message to this person': 'Dieser Person eine Nachricht senden',
'Send a message to this team': 'Diesem Team eine Nachricht senden',
'Send from %s': 'Senden von %s',
'Send message': 'Nachricht senden',
'Send new message': 'Neue Nachricht senden',
'Send': 'Senden',
'Sends & Receives Alerts via Email & SMS': 'Schickt & empfängt Benachrichtigungen über Email und SMS',
'Sent By Person': 'Gesendet von einer Person',
'Sent By': 'Gesendet von',
'Sent Emails': 'Gesendete E-Mails',
'Sent Item Details': 'Details zum versendeten Artikel',
'Sent Item deleted': 'Gesendeter Artikel gelöscht',
'Sent Item updated': 'Gesendeter Artikel aktualisiert',
'Sent Posts': 'Gesendete Posts',
'Sent Shipment Details': 'Details zur gesendeten Lieferung',
'Sent Shipment canceled and items returned to Inventory': 'Gesendete Lieferung storniert und Artikel zum Lager zurückgebracht',
'Sent Shipment canceled': 'Gesendete Lieferung storniert',
'Sent Shipment updated': 'Gesendete Lieferung aktualisiert',
'Sent Shipments': 'Gesendete Lieferungen',
'Sent SMS': 'Gesendete SMS',
'Sent date': 'Versanddatum',
'Sent': 'gesendet',
'Separated children, caregiving arrangements': 'von Eltern getrennte Kinder, Pflegevereinbarungen',
'Serial Number': 'Seriennummer',
'Series': 'Serie',
'Server': 'Server',
'Service Catalog': 'Leistungskatalog',
'Service Record': 'Leistungseintrag',
'Service or Facility': 'Leistung oder Einrichtung',
'Service profile added': 'Leistungsprofil hinzugefügt',
'Service profile deleted': 'Leistungsprofil gelöscht',
'Service profile updated': 'Leistungsprofil aktualisiert',
'Service': 'Leistung',
'Services Available': 'Verfügbare Leistungen',
'Services': 'Leistungen',
'Set Base Site': 'Basisstandort festlegen',
'Set By': 'Definiert durch',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': "Wählen Sie 'Wahr' um Benutzern, die nicht Karten-Admins sind, zu erlauben dieses Level der Gebietshierarchie zu verändern.",
'Setting Details': 'Details konfigurieren',
'Setting added': 'Einstellung hinzugefügt',
'Setting deleted': 'Einstellungen gelöscht',
'Setting updated': 'Einstellung aktualisiert',
'Settings updated': 'Einstellungen aktualisiert',
'Settings were reset because authenticating with Twitter failed': 'Einstellungen wurden zurückgesetzt da die Authentifizierung mit Twitter fehlgeschlagen ist',
'Settings which can be configured through the web interface are available here.': 'Die Einstellungen, die über das Webinterface konfiguriert werden können, sind hier verfügbar.',
'Settings': 'Einstellungen',
'Severe': 'Ernsthaft',
'Severity': 'Wertigkeit',
'Sex': 'Geschlecht',
'Share a common Marker (unless over-ridden at the Feature level)': 'Definiere einen allgemeinen Marker/Symbol (kann auf Objekt-Ebene überschrieben werden)',
'Shelter & Essential NFIs': 'Unterkünfte & Essentielle NFIs',
'Shelter Details': 'Details zur Unterkunft',
'Shelter Name': 'Name der Unterkunft',
'Shelter Registration Status': 'Registrierungsstatus',
'Shelter Registry': 'Unterkunft Register',
'Shelter Service Details': 'Details zur Unterkunftsleistung',
'Shelter Service added': 'Unterkunftsleistung hinzugefügt',
'Shelter Service deleted': 'Unterkunftsleistung gelöscht',
'Shelter Service updated': 'Unterkunftsleistung aktualisiert',
'Shelter Service': 'Unterkunftsleistung',
'Shelter Services': 'Unterkunftsleistungen',
'Shelter Settings': 'Eigenschaften der Unterkunft',
'Shelter Type Details': 'Details zum Unterkunftstyp',
'Shelter Type added': 'Unterkunftstyp hinzugefügt',
'Shelter Type deleted': 'Unterkunftstyp gelöscht',
'Shelter Type updated': 'Unterkunftstyp aktualisiert',
'Shelter Type': 'Unterkunftstyp',
'Shelter Types and Services': 'Unterkunftstypen und -leistungen',
'Shelter Types': 'Unterkunftstypen',
'Shelter added': 'Unterkunft hinzugefügt',
'Shelter deleted': 'Unterkunft gelöscht',
'Shelter updated': 'Unterkunft aktualisiert',
'Shelter': 'Unterkunft',
'Shelter/NFI Assistance': 'Unterkunft/ NFI Hilfe',
'Shelters': 'Unterkünfte',
'Shipment Created': 'Lieferung erstellt',
'Shipment Items received by Inventory': 'Lieferungsartikel aus Bestand empfangen',
'Shipment Items sent from Inventory': 'Lieferungsartikel von Bestand gesendet',
'Shipment Items': 'Lieferungsartikel',
'Shipment Type': 'Typ der Lieferung',
'Shipment to Send': 'Zu sendende Lieferung',
'Shipments To': 'Lieferungen nach',
'Shipments': 'Lieferungen',
'Shipping cost': 'Lieferkosten',
'Shooting': 'Filmaufnahme',
'Short Assessment': 'Kurz Beurteilung',
'Short Description': 'Kurzbeschreibung',
'Show %(number)s entries': 'Zeige %(number)s Einträge',
'Show Checklist': 'Checkliste anzeigen',
'Show Details': 'Details anzeigen',
'Show Location?': 'Gebiet/Standort anzeigen?',
'Show Map': 'Karte anzeigen',
'Show Region in Menu?': 'Region im Menu anzeigen?',
'Show author picture?': 'Bild des Autors anzeigen?',
'Show on Map': 'Auf Karte anzeigen',
'Show on map': 'Auf Karte anzeigen',
'Show totals': 'Summen anzeigen',
'Show': 'Zeige',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Einträge _START_ bis _END_ von _TOTAL_',
'Showing 0 to 0 of 0 entries': 'Keine Einträge',
'Sign-up as a volunteer': 'Als Freiwilliger anmelden',
'Sign-up for Account': 'Für Benutzerkennung anmelden',
'Sign-up succesful - you should hear from us soon!': 'Registrierung erfolgreich - sie werden in Kürze von uns hören.',
'simplified/slow': 'vereinfacht/langsam',
'Site Administration': 'Administration der Seite',
'Site': 'Standort',
'Site Needs': 'Standortbedarf',
'Add Site Needs': 'Standortbedarf hinzufügen',
'Edit Site Needs': 'Standortbedarf ändern',
'Delete Site Needs': 'Standortbedarf löschen',
'Site Needs added': 'Standortbedarf hinzugefügt',
'Site Needs updated': 'Standortbedarf aktualisiert',
'Site Needs deleted': 'Standortbedarf gelöscht',
'Situation Awareness & Geospatial Analysis': 'Situationseinschätzung & Räumliche Analyse',
'Sketch': 'Skizze',
'Skill Catalog': 'Fähigkeitskatalog',
'Skill Details': 'Details zur Fähigkeit',
'Skill Equivalence Details': 'Details zur Fähigkeits-Vergleichbarkeit',
'Skill Equivalence added': 'Fähigkeits-Vergleichbarkeit hinzugefügt',
'Skill Equivalence deleted': 'Fähigkeits-Vergleichbarkeit gelöscht',
'Skill Equivalence updated': 'Fähigkeits-Vergleichbarkeit aktualisiert',
'Skill Equivalence': 'Fähigkeits-Vergleichbarkeit',
'Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten',
'Skill Provision Catalog': 'Fähigkeiten Bestimmungskatalog',
'Skill Provision Details': 'Fähigkeiten Bestimmung Details',
'Skill Provision added': 'Geschick Bestimmung hinzugefügt',
'Skill Provision deleted': 'Fähigkeitenbestimmung gelöscht',
'Skill Provision updated': 'Fähigkeiten Bestimmung aktualisiert',
'Skill Provision': 'Geschick Bestimmung',
'Skill Provisions': 'Fähigkeits-Bereitstellungen',
'Skill Status': 'Fähigkeitsstatus',
'Skill TYpe': 'Art der Fähigkeit',
'Skill Type Catalog': 'Fähigkeitstypen-Katalog',
'Skill Type Details': 'Details zum Fähigkeitstyp',
'Skill Type added': 'Fähigkeitstyp hinzugefügt',
'Skill Type deleted': 'Fähigkeitstyp gelöscht',
'Skill Type updated': 'Fähigkeitstyp aktualisiert',
'Skill Types': 'Fähigkeitstypen',
'Skill added': 'Fähigkeit hinzugefügt',
'Skill deleted': 'Fähigkeit gelöscht',
'Skill updated': 'Fähigkeit aktualisiert',
'Skill': 'Kenntnisse',
'Skills Catalog': 'Fähigkeiten Katalog',
'Skills Management': 'Fähigkeiten Management',
'Skills': 'Fähigkeiten',
'Skype ID': 'Skype ID',
'Slope failure, debris': 'Abhang Bruch, Schutt',
'Small Trade': 'Kleiner Handel',
'Smoke': 'Rauch',
'Snapshot Report': 'Bericht zur aktuellen Lage',
'Snapshot': 'Momentaufnahme',
'Snow Fall': 'Schneefall',
'Snow Squall': 'Schneeschauer',
'Soil bulging, liquefaction': 'Boden aufgequollen, Verflüssigung',
'Solid waste': 'Feste Abfälle',
'Solution Details': 'Details zur Lösung',
'Solution Item': 'Lösungselement',
'Solution added': 'Lösung hinzugefügt',
'Solution deleted': 'Lösung gelöscht',
'Solution updated': 'Lösung aktualisiert',
'Solution': 'Lösung',
'Solutions': 'Lösungen',
'Some': 'Einige',
'Sorry that location appears to be outside the area of the Parent.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs des übergeordneten Elements zu liegen.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs zu liegen, der von dieser Anwendung unterstützt wird.',
'Sorry, I could not understand your request': 'Entschuldigung, leider konnte ich ihre Anfrage nicht verstehen',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt Gruppen von Standorten/Gebieten zu erstellen.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt diese Standorte/Gebiete zu bearbeiten',
'Sorry, something went wrong.': 'Entschuldigung, leider ist etwas schief gelaufen.',
'Sorry, that page is forbidden for some reason.': 'Entschuldigung, leider ist der Besuch dieser Seite aus einem bestimmten Grund nicht zulässig.',
'Sorry, that service is temporary unavailable.': 'Entschuldigung, leider steht dieser Service vorübergehend nicht zur Verfügung.',
'Sorry, there are no addresses to display': 'Entschuldigung, leider sind keine Adressen vorhanden um angezeigt zu werden.',
'Sought': 'Gesucht',
'Source ID': 'Quellen ID',
'Source Time': 'Zeit der Quelle',
'Source': 'Quelle',
'Sources of income': 'Einkommensquellen',
'Space Debris': 'Weltraumschrott',
'Spanish': 'Spanisch',
'Special Ice': 'Besonderes Eis',
'Special Marine': 'Spezielles Wasserfahrzeug',
'Specialized Hospital': 'Spezialisiertes Krankenhaus',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Bestimmter Bereich (z.B. Gebäude/Raum) innerhalb eines Ortes in der diese Person/Gruppe gefunden werden kann.',
'Specific locations need to have a parent of level': 'Bestimmte Orte benötigen ein übergeordnetes Element der Stufe',
'Specify a descriptive title for the image.': 'Geben Sie einen beschreibenden Titel für das Bild an.',
'Specify the bed type of this unit.': 'Geben Sie den Bettentyp für diese Einheit an.',
'Specify the number of available sets': 'Geben Sie die Anzahl der verfügbaren Sätze an',
'Specify the number of available units (adult doses)': 'Geben Sie die Anzahl der verfügbaren Einheiten ein (Dosis für Erwachsene)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Geben Sie die Anzahl der verfügbaren Einheiten (in Liter) von Ringer-Lactat oder gleichwertige Lösungen ein',
'Specify the number of sets needed per 24h': 'Geben Sie die Anzahl der erforderlichen Sätze pro 24h ein',
'Specify the number of units (Erwachsenendosen) needed per 24h': 'Geben Sie die Anzahl der Einheiten ein (Dosis für Erwachsene) die pro 24h benötigt werden.',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Geben Sie die Anzahl der Einheiten (in Liter) von Ringer-Lactat oder gleichwertigen Lösungen ein, die man pro 24h braucht.',
'Spherical Mercator?': 'Spherische Mercator?',
'Spreadsheet Importer': 'Import von Tabellendokumenten',
'Spreadsheet uploaded': 'Tabellendokument hochgeladen',
'Squall': 'Sturmschauer',
'Staff & Volunteers': 'Mitarbeiter & Freiwillige',
'Staff & Volunteers (Combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff ID': 'Mitarbeiter-ID',
'Staff Management': 'Mitarbeitermanagement',
'Staff Member Details': 'Details zum Mitarbeiter',
'Staff Member added': 'Mitarbeiter hinzugefügt',
'Staff Members': 'Mitarbeiter',
'Staff Record': 'Mitarbeiterakte',
'Staff Report': 'Mitarbeiterbericht',
'Staff Type Details': 'Details zum Mitarbeitertyp',
'Staff Type added': 'Mitarbeitertyp hinzugefügt.',
'Staff Type deleted': 'Mitarbeitertyp gelöscht',
'Staff Type updated': 'Mitarbeitertyp aktualisiert',
'Staff Types': 'Mitarbeitertypen',
'Staff and Volunteers': 'Mitarbeiter und Freiwillige',
'Staff & Volunteers (combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff member added': 'Mitarbeiter hinzugefügt',
'Staff present and caring for residents': 'Mitarbeiter ist anwesend und versorgt die Anwohner.',
'Staff with Contracts Expiring in the next Month': 'Mitarbeiter deren Veträge im Laufe des nächsten Monats ablaufen',
'Staff': 'Mitarbeiter',
'Staffing': 'Mitarbeiterausstattung',
'Stairs': 'Treppen',
'Start Date': 'Startdatum',
'Start date': 'Startdatum',
'Start of Period': 'Beginn einer Periode',
'State': 'Bundesland',
'State / Province': 'Staat / Bundesland',
'State /Province': 'Staat / Bundesland',
'Stationery': 'Büromaterial',
'Status Report': 'Statusbericht',
'Status Reports': 'Statusberichte',
'Status Updated': 'Status aktualisiert',
'Status added': 'Status hinzugefügt',
'Status deleted': 'Status gelöscht',
'Status of clinical operation of the facility.': 'Status von klinischen Möglichkeiten dieser Einrichtung.',
'Status of general operation of the facility.': 'Status von allgemeinen Möglichkeiten dieser Einrichtung.',
'Status of morgue capacity.': 'Status der Leichenhallenkapazität',
'Status of operations of the emergency department of this hospital.': 'Status von Möglichkeiten der Notaufnahme dieses Krankenhauses.',
'Status of security procedures/access restrictions in the hospital.': 'Status von Sicherheitsverfahren/Zugriffsbeschränkung in diesem Krankenhaus.',
'Status of the operating rooms of this hospital.': 'Der Status des Betriebsräume des Krankenhauses.',
'Status updated': 'Status aktualisiert',
'Status': 'Status',
'Steel frame': 'Stahlrahmen',
'Stock': 'Bestand',
'Stock Counts': 'Bestandszahlen',
'Stock in Warehouse': 'Bestand im Warenlager',
'Stolen': 'Gestohlen',
'Store spreadsheets in the Eden database': 'Speichere Tabellendokument in die Eden Datenbank',
'Storeys at and above ground level': 'Stockwerke auf und über der Erdoberfläche',
'Storm Force Wind': 'Sturm Kraft Wind',
'Storm Surge': 'Sturmflut',
'Stowaway': 'Blinder Passagier',
'Street Address': 'Adresse',
'Strong Wind': 'Starker Wind',
'Structural Hazards': 'Strukturelle Gefahren',
'Structural': 'Strukturell',
'Styles': 'Styles/Symbolisierungen',
'Style Field': 'Style-Feld',
'Style Values': 'Style-Werte',
'Sub-type': 'Unterart',
'Subject': 'Betreff',
'Submission successful - please wait': 'Absenden erfolgreich - bitte warten',
'Submission successful - please wait...': 'Absenden erfolgreich - bitte warten ...',
'Submit New (full form)': 'Daten erneut absenden (vollständiges Formular)',
'Submit New (triage)': 'Daten erneut absenden (Auswahl)',
'Submit New': 'Daten erneut absenden',
'Submit a request for recovery': 'Registrieren einer Bergungsanfrage',
'Submit new Level 1 assessment (full form)': 'Absenden einer neuen Stufe 1 Beurteilung (vollständiges Formular)',
'Submit new Level 1 assessment (triage)': 'Absenden einer neuen Stufe 1 Beurteilung (Auswahl)',
'Submit new Level 2 assessment': 'Absenden einer neuen Stufe 2 Beurteilung',
'Submit': 'Abschicken',
'Subscription Details': 'Details zum Abo',
'Subscription added': 'Abo hinzugefügt',
'Subscription deleted': 'Abo gelöscht',
'Subscription updated': 'Abo aktualisiert',
'Subscriptions': 'Abonnements',
'Subsector Details': 'Details zum Teilbereich',
'Subsector added': 'Teilbereich hinzugefügt',
'Subsector deleted': 'Teilbereich gelöscht',
'Subsector updated': 'Teilbereich aktualisiert',
'Subsector': 'Teilbereich',
'Subsectors': 'Teilbereich',
'Subsistence Cost': 'Verpflegungskosten',
'Suburb': 'Vorort',
'Suggest not changing this field unless you know what you are doing.': 'Bitte ändern sie diesen Bereich nur, wenn sie ganz genau wissen was sie da tun!!!!',
'Summary by Administration Level': 'Zusammenfassung nach Verwaltungsstufe',
'Summary of Incoming Supplies': 'Zusammenfassung der eingehenden Vorräte',
'Summary of Releases': 'Zusammenfassung der Releases',
'Summary': 'Zusammenfassung',
'Sunday': 'Sonntag',
'Supplier/Donor': 'Lieferant/Spender',
'Suppliers': 'Lieferanten',
'Supply Chain Management': 'Versorgungsketten-Management',
'Support provided': 'Durchgeführte Massnahmen',
'Support Request': 'Unterstützungsanforderung',
'Support Requests': 'Unterstützungsanforderungen',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Unterstützt den Entscheidungsprozess von großen Gruppen von Krisenmanagementexperten indem man den Gruppen ermöglicht Prioritätenlisten aufzustellen.',
'Surgery': 'Chirurgie',
'Survey Answer Details': 'Details zur Umfrage-Antwort',
'Survey Answer added': 'Umfrage-Antwort hinzugefügt',
'Survey Answer deleted': 'Umfrage-Antwort gelöscht',
'Survey Answer updated': 'Umfrage-Antwort aktualisiert',
'Survey Answer': 'Umfrage-Antwort',
'Survey Module': 'Umfrage Modul',
'Survey Name': 'Name der Umfrage',
'Survey Question Details': 'Details zur Umfrage-Frage',
'Survey Question Display Name': 'Angezeigter Name der Umfrage-Frage',
'Survey Question added': 'Umfrage-Frage hinzugefügt',
'Survey Question deleted': 'Umfrage-Frage gelöscht',
'Survey Question updated': 'Umfrage-Frage aktualisiert',
'Survey Question': 'Umfrage-Frage',
'Survey Series Details': 'Details zur Umfragenserie',
'Survey Series Name': 'Angezeigter Name der Umfrageserie',
'Survey Series added': 'Umfrageserie hinzugefügt',
'Survey Series deleted': 'Umfrageserie gelöscht',
'Survey Series updated': 'Umfrageserie aktualisiert',
'Survey Series': 'Umfrageserien',
'Survey Template Details': 'Details zur Umfragenvorlage',
'Survey Template added': 'Umfragenvorlage hinzugefügt',
'Survey Template deleted': 'Umfragenvorlage gelöscht',
'Survey Template updated': 'Umfragevorlage aktualisiert',
'Survey Template': 'Umfragenvorlage',
'Survey Templates': 'Umfragenvorlagen',
'Surveys': 'Umfragen',
'Suspended': 'Gesperrt',
'Suspended Cases': 'Gesperrte Fälle',
'Switch to 3D': 'In Google Earth anzeigen',
'Symbology': 'Symbolisierung',
'Sync Conflicts': 'Synchronisierungskonflikte',
'Sync History': 'Synchronisierungshistorie',
'Sync Now': 'Jetzt synchronisieren',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Partner für die Synchronisation sind Instanzen von Peers (SahanaEden, SahanaAgasti, Ushahidi, etc. ) mit denen die aktuelle Intanz synchronisiert werden soll. Ein Klick auf den Link rechts bringt Sie zur Seite auf der Sie diese hinzufügen, suchen und ändern können.',
'Sync Partners': 'Partner für die Synchronisation',
'Sync Pools': 'Synchronisierungspools',
'Sync Schedule': 'Synchronisierungszeitplan',
'Sync Settings': 'Synchronisierungseinstellungen',
'Sync process already started on': 'Sync-Prozess bereits gestartet am',
'Synchronisation': 'Synchronisierung',
'Synchronization Conflicts': 'Synchronisierungskonflikte',
'Synchronization Details': 'Synchronisierung - Details',
'Synchronization History': 'Synchronisierungsgeschichte',
'Synchronization Peers': 'Synchronisierung von Peers',
'Synchronization Settings': 'Synchronisierungseinstellungen',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Die Synchronisation erlaubt ihnen Daten gemeinsam zu nutzen, indem ihre eigene Datenbank mit aktuellen Daten anderer aktualisieren oder umgekehrt. Diese Seite informiert sie darüber wie sie das automatische Synchronisationsfeature von Sahana Eden verwenden.',
'Synchronization not configured.': 'Synchronisierung nicht konfiguriert.',
'Synchronization settings updated': 'Synchronisierungseinstellungen wurden aktualisiert',
'Synchronization': 'Synchronisierung',
'Syncronisation History': 'Synchronisierungshistorie',
'Table': 'Tabelle',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Unterkunft aufsuchen oder <instruction>',
'Task Details': 'Details zur Aufgabe',
'Task List': 'Aufgabenliste',
'Task Status': 'Aufgabenstatus',
'Task added': 'Aufgabe hinzugefügt',
'Task deleted': 'Aufgabe gelöscht',
'Task updated': 'Aufgabe aktualisiert',
'Tasks': 'Aufgaben',
'Team Description': 'Teambeschreibung',
'Team Details': 'Details zum Team',
'Team Id': 'Team ID',
'Team Leader': 'Teamleiter',
'Team Member added': 'Teammitglied hinzugefügt',
'Team Members': 'Teammitglieder',
'Team Name': 'Name des Teams',
'Team Type': 'Type des Teams',
'Team added': 'Team hinzugefügt',
'Team deleted': 'Team gelöscht',
'Team updated': 'Team aktualisiert',
'Technical testing only, all recipients disregard': 'Diese Benachrichtung ist ein technischer Test, bitte ignorieren',
'Telecommunications': 'Telekommunikation',
'Telephone': 'Telefon',
'Telephony': 'Telefonie',
'Temp folder %s not writable - unable to apply theme!': 'Temporärer Ordner %s nicht beschreibbar - Layout (theme) kann nicht angewandt werden!',
'Template Name': 'Name der Vorlage',
'Template file %s not readable - unable to apply theme!': 'Template Datei %s nicht lesbar - Layout (theme) kann nicht angewandt werden!',
'Templates': 'Vorlagen',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Begriff für die 5. Ebene der Verwaltungshierarchie eines Landes (z.B. eine Wahl- oder Postleitzahlenbereich). Diese Stufe wird nicht oft verwendet.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Begriff für die 4. Ebene der Verwaltungshierarchie eines Landes (z.B. Dorf, Stadtteil).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Begriff für die 1. Ebene der Verwaltungshierarchie eines Landes (z. B. Staat oder Bundesland).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Begriff für die 2. Ebene der Verwaltungshierarchie eines Landes (z. B. Regierungsbezirk oder Landkreis).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Begriff für die 3. Ebene der Verwaltungshierarchie eines Landes (z. B. Ort oder Stadt).',
'Term for the top-level administrative division (i.e. Country).': 'Begriff für die Verwaltung der höchsten Ebene (d. h. Land).',
'Test Results': 'Testergebnisse',
'Territorial Authority': 'Territoriale Behörde',
'Terrorism': 'Terrorismus',
'Tertiary Server (Optional)': 'Tertiärer Server (Optional)',
'Text Color for Text blocks': 'Text Farbe für Text Blöcke',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Danke für die Validierung Ihrer E-Mail. Ihr Benutzeraccount wurde vom Systemadministrator noch nicht genehmigt (%s). Sie werden eine Benachrichtigung per E-Mail erhalten wenn ihr Account aktiviert wurde.',
'Thanks for your assistance': 'Danke für Ihre Hilfe',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung für "db.table1.field1==\'value\'". Irgendetwas wie "db.table1.field1 == db.table2.field2" führt zu einem SQL JOIN.',
'The Area which this Site is located within.': 'Der Bereich, in dem sich dieser Ort befindet.',
'The Assessments module allows field workers to send in assessments.': 'Das Beurteilungsmodul erlaubt allen Aussendienstmitarbeitern ihre Beurteilungen einzusenden.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyze': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt Antworten auf Beurteilungen spezieller Ereignisse zu sammeln und auszuwerten',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt es Antworten zu speziellen Ereignissen zu sammeln und zu analysieren',
'The Author of this Document (optional)': 'Der Autor dieses Dokumentes (optional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Das Gebäudebeurteilungsmodul erlaubt die Sicherheit eines Gebäudes zu beurteilen, z. B. nach einem Erdbeben.',
'The Camp this Request is from': 'Das Camp von dem diese Anfrage stammt',
'The Camp this person is checking into.': 'Das Camp, in das diese Person überführt wird',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Die aktuelle Position der Person/Gruppe, welche ungenau (für die Berichterstellung) oder genau (zur Anzeige von auf einer Karte) sein kann. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Die E-mail Adresse an welche die Genehmigungen gesendet werden (normalerweise ist das eine Gruppen-Mail, keine Adresse einer Einzelperson) Wenn das Feld leer ist, dann werden Anforderungen automatisch genehmigt, wenn die Domänennamen übereinstimmen.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Das Vorfall Berichtssystem ermöglicht der Allgemeinheit Vorfälle zu melden und diese verfolgen zu lassen.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Herkunftsort der Person kann ungenau (für die Berichterstellung) oder genau (zur anzeige auf einer Karte ) sein. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Ort, zu dem die Person gehen wird, welcher ungenau (für Berichte) oder genau (für die Darstellung auf einer Karte) sein kann. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Media Library provides a catalog of digital media.': 'Das Medienverzeichnis bietet einen Katalog digitaler Medien',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Das Nachrichtenmodul ist der Hauptknotenpunkt der Kommunikation des Sahana Systems. Es wird verwendet, um Warnungen und/oder andere Nachrichten mit Hilfe von SMS & E-Mail an unterschiedliche Gruppen und Einzelpersonen während und nach einem Katastrophenfall zu schicken.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'Das Organisationsregister gibt einen Überblick über alle Hilfsorganisationen, die in der Region arbeiten.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Das Projektüberwachungsmodul ermöglicht die Erstellung von Aktivitäten um Lücken in Anforderungsbewertungen zu füllen.',
'The Role this person plays within this hospital.': 'Die Rolle die diese Person im Krankenhaus übernimmt.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Das Unterkunftsregister protokolliert alle Unterkünfte und speichert allgemeine Details. Es arbeitet mit anderen Modulen zusammen, um Menschen die sich in einer Unterkunft befinden, sowie die dort zur Verfügung stehenden Leistungen etc. zu dokumentieren.',
'The Shelter this Request is from': 'Die Unterkunft aus welcher diese Anforderung stammt',
'The Shelter this person is checking into.': 'Die Unterkunft in die diese Person eincheckt.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'Die URL zur "GetCapabilities" Operation eines MapWebService (WMS), dessen Kartenbenen über die Anzeige verfügbar sein sollen.',
'The URL of your web gateway without the post parameters': 'Die URL ihres Web gateways ohne die POST parameter.',
'The URL to access the service.': 'Die URL für den Zugriff zum Service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Die eindeutige Kennung (UUID) die dieser Einrichtung von der Regierung zugeordnet wurde.',
'The asset must be assigned to a site OR location.': 'Die Anlage muss einem Standort oder einem Gelände zugeordnet werden',
'The attribute which is used for the title of popups.': 'Das Atribut welches für den Titel von Dialogfenstern verwendet wird',
'The attribute within the KML which is used for the title of popups.': 'Das Attribut in der KML das für den Titel der Dialogfenster verwendet wird.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Die Attribute innerhalb der KML, die für den body des Dialogfenster verwendet werden sollen. (Verwenden Sie ein Leerzeichen zwischen Attributen)',
'The body height (crown to heel) in cm.': 'Die Körpergrösse (Kopf bis Fuss) in cm.',
'The country the person usually lives in.': 'Das Land, in dem die Person normalerweise lebt.',
'The default Organization for whom this person is acting.': 'Die Standardorganisation, für die diese Person agiert',
'The default Organization for whom you are acting.': 'Die Standardorganisation für welche Sie agieren',
'The duplicate record will be deleted': 'Der doppelte Datensatz wird gelöscht.',
'The first or only name of the person (mandatory).': 'Der erste oder einzige Name der Person (erforderlich)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Das Format der URL ist http://your/web/map/service?service=WMS&request=GetCapabilities wobei your/web/map/service für den Pfad der URL zum WMS steht',
'The language you wish the site to be displayed in.': 'Die Sprache in der die Seite angezeigt werden soll.',
'The list of Brands are maintained by the Administrators.': 'Die Liste der Marken wird von den Administratoren verwaltet.',
'The list of Catalogs are maintained by the Administrators.': 'Die Liste der Kataloge wird vom Administrator verwaltet.',
'The map will be displayed initially with this latitude at the center.': 'Die Karte wird zunächst auf diese Geographische Breite zentriert.',
'The map will be displayed initially with this longitude at the center.': 'Die Karte wird zunächst auf diese Geographische Länge zentriert.',
'The minimum number of features to form a cluster.': 'Die minimale Anzahl von Objekten, die als Cluster angezeigt werden.',
'The name to be used when calling for or directly addressing the person (optional).': 'Der zu verwendende Name beim Anfragen oder direkten Ansprechen der Person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'Der nächste Bildschirm erlaubt es, nähere Angaben zur Anzahl Menschen hier & ihrer Bedürfnisse zu machen.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Die Anzahl der Maßeinheiten eines alternativen Artikels, welcher einer Maßeinheit von diesem Artikel entspricht',
'The number of pixels apart that features need to be before they are clustered.': 'Mindestanzahl erforderlicher Pixel, damit sie nicht in Clustern zusammengefasst dargestellt werden.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Die Anzahl der Teilbilder rund um den sichtbaren Kartenausschnitt die heruntergeladen werden. Null bedeutet, dass die erste Seite schneller geladen wird, höhere Zahlen bedeuten dass nachfolgendes Schwenken schneller ist.',
'The person at the location who is reporting this incident (optional)': 'Die Person vor Ort welche das Ereignis meldet (optional)',
'The post variable containing the phone number': 'Der POST Parameter, der die Telefonnummer beinhaltet',
'The post variable on the URL used for sending messages': 'Der POST Parameter, der die Nachricht beinhaltet.',
'The post variables other than the ones containing the message and the phone number': 'Die POST Parameter, die nicht die Nachricht oder Telefonnummer beinhalten',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Der serielle Anschluss mit dem das Modem verbunden ist - /dev/ttyUSB0, etc unter linux und com1, com2, etc unter Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Der Server hat keine rechtzeitige Antwort von einem anderen Server erhalten, um die Anfrage des Clients beantworten zu können.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Der Server hat eine ungültige Antwort von einem anderen Server erhalten, dass er zugreift um die Anfrage vom Browser zu erfüllen.',
'The site where this position is based.': 'Das Gelände auf dem dieser Standort/Gebiet liegt.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Die zuständigen Mitarbeiter für Anlagen können Hilfe anfordern. Bezüglich dieser Anfragen können Zusagen gemacht werden. Diese bleiben solange offen, bis der Anforderer bestätigt, dass die Anfrage erfüllt ist.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Das genannte Ereignis stellt keine Bedrohung oder Sorge mehr dar und jede nachfolgende Aktion ist unter <instruction> beschrieben.',
'The time at which the Event started.': 'Die Zeit zu der das Ereignis startete.',
'The token associated with this application on': 'Das token welches mit dieser Anwendung verbunden ist',
'The unique identifier which identifies this instance to other instances.': 'Die eindeutige Kennung (UUID), die diese Instanz bei der Kommunikation mit anderen Instanzen identifiziert.',
"The volunteer's role": "Rolle des Freiwilligen",
'The way in which an item is normally distributed': 'Die Art in der ein Artikel normalerweise verteilt wird.',
'The weight in kg.': 'Das Gewicht in kg.',
'The': 'Das',
'Thematic Mapping': 'Thematische Kartendarstellung',
'Theme Details': 'Details zum Thema',
'Theme added': 'Thema hinzugefügt',
'Theme deleted': 'Thema gelöscht',
'Theme updated': 'Thema aktualisiert',
'Theme': 'Thema',
'Themes': 'Themen',
'There are errors': 'Es sind Fehler aufgetreten',
'There are insufficient items in the Inventory to send this shipment': 'Es sind nicht genügend Artikel im Bestand um diese Lieferung zu abzusenden.',
'There are multiple records at this location': 'An dieser Stelle gibt es mehrere Datensätze',
'There is no address for this person yet. Add new address.': 'Für diese Person gibt es noch keine Adresse. Fügen Sie eine neue Adresse hinzu.',
'These are settings for Inbound Mail.': 'Dies sind Einstellungen für eingehende Mail.',
'These are the Incident Categories visible to normal End-Users': 'Dies sind die für alle Endbenutzer sichtbaren Kategorien von Vorfällen',
'These need to be added in Decimal Degrees.': 'Diese müssen in Dezimalgrad hinzugefügt werden.',
'They': 'Sie',
'This Group has no Members yet': 'Diese Gruppe hat noch keine Mitglieder',
'This Team has no Members yet': 'Dieses Team hat noch keine Mitglieder',
'This appears to be a duplicate of': 'Dies scheint ein Duplikat zu sein von',
'This file already exists on the server as': 'Diese Datei existiert bereits auf dem Server als',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': "Dies ist zulässig, wenn sich die Stufe noch im Aufbau befindet. Um unbeabsichtige Änderungen zu verhindern, nachdem dieses Level abgeschlossen ist, kann dies auf 'False' gesetzt werden.",
'This is the way to transfer data between machines as it maintains referential integrity.': 'Auf diese Weise werden Daten zwischen Maschinen übertragen um die referenzielle Integrität aufrecht zu erhalten.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten. Doppelte Daten sollten vorher manuell entfernt werden.',
'This level is not open for editing.': 'Diese Stufe ist nicht zum Bearbeiten freigegeben.',
'This might be due to a temporary overloading or maintenance of the server.': 'Dies wurde möglicherweise durch eine vorübergehende Überlastung oder Wartung des Servers ausgelöst.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Dieses Modul ermöglicht es, Bestandsartikel zwischen Beständen verschiedener Anlagen Anzufragen und zu liefern.',
'This module allows the editing of page content using a web browser.': 'Dieses Modul ermöglicht das Editieren der Webseite unter Verwendung des Browsers.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Mit diesem Modul können Sie Szenarien sowohl für Übungen als auch für Ereignisse planen. Sie können geeignete Ressourcen (Menschen, Anlagen & Einrichtungen) zuordnen, damit diese leicht mobilisiert werden können.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Diese Seite zeigt ihnen die Protokolle von vorherigen Syncs. Klicken Sie auf den Link unten um auf diese Seite zu gelangen.',
'This screen allows you to upload a collection of photos to the server.': 'Diese Seite ermöglicht ihnen eine Sammlung von Fotos zum Server hochzuladen.',
'This setting can only be controlled by the Administrator.': 'Diese Einstellung kann nur vom Systemverwalter vorgenommen werden.',
'This shipment has already been received.': 'Diese Lieferung wurde bereits empfangen.',
'This shipment has already been sent.': 'Diese Lieferung wurde bereits abgeschickt.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht empfangen - sie ist nicht abgebrochen worden weil sie immer noch editiert werden kann.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht abgeschickt - sie ist NICHT abgebrochen worden, weil sie immer noch bearbeitet werden kann.',
'This shipment will be confirmed as received.': 'Der Empfang dieser Lieferung wurde bestätigt.',
'Thunderstorm': 'Gewitter',
'Thursday': 'Donnerstag',
'Ticket Details': 'Details zum Ticket',
'Ticket ID': 'Ticket-ID',
'Ticket added': 'Ticket hinzugefügt',
'Ticket deleted': 'Ticket gelöscht',
'Ticket updated': 'Ticket aktualisiert',
'Ticketing Module': 'Ticket Modul',
'Tile Mapping Service': 'TileMapService',
'Tilt-up concrete': 'Betonfertigteilbauweise',
'Timber frame': 'Holzrahmen',
'Timeline Report': 'Bericht zum Zeitplan',
'Timeline': 'Zeitplan',
'Time Out': 'Ausgangszeit',
'Time Question': 'Zeit Frage',
'Title': 'Titel',
'Title to show for the Web Map Service panel in the Tools panel.': 'Titel, mit der die WebMapService-Leiste in der Werkzeugleiste angezeigt wird',
'To Location': 'Zum Standort',
'To Organization': 'Zur Organisation',
'To Person': 'Zu Händen von',
'To begin the sync process, click the button on the right =>': 'Zum Starten der Synchronisierung, klicken Sie auf die Schaltfläche auf der rechten Seite =>',
'To begin the sync process, click this button =>': 'Um den Synchronisierungsprozess zu starten, klicken Sie diese Schaltfläche =>',
'To create a personal map configuration, click': 'Um eine persönliche Kartenkonfiguration zu erstellen, klicken Sie auf',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Zum Bearbeiten von OpenStreetMap, müssen Sie die Einstellungen in models/000_config. py anpassen',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': "Um die Zeitachse zu verschieben nutzen Sie bitte das Mausrad, die Pfeiltasten oder verschieben Sie sie per Drag'n Drop",
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Um nach einer Jobbezeichnung zu suchen, geben sie einen beliebigen Teil des Namens ein. Sie können % als Wildcard verwenden.',
'To variable': 'Empfänger-Variable',
'To': 'Bis',
'To Address': 'Empfängeradresse',
'Tools': 'Arbeitsmittel',
'Tornado': 'Wirbelsturm',
'Total # of Target Beneficiaries': 'Gesamtzahl der Nutznießer',
'Total # of households of site visited': 'Gesamtzahl der Haushalte des besuchten Geländes',
'Total Beds': 'Betten insgesamt',
'Total Beneficiaries': 'Gesamtzahl Nutznießer',
'Total Budget': 'Gesamtbudget',
'Total Capacity (Night)': 'Gesamtkapazität (Nacht)',
'Total Cost per Megabyte': 'Gesamtkosten pro Megabyte',
'Total Cost per Minute': 'Gesamtkosten pro Minute',
'Total Cost': 'Gesamtkosten',
'Total Monthly Cost': 'Gesamte monatliche Kosten',
'Total Monthly': 'Insgesamt Monatlich',
'Total One-time Costs': 'Summe einmaliger Kosten',
'Total Persons': 'Gesamtzahl an Personen',
'Total Records: %(numrows)s': 'Gesamtzahl an Datensätzen %(numrows)s',
'Total Recurring Costs': 'Gesamte wiederkehrende Kosten',
'Total Unit Cost': 'Gesamtstückkosten',
'Total Units': 'Summe Einheiten',
'Total Value': 'Gesamtwert',
'Total Volume (m3)': 'Gesamtvolumen (m3)',
'Total Weight (kg)': 'Gesamtgewicht (kg)',
'Total gross floor area (square meters)': 'Gesamtgröße der Fläche (Quadratmeter)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Gesamtzahl der Betten in diesem Krankenhaus. Automatisch aktualisiert über die täglichen Berichte.',
'Total number of houses in the area': 'Gesamtzahl der Häuser im Gebiet',
'Total number of schools in affected area': 'Gesamtzahl der Schulen im betroffenen Gebiet',
'Total population of site visited': 'Gesamtzahl der Bevölkerung des besuchten Gebietes',
'Total': 'Summe',
'Tourist Group': 'Touristengruppe',
'Town': 'Stadt',
'Town / Municipality': 'Ort / Stadtbezirk',
'Traces internally displaced people (IDPs) and their needs': 'Verfolgung von Binnenflüchtlingen (IDP) und deren Bedürfnisse',
'Tracing': 'Verfolgung',
'Track Details': 'Details zum Track',
'Track deleted': 'Track gelöscht',
'Track updated': 'Track aktualisiert',
'Track uploaded': 'Track hochgeladen',
'Track with this Person?': 'Diese Person verfolgen?',
'Track': 'Track',
'Tracking of Projects, Activities and Tasks': 'Verfolgen von Projekten, Aktivitäten und Aufgaben',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Verfolgung von Basisinformationen über Ort, Einrichtungen und Größe von Unterkünften',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Verfolgung der Position, Verteilung, Kapazität und Aufteilung der Opfer auf Unterkünfte',
'Tracks': 'Verfolgungen',
'Traffic Report': 'Datenverkehrsbericht',
'Training Course Catalog': 'Schulungskurs-Katalog',
'Training Details': 'Details zur Schulung',
'Training Event': 'Schulungskurs',
'Training Events': 'Schulungskurse',
'Training Facility': 'Schulungseinrichtung',
'Training Hours (Month)': 'Trainingsstunden (Monat)',
'Training Hours (Year)': 'Trainingsstunden (Jahr)',
'Training Report': 'Schulungsbericht',
'Training added': 'Schulung hinzugefügt',
'Training deleted': 'Schulung gelöscht',
'Training updated': 'Schulung aktualisiert',
'Training': 'Schulung',
'Trainings': 'Weiterbildungen / Übungen',
'Transition Effect': 'Übergangseffekt',
'Transit Status': 'Transitstatus',
'Translation': 'Übersetzung',
'Transportation assistance, Rank': 'Transport-Unterstützung, Rang',
'Trauma Center': 'Trauma Zentrum',
'Travel Cost': 'Reisekosten',
'Tropical Storm': 'Tropischer Sturm',
'Tropo Messaging Token': 'Tropo Nachrichten Token',
'Tropo Settings': 'Tropo Einstellungen',
'Tropo settings updated': 'Tropo Einstellungen aktualisiert',
'Truck': 'Lastwagen',
'Try checking the URL for errors, maybe it was mistyped.': 'Untersuchen Sie die URL auf Fehler, vielleicht war sie falsch geschrieben.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "Versuchen Sie den Knopf 'Aktualisieren/Erneut Laden' oder versuchen Sie nochmals die URL aus der Adresszeile.",
'Try refreshing the page or hitting the back button on your browser.': "Versuchen Sie die Seite zu aktualisieren oder den 'Zurück'-Knopf im Browser zu nutzen.",
'Tuesday': 'Dienstag',
'Tugboat Capacity': 'Schleppkahnkapazitäten',
'Tweeted by': 'Getwittert von',
'Tweeted on': 'Getwittert auf',
'Twilio Channels': 'Twilio Kanäle',
'Twitter Channels': 'Twitter Kanäle',
'Twitter ID or #hashtag': 'Twitter-ID oder #hashtag',
'Twitter InBox': 'Twitter Eingang',
'Twitter Search': 'Twitter Suche',
'Twitter Search Results': 'Twitter Suchergebnisse',
'Twitter Settings': 'Einstellungen für Twitter',
'Type of Construction': 'Bautyp',
'Type of water source before the disaster': 'Typ der Wasserquelle vor der Katastrophe',
'Type': 'Typ',
'Types': 'Typen',
'UN': 'UN',
'Un-Repairable': 'Nicht zu reparieren',
'Unable to parse CSV file!': 'CSV Datei kann nicht analysiert werden!',
'Understaffed': 'Unterbesetzt',
'Unidentified': 'Nicht identifiziert',
'Unit Cost': 'Kosten für Einheit',
'Unit Value': 'Einheitswert',
'Unit added': 'Einheit hinzugefügt',
'Unit deleted': 'Einheit gelöscht',
'Unit of Measure': 'Maßeinheit',
'Unit updated': 'Einheit aktualisiert',
'Unit': 'Einheit',
'Units': 'Einheiten',
'Unknown Peer': 'Unbekannter Peer',
'Unknown type of facility': 'Unbekannter Einrichtungstyp',
'Unknown': 'unbekannt',
'Unmark as duplicate': 'Duplikatsmeldung zurückziehen',
'Unreinforced masonry': 'Nicht verstärktes Mauerwerk',
'Unresolved Conflicts': 'Ungelöste Konflikte',
'Unsafe': 'Unsicher',
'Unselect to disable the modem': 'Abwählen um das Modem zu deaktivieren',
'Unsent': 'Nicht gesendet',
'Unsupported data format!': 'Nicht unterstütztes Datenformat!',
'Unsupported method!': 'Nicht unterstützte Methode!',
'Update Activity Report': 'Aktivitätsbericht aktualisieren',
'Update Cholera Treatment Capability Information': 'Aktualisieren der Informationen zu den Cholera Behandlungsmöglichkeiten',
'Update Request': 'Anfrage Aktualisieren',
'Update Service Profile': 'Leistungsprofil aktualisieren',
'Update Status': 'Status aktualisieren',
'Update Task Status': 'Status der Aufgabe aktualisieren',
'Update Unit': 'Einheit aktualisieren',
'Update if Master': 'Aktualisiere wenn Master',
'Update if Newer': 'Aktualisiere falls neuer',
'Update your current ordered list': 'Aktualisieren Sie ihre aktuell bestellte Liste',
'Update': 'Aktualisierung',
'Updated By': 'Aktualisiert von',
'Upload Photos': 'Fotos hochladen',
'Upload Spreadsheet': 'Tabellendokument hochladen',
'Upload Track': 'Verfolgung hochladen',
'Upload a Spreadsheet': 'Ein Tabellendokument hochladen',
'Upload a file formatted according to the Template.': 'Laden Sie eine entsprechend der Vorlage formatierte Datei hoch.',
'Upload an Assessment Template import file': 'Upload einer Beurteilungsvorlage',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Grafikdatei hochladen (bmp, gif, jpeg-oder png), max. 300x300 Pixel!',
'Upload an image file here.': 'Laden Sie hier die Grafikdatei hoch.',
'Upload an image, such as a photo': 'Laden Sie eine Grafikdatei hoch, wie beispielsweise ein Foto',
'Uploaded Image': 'Hochgeladenes Bild',
'Upload translated files': 'Übersetzte Dateien hochladen',
'Upon Request': 'Eingehende Anfrage',
'Urban Fire': 'Siedlungsfeuer',
'Urban area': 'Stadtgebiet / Ballungsgebiet',
'Urgent': 'Dringend',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Verwende (...)&(...) für UND, (...)|(...) für ODER und ~(...) für NICHT um komplexere Abfragen zu erstellen.',
'Use Geocoder for address lookups?': "Verwendung von 'Geocoder' für Adressenüberprüfung?",
'Use deg, min, sec': 'Nutze Grad, Minuten, Sekunden',
'Use decimal': 'Nutze Dezimalgrad',
'Use default': 'Standardwert verwenden',
'Use for Login?': 'Für Login verwenden?',
'Use these links to download data that is currently in the database.': 'Verwenden Sie diese Links um Daten, die derzeit in der Datenbank liegen herunterzuladen.',
'Used by IRS & Assess': 'Verwendet vom IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Verwendet in onHover Tooltip & Cluster Popups um verschiedene Typen zu unterscheiden.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Verwendet um onHover Tooltip zu erstellen & das 1. Feld wird ebenfalls im Cluster Dialogfeld benutzt um zwischen verschiedenen Datensätzen zu unterscheiden.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Breite für den Ort sinnvoll ist. Kann verwendet werden um Resources zu filtern die Standorte haben.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Länge für den Ort sinnvoll ist. Kann verwendet werden um Resources zu filtern die Standorte haben.',
'Used to import data from spreadsheets into the database': 'Dient dazu Daten aus Tabellendokumenten in die Datenbank zu übertragen.',
'Used within Inventory Management, Request Management and Asset Management': 'Verwendung beim der Bestands-, Anfrage- und Anlagenverwaltung',
'User Account has been Disabled': 'Das Benutzerkonto wurde deaktiviert',
'User Details': 'Details zum Benutzer',
'User Management': 'Benutzerverwaltung',
'User Profile': 'Benutzerprofil',
'User Requests': 'Benutzeranfragen',
'User Updated': 'Benutzer aktualisiert',
'User added': 'Benutzer hinzugefügt',
'User already has this role': 'Der Benutzer hat bereits diese Rolle',
'User deleted': 'Benutzer gelöscht',
'User updated': 'Benutzer aktualisiert',
'User': 'Benutzer',
'Username': 'Benutzername',
'Users removed': 'Benutzer entfernt',
'Users': 'Benutzer',
'Uses the REST Query Format defined in': 'Verwendet das REST-Abfrageformat das definiert ist in',
'Utilities': 'Dienstprogramme',
'Utility, telecommunication, other non-transport infrastructure': 'Dienstprogramm, Telekommunikation, andere nicht-Verkehrsinfrastruktur',
'Utilization Report': 'Verwendungsbericht',
'Valid until': 'Gültig bis',
'Value per Pack': 'Wert pro Packet',
'Value': 'Wert',
'Various Reporting functionalities': 'Verschiedene Funktionalitäten für das Berichtswesen',
'Vehicle Categories': 'Fahrzeugkategorien',
'Vehicle Crime': 'Fahrzeug Kriminalität',
'Vehicle Height (m)': 'Höhe des Fahrzeugs (m)',
'Vehicle Management': 'Fahrzeugmanagement',
'Vehicle Plate Number': 'Fahrzeugnummernschild',
'Vehicle Type': 'Fahrzeugtyp',
'Vehicle Types': 'Fahrzeugtypen',
'Vehicle Weight (kg)': 'Gewicht des Fahrzeugs (kg)',
'Vehicle': 'Fahrzeug',
'Vehicles': 'Fahrzeuge',
'Vehicles are assets with some extra details.': 'Fahrzeuge sind Anlagen, die mit einigen speziellen Funktionen ausgestattet sind',
'Venue': 'Örtlichkeit',
'Verification Status': 'Prüfstatus',
'Verified?': 'Geprüft?',
'Verify password': 'Passwortprüfung',
'Very Good': 'Sehr gut',
'Very High': 'Sehr hoch',
'Vessel Max Length': 'Wasserfahrzeug maximale Länge',
'View Alerts received using either Email or SMS': 'Empfangene Warnungen über E-Mail oder SMS',
'View All': 'Alles anzeigen',
'View Error Tickets': 'Fehler Tickets ansehen',
'View Fullscreen Map': 'Vollbild Karte anzeigen',
'View Image': 'Bild anzeigen',
'View Items': 'Artikel anzeigen',
'View On Map': 'Auf Karte anzeigen',
'View Outbox': 'Postausgang anzeigen',
'View Picture': 'Bild anzeigen',
'View Settings': 'Einstellungen anzeigen',
'View Test Result Reports': 'Zeige Berichte der Testergebnisse',
'View Tickets': 'Tickets anzeigen',
'View Translation Percentage': 'Zeige Übersetzungsstatistik',
'View and/or update their details': 'Anzeige und/oder Aktualisieren Ihrer Detailinformationen',
'View as Pages': 'Anzeige als Seiten',
'View or update the status of a hospital.': 'Anzeige oder Aktualisieren des Status eines Krankenhauses.',
'View pending requests and pledge support.': 'Anstehende Anforderungen anzeigen und Zusageunterstützung.',
'View the hospitals on a map.': 'Krankenhäuser auf einer Karte anzeigen',
'View/Edit the Database directly': 'Die Datenbank direkt anzeigen/bearbeiten',
'Village Leader': 'Dorfvorsteher',
'Village / Suburb': 'Ortschaft / Vorort',
'Village': 'Dorf',
'Visible?': 'Sichtbar?',
'Visual Recognition': 'Visuelle Erkennung',
'Volcanic Ash Cloud': 'Wolke vulkanischer Asche',
'Volcanic Event': 'Vulkanisches Ereignis',
'Volume (m3)': 'Volumen (m3)',
'Volunteer Availability': 'Verfügbarkeit von Freiwilligen',
'Volunteer Contact': 'Kontaktdaten des Freiwilligen',
'Volunteer Details': 'Details zu Freiwilligen',
'Volunteer Information': 'Freiwilligeninformation',
'Volunteer Management': 'Management von Freiwilligen',
'Volunteer Project': 'Freiwilligen Projekt',
'Volunteer Record': 'Freiwilligen Datensatz',
'Volunteer Report': 'Freiwilligen Bericht',
'Volunteer Request': 'Freiwilligen Anforderung',
'Volunteer Role': 'Rolle des Freiwilligen',
'Volunteer Role Catalog': 'Rollenkatalog für Freiwillige',
'Volunteer added': 'Freiwilliger hinzugefügt',
'Volunteer availability added': 'Freiwilligen Verfügbarkeit hinzugefügt',
'Volunteer availability deleted': 'Freiwilligen Verfügbarkeit gelöscht',
'Volunteer availability updated': 'Freiwilligen Verfügbarkeit aktualisiert',
'Volunteer deleted': 'Freiwilliger gelöscht',
'Volunteer details updated': 'Details zu Freiwilligen aktualisiert',
'Volunteers were notified!': 'Freiwillige wurden unterrichtet!',
'Volunteers': 'Freiwillige',
'Volunteer': 'Freiwilliger',
'Vote': 'Abstimmung',
'Votes': 'Abstimmungen',
'WASH': 'WASH',
'Walking Only': 'Nur laufen',
'Wall or other structural damage': 'Wand oder andere Gebäudeschäden',
'Warehouse Details': 'Details zu Warenlager',
'Warehouse Stock': 'Lagerbestand',
'Warehousing Storage Capacity': 'Warenlager Ablagekapazität',
'Warehouse Type': 'Warenlagertyp',
'Warehouse Types': 'Warenlagertypen',
'Warehouse added': 'Warenlager hinzugefügt',
'Warehouse deleted': 'Warenlager gelöscht',
'Warehouse updated': 'Warenlager aktualisiert',
'Warehouse': 'Warenlager',
'Warehouses': 'Warenlager',
'Water Sanitation Hygiene': 'Wasser Abwasserentsorgung Hygiene',
'Water collection': 'Wassersammlung',
'Water gallon': 'Wasser Gallonen',
'Water storage containers in households': 'Wasser-Behälter in Haushalten',
'Water supply': 'Wasserversorgung',
'Waybill Number': 'Frachtbriefnummer',
'WB': 'Frachtbriefnr.',
'Web Feature Service': 'WebFeatureService',
'Web Map Service': 'WebMapService',
'Web Map Service Browser Name': 'WebMapService Browser Name',
'Web Map Service Browser URL': 'WebMapService Browser URL',
'Website': 'Webseite',
'Wednesday': 'Mittwoch',
'Weight (kg)': 'Gewicht (kg)',
'Weight': 'Gewicht',
'Welcome to the Sahana Portal at': 'Willkommen beim Sahana Portal',
'Well-Known Text': 'WellKnownText (OGC-WKT)',
'What the Items will be used for': 'Beabsichtigte Verwendung der Artikel',
'Wheat': 'Weizen',
'When reports were entered': 'Wann die Berichte eingegeben wurden',
'Whiskers': 'Barthaare',
'Who is doing what and where': 'Wer macht was und wo',
'Who usually collects water for the family?': 'Wer sammelt normalerweise Wasser für die Familie?',
'Width': 'Breite',
'Width (m)': 'Breite (m)',
'Wild Fire': 'Wildfeuer',
'Wind Chill': 'Kälte vom Wind',
'Window frame': 'Fensterrahmen',
'Winter Storm': 'Wintersturm',
'Women of Child Bearing Age': 'Frauen im gebärfähigen Alter',
'Women participating in coping activities': 'Frauen die sich an den Hilfsaktivitäten beteiligen',
'Women who are Pregnant or in Labour': 'Frauen die schwanger sind oder in den Wehen',
'Womens Focus Groups': 'Focus Gruppen für Frauen',
'Wooden plank': 'Hölzerne Planke',
'Wooden poles': 'Holzmasten',
'Working hours end': 'Arbeitszeit Ende',
'Working hours start': 'Arbeitszeit Beginn',
'Working or other to provide money/food': 'Arbeiten oder etwas anderes um Geld/Lebensmittel zur Verfügung zu stellen.',
'written-only': 'nur schriftlich',
'XYZ Tiles': 'XYZ Tiles',
'X-Ray': 'Röntgen',
'X-Ray Done': 'Röntgen erledigt',
'YES': 'JA',
'Year built': 'Baujahr',
'Year of Manufacture': 'Herstellungsjahr',
'Year': 'Jahr',
'Yellow': 'Gelb',
'Yes': 'Ja',
'yes': 'ja',
'You are a recovery team?': 'Sind Sie ein Bergungsteam?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Sie versuchen Ihr eigenes Konto zu löschen - sind Sie sicher, dass Sie fortfahren möchten?',
'You are currently reported missing!': 'Sie sind derzeit als vermisst gemeldet!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Sie können die Konfiguration des Synchronisierungsmodules unter Einstellungen anpassen. Diese Konfiguration enthält ihre UUID (unique identification number), Synchronisierungszeitpläne, Beacon-Service, usw. . Klicken sie auf den folgenden Link um zu den Einstellungen für die Synchronisierung zu gelangen.',
'You can click on the map below to select the Lat/Lon fields': 'Sie können auf die untere Karte klicken, um die Felder für Geographische Breite und Länge auszuwählen.',
'You can select the Draw tool': 'Sie können das Zeichen Tool verwenden',
'You can set the modem settings for SMS here.': 'Sie können die Modemeinstellungen für SMS hier festlegen.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Sie können das Konvertierungsprogramm verwenden, um GPS-Koordinaten oder Grad/Minuten/Sekunden umzuwandeln.',
'You do not have permission for any facility to make a commitment.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Zusage zu machen.',
'You do not have permission for any facility to make a request.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Anfrage zu starten.',
'You do not have permission for any site to add an inventory item.': 'Sie haben keine Berechtigung für irgendein Gelände einen Bestandsartikel hinzuzufügen.',
'You do not have permission for any site to receive a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung anzunehmen.',
'You do not have permission for any site to send a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung abzusenden.',
'You do not have permission to cancel this received shipment.': 'Sie haben keine Berechtigung diese erhaltene Lieferung zu löschen.',
'You do not have permission to cancel this sent shipment.': 'Sie haben keine Berechtigung diese gesendete Lieferung zu löschen.',
'You do not have permission to make this commitment.': 'Sie haben keine Berechtigung diese Zusage zu machen.',
'You do not have permission to receive this shipment.': 'Sie haben keine Berechtigung diese Lieferung entgegenzunehmen.',
'You do not have permission to send a shipment from this site.': 'Sie haben keine Berechtigung Lieferungen von diesem Gelände zu senden.',
'You do not have permission to send this shipment.': 'Sie haben keine Berechtigung diese Lieferung zu senden.',
'You have a personal map configuration. To change your personal configuration, click': 'Sie haben eine persönliche Kartenkonfiguration. Um ihre persönliche Konfiguration zu ändern, klicken Sie hier',
'You have found a dead body?': 'Sie haben eine Leiche gefunden?',
'You must be logged in to register volunteers.': 'Sie müssen angemeldet sein, um Freiwillige zu registrieren.',
'You must be logged in to report persons missing or found.': 'Sie müssen angemeldet sein, um fehlende oder gefundene Personen zu melden.',
'You must provide a series id to proceed.': 'Sie müssen eine serien-id vorweisen, um fortzufahren.',
'You should edit Twitter settings in models/000_config.py': 'Sie sollten die Twitter Einstellungen unter models/000_config.py bearbeiten',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ihre aktuelle, geordnete Liste der Lösungselemente wird unten angezeigt. Sie können es durch Abstimmen erneut verändern.',
'Your post was added successfully.': 'Der Eintrag wurde erfolgreich hinzugefügt.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Ihr System verfügt über eine eindeutige ID (UUID), die andere Computer nützen können um Sie zu identifizieren. Zum Anzeigen Ihrer UUID können Sie zu Synchronisierung -> Sync Einstellungen gehen. Sie können auch andere Einstellungen auf dieser Seite einsehen.',
'Zero Hour': 'Stunde null',
'Zinc roof': 'Zinkdach',
'Zoom Levels': 'Zoomebenen',
'Zoom in': 'Hineinzoomen',
'Zoom to Current Location': 'Auf aktuelles Gebiet/Standort fokussieren',
'Zoom to maximum map extent': 'Auf maximale Kartenausdehung fokussieren',
'Zoom': 'Zoomen',
'active': 'aktiv',
'added': 'hinzugefügt',
'all records': 'Alle Datensätze',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Ermöglicht ein Budget zu entwickeln, basierend auf Mitarbeiter- und Gerätekosten, einschließlich aller administrativen Gemeinkosten.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Ermöglicht die Erstellung und Verwaltung von Umfragen zur Beurteilung von Schäden nach einer Naturkatastrophe.',
'an individual/team to do in 1-2 days': 'Ein Aufwand von 1-2 Tagen für ein einzelnes Team',
'assigned': 'zugewiesen',
'average': 'Durchschnitt',
'black': 'schwarz',
'blue': 'blau',
'brown': 'braun',
'business_damaged': 'Business_beschädigt',
'by': 'durch',
'can be used to extract data from spreadsheets and put them into database tables.': 'Kann verwendet werden um Daten von einer Tabelle zu extrahieren und diese in Datenbanktabellen einzutragen.',
'check all': 'Alles markieren',
'click for more details': 'hier klicken, um mehr Details zu erhalten',
'consider': 'Berücksichtigen',
'curly': 'lockig',
'currently registered': 'derzeitig registriert',
'daily': 'täglich',
'dark': 'dunkel',
'data uploaded': 'hochgeladene Daten',
'database %s select': 'Datenbank %s gewählt',
'database': 'Datenbank',
'deceased': 'Verstorbene',
'delete all checked': 'Alle Ausgewählten löschen',
'deleted': 'gelöscht',
'design': 'Design',
'diseased': 'erkrankt',
'displaced': 'vertrieben',
'divorced': 'geschieden',
'done!': 'fertig!',
'duplicate': 'Dublette',
'eg. gas, electricity, water': 'zum Beispiel Gas, Strom, Wasser',
'enclosed area': 'eingeschlossener Bereich',
'export as csv file': 'Exportieren als CSV-Datei',
'fat': 'fett',
'feedback': 'Rückmeldung',
'female': 'weiblich',
'flush latrine with septic tank': 'Spültoilette mit Klärgrube',
'food_sources': 'lebensmittel_quellen',
'forehead': 'Stirn',
'found': 'gefunden',
'from Twitter': 'aus Twitter',
'green': 'Grün',
'grey': 'grau',
'here': 'hier',
'high': 'hoch',
'hourly': 'stündlich',
'households': 'Haushalte',
'identified': 'identifiziert',
'ignore': 'ignorieren',
'in Deg Min Sec format': 'im Format Grad Minuten Sekunden',
'inactive': 'inaktiv',
'injured': 'verletzt',
'insert new %s': 'neue %s hinzufügen',
'insert new': 'neu einfügen',
'invalid request': 'Ungültige Anfrage',
'invalid': 'ungültig',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'ist ein zentrales online Verzeichnis, in dem Informationen zu allen Opfern und Familien der Katastrophe gespeichert werden können, insbesondere identifizierte Verluste, Evakuierte, Flüchtlinge, Heimatlose. Informationen wie Name, Alter, Kontaktnummer, Ausweisnummer, Vertriebenen-Ort und andere Details werden erfasst. Fotos und Fingerabdrücke der Leute können auf das System hochgeladen werden. Personen können zum Zweck der Effizienz und Einfachheit auch in Gruppen zusammengefasst werden',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'ist so konzipiert, dass es aus mehreren Untermodulen zu besteht. Diese arbeiten zusammen, um Organisationen komplexe Funktionalitäten zur Unterstützung von Hilfen und Durchführung von Projekten zur Verfügung zu stellen. Dies beinhaltet ein Aufnahmesystem, ein Warenlager Management System, Produkt-Tracking, Versorgungsketten-Management, Fahrzeugbestand Management, Beschaffungswesen, Finanz-Tracking und andere Bestands- und Resource Management Einsatzmöglichkeiten.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Überwacht alle eingehenden Tickets, so dass diese entsprechend eingestuft und an die entsprechende Stelle zur Bearbeitung geleitet werden können.',
'latrines': 'Toiletten',
'leave empty to detach account': 'Leerlassen um das Konto zu entfernen/aufzuheben.',
'legend URL': 'URL zur Legende',
'light': 'lichtquelle',
'login': 'Anmeldung',
'long': 'lang',
'long>12cm': 'lang > 12cm',
'low': 'niedrig',
'male': 'männlich',
'manual': 'manuell',
'married': 'verheiratet',
'medium': 'mittel',
'medium<12cm': 'mittel < 12 cm',
'meters': 'meter',
'missing': 'fehlend',
'module allows the site administrator to configure various options.': 'Modul das dem Seitenadministrator ermöglicht verschiedene Optionen zu konfigurieren.',
'module helps monitoring the status of hospitals.': 'Modul das hilft den Status von Krankenhäusern zu überwachen',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Modul das gemeinschaftlich einen Mechanismus bietet einen GIS-gestützen Überblick über die sich entwickelnde Lage zu erhalten.',
'more': 'mehr',
'n/a': 'nicht zutreffend',
'negroid': 'Negroid',
'never': 'nie',
'new record inserted': 'Neuen Datensatz eingefügt',
'new': 'neu',
'next 100 rows': 'Nächste 100 Zeilen',
'no': 'nein',
'none': 'nichts',
'not accessible - no cached version available!': 'Nicht verfügbar - keine zwischengespeicherte Version verfügbar!',
'not accessible - using cached version from': 'Nicht verfügbar - benutze zwischengespeicherte Version von',
'not specified': 'nicht angegeben',
'obsolete': 'obsolet',
'on': 'ein',
'once': 'einmal',
'open defecation': 'Verrichtung der Bedürfnisse im Freien',
'or import from csv file': 'oder aus CSV-Datei importieren',
'other': 'sonstige',
'over one hour': 'über eine Stunde',
'or drop here': "oder hier per Drag'n Drop ablegen",
'people': 'Personen',
'piece': 'Stück',
'pit latrine': 'Grubenlatrine',
'pit': 'Grube',
'postponed': 'zurückgestellt',
'preliminary template or draft, not actionable in its current form': 'vorläufige Vorlage oder Entwurf, nicht aussagekräftig in seiner jetzigen Form',
'previous 100 rows': 'Vorherige 100 Zeilen',
'record does not exist': 'Datensatz ist nicht vorhanden',
'record id': 'Datensatz ID',
'red': 'rot',
'reports successfully imported.': 'Berichte erfolgreich importiert.',
'representation of the Polygon/Line.': 'Darstellung der Fläche/Linie.',
'retired': 'Außer Dienst',
'river': 'Fluss',
'see comment': 'siehe Kommentar',
'selected': 'ausgewählt',
'separated from family': 'von Familie getrennt',
'separated': 'getrennt',
'shaved': 'rasiert',
'short': 'kurz',
'short<6cm': 'kurz < 6cm',
'sides': 'Seiten',
'sign-up now': 'Jetzt Registrieren',
'single': 'alleinstehend',
'slim': 'dünn',
'specify': 'genauer beschreiben',
'staff members': 'Mitarbeiter',
'staff': 'Personal',
'state location': 'Beschaffenheit des Standort',
'state': 'Zustand',
'straight': 'gerade',
'suffered financial losses': 'Finanzielle Verluste erlitten',
'table': 'Tabelle',
'tall': 'groß',
'this': 'Dieses',
'to access the system': 'um auf das System zuzugreifen',
'tonsure': 'Tonsur',
'total': 'Summe',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy Modul nicht verfügbar in der aktuellen Python Umgebung läuft - das benötigt die Installation einer none-Tropo Twitter Unterstützung!',
'unable to parse csv file': 'CSV Datei kann nicht analysiert werden',
'uncheck all': 'Alles deselektieren',
'unidentified': 'nicht identifiziert',
'unknown': 'unbekannt',
'unspecified': 'unspezifiziert',
'unverified': 'ungeprüft',
'updated': 'aktualisiert',
'updates only': 'nur Aktualisierungen',
'verified': 'verifiziert',
'volunteer': 'Freiwilliger',
'volunteers': 'Freiwillige',
'wavy': 'wellenförmige Lücke',
'weekly': 'wöchentlich',
'white': 'weiß',
'wider area, longer term, usually contain multiple Activities': 'Größerer Bereich, längere Sicht, enthält normalerweise mehrere Aktivitäten',
'widowed': 'verwitwet',
'within human habitat': 'In menschlichen Lebensraum',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt Modul nicht verfügbar im Rahmen der laufenden Python Umgebung - das muss installiert werden für XLS Ausgabe!'
}
|
{
"content_hash": "bf7be0dd8934886aa9a5f6350cc048a4",
"timestamp": "",
"source": "github",
"line_count": 4712,
"max_line_length": 931,
"avg_line_length": 60.47453310696095,
"alnum_prop": 0.7668797989865102,
"repo_name": "raj454raj/eden",
"id": "c092b5aecbe20aa85b08b397344eb999320180f9",
"size": "287136",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "languages/de.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3336714"
},
{
"name": "HTML",
"bytes": "1367974"
},
{
"name": "JavaScript",
"bytes": "20093511"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31162631"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3206786"
}
],
"symlink_target": ""
}
|
# Acceptance test: run a 3-point relative scan of the diff.xh motor while
# reading out the xray_eye2 camera, with a live table and a live plot
# subscribed to the run.
from bluesky.plans import rel_scan
from bluesky.callbacks import LiveTable, LivePlot

# Columns shown in the live table: motor position plus both camera ROI totals.
_columns = ['diff_xh', 'xray_eye2_stats1_total', 'xray_eye2_stats2_total']
subs = [LiveTable(_columns),
        LivePlot('xray_eye2_stats1_total', 'diff_xh')]
print(' Motor is diff.xh, camera is xray_eye2')
# Relative scan: diff.xh from -0.1 to +0.1 around its current position, 3 points.
RE(rel_scan([xray_eye2], diff.xh, -.1, .1, 3), subs)
|
{
"content_hash": "3e083b320280f1f20cb2e4f60a250f06",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 84,
"avg_line_length": 41.25,
"alnum_prop": 0.6848484848484848,
"repo_name": "NSLS-II-CHX/ipython_ophyd",
"id": "98f0d2260581707d7959a4879f56ccc58e4d174c",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profile_collection/acceptance_tests/03-camera_xe2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "144"
},
{
"name": "JavaScript",
"bytes": "1763"
},
{
"name": "Python",
"bytes": "372492"
},
{
"name": "Roff",
"bytes": "8152"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_versions_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
# Generic type variable for the value produced by an operation.
T = TypeVar('T')
# Signature of the optional `cls` callback every operation accepts: it is
# handed the raw pipeline response, the deserialized result, and a headers
# dict, and its return value replaces the operation's result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataVersionsOperations:
    """DataVersionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this looks like auto-generated (AutoRest-style) REST client
    # code — prefer regenerating over hand-editing so the pattern stays uniform.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        order_by: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[str] = None,
        tags: Optional[str] = None,
        list_view_type: Optional[Union[str, "_models.ListViewType"]] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.DataVersionBaseResourceArmPaginatedResult"]:
        """List data versions in the data container.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Data container's name.
        :type name: str
        :param order_by: Please choose OrderBy value from ['createdtime', 'modifiedtime'].
        :type order_by: str
        :param top: Top count of results, top count cannot be greater than the page size.
         If topCount > page size, results with be default page size count
         will be returned.
        :type top: int
        :param skip: Continuation token for pagination.
        :type skip: str
        :param tags: Comma-separated list of tag names (and optionally values). Example:
         tag1,tag2=value2.
        :type tags: str
        :param list_view_type: [ListViewType.ActiveOnly, ListViewType.ArchivedOnly,
         ListViewType.All]View type for including/excluding (for example) archived entities.
        :type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DataVersionBaseResourceArmPaginatedResult or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.DataVersionBaseResourceArmPaginatedResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataVersionBaseResourceArmPaginatedResult"]
        # Map the common auth/lookup status codes to specific azure-core
        # exception types; anything else raises the generic HttpResponseError
        # in get_next below. Callers may extend this via kwargs['error_map'].
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the HTTP request for one page: the first page comes from the
        # operation's URL template, subsequent pages from the service-supplied
        # next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    name=name,
                    api_version=api_version,
                    order_by=order_by,
                    top=top,
                    skip=skip,
                    tags=tags,
                    list_view_type=list_view_type,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    name=name,
                    api_version=api_version,
                    order_by=order_by,
                    top=top,
                    skip=skip,
                    tags=tags,
                    list_view_type=list_view_type,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # The next_link returned by the service is always followed
                # with a plain GET.
                request.method = "GET"
            return request
        # Deserialize one page into (next_link, items) for AsyncItemPaged.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("DataVersionBaseResourceArmPaginatedResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page, raising a mapped exception on any non-200 status.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request above (first page only).
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions"}  # type: ignore
    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        version: str,
        **kwargs: Any
    ) -> None:
        """Delete version.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :param version: Version identifier.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str
        request = build_delete_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            version=version,
            api_version=api_version,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        # Both 200 and 204 are accepted as success here.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        version: str,
        **kwargs: Any
    ) -> "_models.DataVersionBase":
        """Get version.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :param version: Version identifier.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DataVersionBase, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataVersionBase"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            version=version,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DataVersionBase', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"}  # type: ignore
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        version: str,
        body: "_models.DataVersionBase",
        **kwargs: Any
    ) -> "_models.DataVersionBase":
        """Create or update version.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name.
        :type name: str
        :param version: Version identifier.
        :type version: str
        :param body: Version entity to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.DataVersionBase
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DataVersionBase, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.DataVersionBase
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataVersionBase"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the model into the JSON request body.
        _json = self._serialize.body(body, 'DataVersionBase')
        request = build_create_or_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            version=version,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 and 201 both carry the same model in the response body.
        if response.status_code == 200:
            deserialized = self._deserialize('DataVersionBase', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DataVersionBase', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/data/{name}/versions/{version}"}  # type: ignore
|
{
"content_hash": "88960b0ec20972c028c9d0eb9268d673",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 227,
"avg_line_length": 42.704301075268816,
"alnum_prop": 0.6347098073775651,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a231f467725ed28a9fe927d2c9ce28b2030334f4",
"size": "16386",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_10_01/aio/operations/_data_versions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from codecs import open  # To use a consistent encoding
from os import path
from setuptools import setup
# Directory containing this setup.py; all data files are resolved against it.
HERE = path.dirname(path.abspath(__file__))
# Get version info by executing __about__.py into a dict (avoids importing
# the package before its dependencies are installed).
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'hikaricp', '__about__.py')) as f:
    exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
def get_dependencies():
    """Return the lines of requirements.in, or an empty list if it is absent.

    Lines keep their trailing newlines, exactly as ``readlines`` yields them.
    """
    requirements_path = path.join(HERE, 'requirements.in')
    if path.isfile(requirements_path):
        with open(requirements_path, encoding='utf-8') as reqs:
            return reqs.readlines()
    return []
def parse_pyproject_array(name):
    """Extract the TOML array assigned to *name* in pyproject.toml as a Python list."""
    import os
    import re
    from ast import literal_eval
    with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as fh:
        # Strip trailing whitespace (including Windows \r) so the $ anchor matches.
        contents = '\n'.join(raw_line.rstrip() for raw_line in fh.readlines())
    match = re.search(r'^{} = (\[.*?\])$'.format(name), contents, flags=re.MULTILINE | re.DOTALL)
    return literal_eval(match.group(1))
# First entry of the pyproject 'dependencies' array: the datadog-checks-base
# requirement pin.
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
    name='datadog-hikaricp',
    version=ABOUT['__version__'],
    description='The hikaricp check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent hikaricp check',
    # The project's main homepage.
    url='https://github.com/DataDog/integrations-extras',
    # Author details
    author='Damien Bertau',
    author_email='damien.bertau@blablacar.com',
    # License
    license='BSD-3-Clause',
    # See https://pypi.org/classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.8',
    ],
    # The package we're going to ship
    packages=['datadog_checks.hikaricp'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    # Optional extras: the pyproject 'deps' array, installable as [deps].
    extras_require={'deps': parse_pyproject_array('deps')},
    # Extra files to ship with the wheel package
    include_package_data=True,
)
|
{
"content_hash": "5f5db4fdd70b2e9a595d749e69fba72d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 81,
"avg_line_length": 30.266666666666666,
"alnum_prop": 0.652863436123348,
"repo_name": "DataDog/integrations-extras",
"id": "ef0720dec0e65bdcdda056c2c2ec87b7d6dccf8a",
"size": "2270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hikaricp/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4265"
},
{
"name": "Go",
"bytes": "4119"
},
{
"name": "PHP",
"bytes": "3192"
},
{
"name": "Python",
"bytes": "1219552"
},
{
"name": "Ruby",
"bytes": "8005"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
}
|
"""
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
|
{
"content_hash": "2f9031c1426dc577c5e5f0a51dc79b72",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 36.31818181818182,
"alnum_prop": 0.7108886107634543,
"repo_name": "xyguo/scikit-learn",
"id": "d9c2b570336a7d24c41e66daf2d05e181a2e9b65",
"size": "2397",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/classification/plot_digits_classification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6545381"
},
{
"name": "Shell",
"bytes": "9256"
}
],
"symlink_target": ""
}
|
import datetime
import hashlib
import sys
from contextlib import closing
from urllib2 import urlopen
from south.db import db
from south.logger import get_logger
from south.v2 import DataMigration
from django.db import models
from django.conf import settings
logger = get_logger()
class Migration(DataMigration):
    """Data migration: stream every Dataset_File once, compute its MD5 and
    SHA-512 digests and byte size, cross-check them against any previously
    recorded values, and mark the file as verified."""
    @staticmethod
    def get_actual_url(df):
        """Return a fetchable URL for *df*, or None when the file cannot be
        resolved here (custom download-provider protocols are skipped)."""
        from urlparse import urlparse
        # Can't handle custom protocols
        try:
            if df.protocol in [t[0] for t in settings.DOWNLOAD_PROVIDERS]:
                return None
        except AttributeError:
            # DOWNLOAD_PROVIDERS is an optional setting; absence simply means
            # there are no custom protocols to exclude.
            pass
        def get_absolute_filepath(df):
            # Map a locally stored file to its absolute on-disk path;
            # returns '' when the file is not stored locally.
            if df.protocol == 'staging':
                return df.url
            url = urlparse(df.url)
            if url.scheme == '':
                try:
                    from django.utils import _os
                    # FILE_STORE_PATH must be set
                    return _os.safe_join(settings.FILE_STORE_PATH, url.path)
                except AttributeError:
                    return ''
            if url.scheme == 'file':
                return url.path
            # ok, it doesn't look like the file is stored locally
            else:
                return ''
        url = urlparse(df.url)
        if url.scheme == '':
            # Local file
            return 'file://'+get_absolute_filepath(df)
        # Remote files are also easy
        if url.scheme in ('http', 'https', 'ftp', 'file'):
            return df.url
        return None
    def forwards(self, orm):
        """Verify and backfill checksums/sizes for all Dataset_File rows.

        Raises Exception when a stored size or MD5 disagrees with the bytes
        actually read, aborting the migration."""
        # NOTE: trailing semicolon below is harmless but stray.
        print "Starting verification and SHA-512 generation for %d files." % \
            orm.Dataset_File.objects.all().count();
        for df in orm.Dataset_File.objects.all():
            url = self.get_actual_url(df)
            if not url:
                continue
            with closing(urlopen(url)) as f:
                md5 = hashlib.new('md5')
                sha512 = hashlib.new('sha512')
                size = 0
                def get_chunk():
                    # Read in multiples of the hash block size for efficiency.
                    return f.read(32 * sha512.block_size)
                # On Python 2 urlopen().read() returns str, so '' is the
                # EOF sentinel that stops iter().
                for chunk in iter(get_chunk, ''):
                    size += len(chunk)
                    md5.update(chunk)
                    sha512.update(chunk)
                if df.size and int(df.size) != size:
                    raise Exception("Size does not match: %d should be %s" % \
                        (size, df.size))
                md5sum = md5.hexdigest()
                if df.md5sum and df.md5sum != md5sum:
                    raise Exception("MD5 does not match: %s should be %s" % \
                        (df.md5sum, md5sum))
                df.md5sum = md5sum
                df.sha512sum = sha512.hexdigest()
                df.size = size
                df.verified = True
                df.save()
                # Show progress
                sys.stdout.write('.')
                sys.stdout.flush()
        print ""
        print "Successfully verified all files."
    def backwards(self, orm):
        "No-op: the forward migration only adds checksum data, so there is nothing to reverse."
    # Frozen ORM model definitions captured by South; they describe the schema
    # as it existed at this migration and must not be edited by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tardis_portal.author_experiment': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'})
        },
        'tardis_portal.datafileparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datafileparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
            'dataset_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.dataset': {
            'Meta': {'object_name': 'Dataset'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'tardis_portal.dataset_file': {
            'Meta': {'object_name': 'Dataset_File'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'sha512sum': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
            'stay_remote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'tardis_portal.datasetparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datasetparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.experiment': {
            'Meta': {'object_name': 'Experiment'},
            'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution_name': ('django.db.models.fields.CharField', [], {'default': "'The University of Queensland'", 'max_length': '400'}),
            'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentacl': {
            'Meta': {'ordering': "['experiment__id']", 'object_name': 'ExperimentACL'},
            'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'tardis_portal.experimentparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.freetextsearchfield': {
            'Meta': {'object_name': 'FreeTextSearchField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
        },
        'tardis_portal.groupadmin': {
            'Meta': {'object_name': 'GroupAdmin'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'tardis_portal.license': {
            'Meta': {'object_name': 'License'},
            'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
            'internal_description': ('django.db.models.fields.TextField', [], {}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
        },
        'tardis_portal.parametername': {
            'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
            'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
            'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
        },
        'tardis_portal.schema': {
            'Meta': {'object_name': 'Schema'},
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'tardis_portal.token': {
            'Meta': {'object_name': 'Token'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 7, 26, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'tardis_portal.userauthentication': {
            'Meta': {'object_name': 'UserAuthentication'},
            'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'tardis_portal.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }
    complete_apps = ['tardis_portal']
    symmetrical = True
|
{
"content_hash": "0c9ce79d2b60aec3f3f3b2abad3efa47",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 182,
"avg_line_length": 66.37898089171975,
"alnum_prop": 0.5431079978889796,
"repo_name": "iiman/mytardis",
"id": "f28f8e610c078b432b9f2273a6abfb0804afb886",
"size": "20867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tardis/tardis_portal/migrations/0017_trusted_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "308165"
},
{
"name": "Python",
"bytes": "1736671"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
"""
A pure-Python PDF library with an increasing number of capabilities.
See README for links to FAQ, documentation, homepage, etc.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"
__maintainer__ = "Phaseit, Inc."
__maintainer_email = "PyPDF2@phaseit.net"
import string
import math
import struct
import sys
import uuid
from sys import version_info
if version_info < ( 3, 0 ):
from cStringIO import StringIO
else:
from io import StringIO
if version_info < ( 3, 0 ):
BytesIO = StringIO
else:
from io import BytesIO
from . import filters
from . import utils
import warnings
import codecs
from .generic import *
from .utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList
from .utils import isString, b_, u_, ord_, chr_, str_, formatWarning
if version_info < ( 2, 4 ):
from sets import ImmutableSet as frozenset
if version_info < ( 2, 5 ):
from md5 import md5
else:
from hashlib import md5
import uuid
class PdfFileWriter(object):
"""
This class supports writing PDF files out, given pages produced by another
class (typically :class:`PdfFileReader<PdfFileReader>`).
"""
def __init__(self):
self._header = b_("%PDF-1.3")
self._objects = [] # array of indirect objects
# The root of our page tree node.
pages = DictionaryObject()
pages.update({
NameObject("/Type"): NameObject("/Pages"),
NameObject("/Count"): NumberObject(0),
NameObject("/Kids"): ArrayObject(),
})
self._pages = self._addObject(pages)
# info object
info = DictionaryObject()
info.update({
NameObject("/Producer"): createStringObject(codecs.BOM_UTF16_BE + u_("PyPDF2").encode('utf-16be'))
})
self._info = self._addObject(info)
# root object
root = DictionaryObject()
root.update({
NameObject("/Type"): NameObject("/Catalog"),
NameObject("/Pages"): self._pages,
})
self._root = None
self._root_object = root
def _addObject(self, obj):
self._objects.append(obj)
return IndirectObject(len(self._objects), 0, self)
def getObject(self, ido):
if ido.pdf != self:
raise ValueError("pdf must be self")
return self._objects[ido.idnum - 1]
def _addPage(self, page, action):
assert page["/Type"] == "/Page"
page[NameObject("/Parent")] = self._pages
page = self._addObject(page)
pages = self.getObject(self._pages)
action(pages["/Kids"], page)
pages[NameObject("/Count")] = NumberObject(pages["/Count"] + 1)
def addPage(self, page):
"""
Adds a page to this PDF file. The page is usually acquired from a
:class:`PdfFileReader<PdfFileReader>` instance.
:param PageObject page: The page to add to the document. Should be
an instance of :class:`PageObject<PyPDF2.pdf.PageObject>`
"""
self._addPage(page, list.append)
def insertPage(self, page, index=0):
"""
Insert a page in this PDF file. The page is usually acquired from a
:class:`PdfFileReader<PdfFileReader>` instance.
:param PageObject page: The page to add to the document. This
argument should be an instance of :class:`PageObject<pdf.PageObject>`.
:param int index: Position at which the page will be inserted.
"""
self._addPage(page, lambda l, p: l.insert(index, p))
def getPage(self, pageNumber):
"""
Retrieves a page by number from this PDF file.
:param int pageNumber: The page number to retrieve
(pages begin at zero)
:return: the page at the index given by *pageNumber*
:rtype: :class:`PageObject<pdf.PageObject>`
"""
pages = self.getObject(self._pages)
# XXX: crude hack
return pages["/Kids"][pageNumber].getObject()
def getNumPages(self):
"""
:return: the number of pages.
:rtype: int
"""
pages = self.getObject(self._pages)
return int(pages[NameObject("/Count")])
def addBlankPage(self, width=None, height=None):
"""
Appends a blank page to this PDF file and returns it. If no page size
is specified, use the size of the last page.
:param float width: The width of the new page expressed in default user
space units.
:param float height: The height of the new page expressed in default
user space units.
:return: the newly appended page
:rtype: :class:`PageObject<PyPDF2.pdf.PageObject>`
:raises PageSizeNotDefinedError: if width and height are not defined
and previous page does not exist.
"""
page = PageObject.createBlankPage(self, width, height)
self.addPage(page)
return page
def insertBlankPage(self, width=None, height=None, index=0):
"""
Inserts a blank page to this PDF file and returns it. If no page size
is specified, use the size of the last page.
:param float width: The width of the new page expressed in default user
space units.
:param float height: The height of the new page expressed in default
user space units.
:param int index: Position to add the page.
:return: the newly appended page
:rtype: :class:`PageObject<PyPDF2.pdf.PageObject>`
:raises PageSizeNotDefinedError: if width and height are not defined
and previous page does not exist.
"""
if width is None or height is None and \
(self.getNumPages() - 1) >= index:
oldpage = self.getPage(index)
width = oldpage.mediaBox.getWidth()
height = oldpage.mediaBox.getHeight()
page = PageObject.createBlankPage(self, width, height)
self.insertPage(page, index)
return page
def addJS(self, javascript):
"""
Add Javascript which will launch upon opening this PDF.
:param str javascript: Your Javascript.
>>> output.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
# Example: This will launch the print window when the PDF is opened.
"""
js = DictionaryObject()
js.update({
NameObject("/Type"): NameObject("/Action"),
NameObject("/S"): NameObject("/JavaScript"),
NameObject("/JS"): NameObject("(%s)" % javascript)
})
js_indirect_object = self._addObject(js)
# We need a name for parameterized javascript in the pdf file, but it can be anything.
js_string_name = str(uuid.uuid4())
js_name_tree = DictionaryObject()
js_name_tree.update({
NameObject("/JavaScript"): DictionaryObject({
NameObject("/Names"): ArrayObject([createStringObject(js_string_name), js_indirect_object])
})
})
self._addObject(js_name_tree)
self._root_object.update({
NameObject("/OpenAction"): js_indirect_object,
NameObject("/Names"): js_name_tree
})
def addAttachment(self, fname, fdata):
"""
Embed a file inside the PDF.
:param str fname: The filename to display.
:param str fdata: The data in the file.
Reference:
https://www.adobe.com/content/dam/Adobe/en/devnet/acrobat/pdfs/PDF32000_2008.pdf
Section 7.11.3
"""
# We need 3 entries:
# * The file's data
# * The /Filespec entry
# * The file's name, which goes in the Catalog
# The entry for the file
""" Sample:
8 0 obj
<<
/Length 12
/Type /EmbeddedFile
>>
stream
Hello world!
endstream
endobj
"""
file_entry = DecodedStreamObject()
file_entry.setData(fdata)
file_entry.update({
NameObject("/Type"): NameObject("/EmbeddedFile")
})
# The Filespec entry
""" Sample:
7 0 obj
<<
/Type /Filespec
/F (hello.txt)
/EF << /F 8 0 R >>
>>
"""
efEntry = DictionaryObject()
efEntry.update({ NameObject("/F"):file_entry })
filespec = DictionaryObject()
filespec.update({
NameObject("/Type"): NameObject("/Filespec"),
NameObject("/F"): createStringObject(fname), # Perhaps also try TextStringObject
NameObject("/EF"): efEntry
})
# Then create the entry for the root, as it needs a reference to the Filespec
""" Sample:
1 0 obj
<<
/Type /Catalog
/Outlines 2 0 R
/Pages 3 0 R
/Names << /EmbeddedFiles << /Names [(hello.txt) 7 0 R] >> >>
>>
endobj
"""
embeddedFilesNamesDictionary = DictionaryObject()
embeddedFilesNamesDictionary.update({
NameObject("/Names"): ArrayObject([createStringObject(fname), filespec])
})
embeddedFilesDictionary = DictionaryObject()
embeddedFilesDictionary.update({
NameObject("/EmbeddedFiles"): embeddedFilesNamesDictionary
})
# Update the root
self._root_object.update({
NameObject("/Names"): embeddedFilesDictionary
})
def appendPagesFromReader(self, reader, after_page_append=None):
"""
Copy pages from reader to writer. Includes an optional callback parameter
which is invoked after pages are appended to the writer.
:param reader: a PdfFileReader object from which to copy page
annotations to this writer object. The writer's annots
will then be updated
:callback after_page_append (function): Callback function that is invoked after
each page is appended to the writer. Callback signature:
:param writer_pageref (PDF page reference): Reference to the page
appended to the writer.
"""
# Get page count from writer and reader
reader_num_pages = reader.getNumPages()
writer_num_pages = self.getNumPages()
# Copy pages from reader to writer
for rpagenum in range(0, reader_num_pages):
reader_page = reader.getPage(rpagenum)
self.addPage(reader_page)
writer_page = self.getPage(writer_num_pages+rpagenum)
# Trigger callback, pass writer page as parameter
if callable(after_page_append): after_page_append(writer_page)
def updatePageFormFieldValues(self, page, fields):
'''
Update the form field values for a given page from a fields dictionary.
Copy field texts and values from fields to page.
:param page: Page reference from PDF writer where the annotations
and field data will be updated.
:param fields: a Python dictionary of field names (/T) and text
values (/V)
'''
# Iterate through pages, update field values
for j in range(0, len(page['/Annots'])):
writer_annot = page['/Annots'][j].getObject()
for field in fields:
if writer_annot.get('/T') == field:
writer_annot.update({
NameObject("/V"): TextStringObject(fields[field])
})
def cloneReaderDocumentRoot(self, reader):
'''
Copy the reader document root to the writer.
:param reader: PdfFileReader from the document root should be copied.
:callback after_page_append
'''
self._root_object = reader.trailer['/Root']
def cloneDocumentFromReader(self, reader, after_page_append=None):
'''
Create a copy (clone) of a document from a PDF file reader
:param reader: PDF file reader instance from which the clone
should be created.
:callback after_page_append (function): Callback function that is invoked after
each page is appended to the writer. Signature includes a reference to the
appended page (delegates to appendPagesFromReader). Callback signature:
:param writer_pageref (PDF page reference): Reference to the page just
appended to the document.
'''
self.cloneReaderDocumentRoot(reader)
self.appendPagesFromReader(reader, after_page_append)
def encrypt(self, user_pwd, owner_pwd = None, use_128bit = True):
"""
Encrypt this PDF file with the PDF Standard encryption handler.
:param str user_pwd: The "user password", which allows for opening
and reading the PDF file with the restrictions provided.
:param str owner_pwd: The "owner password", which allows for
opening the PDF files without any restrictions. By default,
the owner password is the same as the user password.
:param bool use_128bit: flag as to whether to use 128bit
encryption. When false, 40bit encryption will be used. By default,
this flag is on.
"""
import time, random
if owner_pwd == None:
owner_pwd = user_pwd
if use_128bit:
V = 2
rev = 3
keylen = int(128 / 8)
else:
V = 1
rev = 2
keylen = int(40 / 8)
# permit everything:
P = -1
O = ByteStringObject(_alg33(owner_pwd, user_pwd, rev, keylen))
ID_1 = ByteStringObject(md5(b_(repr(time.time()))).digest())
ID_2 = ByteStringObject(md5(b_(repr(random.random()))).digest())
self._ID = ArrayObject((ID_1, ID_2))
if rev == 2:
U, key = _alg34(user_pwd, O, P, ID_1)
else:
assert rev == 3
U, key = _alg35(user_pwd, rev, keylen, O, P, ID_1, False)
encrypt = DictionaryObject()
encrypt[NameObject("/Filter")] = NameObject("/Standard")
encrypt[NameObject("/V")] = NumberObject(V)
if V == 2:
encrypt[NameObject("/Length")] = NumberObject(keylen * 8)
encrypt[NameObject("/R")] = NumberObject(rev)
encrypt[NameObject("/O")] = ByteStringObject(O)
encrypt[NameObject("/U")] = ByteStringObject(U)
encrypt[NameObject("/P")] = NumberObject(P)
self._encrypt = self._addObject(encrypt)
self._encrypt_key = key
    def write(self, stream):
        """
        Writes the collection of pages added to this object out as a PDF file:
        body objects first, then the xref table, trailer and startxref marker.

        :param stream: An object to write the file to. The object must support
            the write method and the tell method, similar to a file object.
        """
        if hasattr(stream, 'mode') and 'b' not in stream.mode:
            warnings.warn("File <%s> to write to is not in binary mode. It may not be written to correctly." % stream.name)
        debug = False
        # NOTE(review): 'struct' is already imported at module level; this
        # local import is redundant but harmless.
        import struct
        # The catalog is only added to the object table on first write.
        if not self._root:
            self._root = self._addObject(self._root_object)
        externalReferenceMap = {}
        # PDF objects sometimes have circular references to their /Page objects
        # inside their object tree (for example, annotations). Those will be
        # indirect references to objects that we've recreated in this PDF. To
        # address this problem, PageObject's store their original object
        # reference number, and we add it to the external reference map before
        # we sweep for indirect references. This forces self-page-referencing
        # trees to reference the correct new object location, rather than
        # copying in a new copy of the page object.
        for objIndex in range(len(self._objects)):
            obj = self._objects[objIndex]
            if isinstance(obj, PageObject) and obj.indirectRef != None:
                data = obj.indirectRef
                if data.pdf not in externalReferenceMap:
                    externalReferenceMap[data.pdf] = {}
                if data.generation not in externalReferenceMap[data.pdf]:
                    externalReferenceMap[data.pdf][data.generation] = {}
                externalReferenceMap[data.pdf][data.generation][data.idnum] = IndirectObject(objIndex + 1, 0, self)
        # self.stack tracks visited idnums while sweeping (cycle protection).
        self.stack = []
        if debug: print(("ERM:", externalReferenceMap, "root:", self._root))
        self._sweepIndirectReferences(externalReferenceMap, self._root)
        del self.stack
        # Begin writing:
        object_positions = []
        stream.write(self._header + b_("\n"))
        for i in range(len(self._objects)):
            idnum = (i + 1)
            obj = self._objects[i]
            # Remember each object's byte offset for the xref table.
            object_positions.append(stream.tell())
            stream.write(b_(str(idnum) + " 0 obj\n"))
            key = None
            if hasattr(self, "_encrypt") and idnum != self._encrypt.idnum:
                # Per-object key: file key + 3 low bytes of the object number
                # + 2 bytes of the generation, then MD5 -- presumably the PDF
                # spec's per-object RC4 key derivation; confirm against
                # PDF 32000-1 section 7.6.2.
                pack1 = struct.pack("<i", i + 1)[:3]
                pack2 = struct.pack("<i", 0)[:2]
                key = self._encrypt_key + pack1 + pack2
                assert len(key) == (len(self._encrypt_key) + 5)
                md5_hash = md5(key).digest()
                key = md5_hash[:min(16, len(self._encrypt_key) + 5)]
            obj.writeToStream(stream, key)
            stream.write(b_("\nendobj\n"))
        # xref table
        xref_location = stream.tell()
        stream.write(b_("xref\n"))
        stream.write(b_("0 %s\n" % (len(self._objects) + 1)))
        # Entry 0 is the mandatory free-list head.
        stream.write(b_("%010d %05d f \n" % (0, 65535)))
        for offset in object_positions:
            stream.write(b_("%010d %05d n \n" % (offset, 0)))
        # trailer
        stream.write(b_("trailer\n"))
        trailer = DictionaryObject()
        trailer.update({
                NameObject("/Size"): NumberObject(len(self._objects) + 1),
                NameObject("/Root"): self._root,
                NameObject("/Info"): self._info,
                })
        if hasattr(self, "_ID"):
            trailer[NameObject("/ID")] = self._ID
        if hasattr(self, "_encrypt"):
            trailer[NameObject("/Encrypt")] = self._encrypt
        trailer.writeToStream(stream, None)
        # eof
        stream.write(b_("\nstartxref\n%s\n%%%%EOF\n" % (xref_location)))
def addMetadata(self, infos):
"""
Add custom metadata to the output.
:param dict infos: a Python dictionary where each key is a field
and each value is your new metadata.
"""
args = {}
for key, value in list(infos.items()):
args[NameObject(key)] = createStringObject(value)
self.getObject(self._info).update(args)
    def _sweepIndirectReferences(self, externMap, data):
        """
        Recursively resolve indirect references inside *data* so the output
        file is self-contained.

        Internal references are followed once (cycle guard: ``self.stack``);
        references into other PDFs are copied into ``self._objects`` and
        memoized in *externMap* as ``{pdf: {generation: {idnum: ref}}}``.

        :param externMap: memo of already-imported external objects.
        :param data: the object to sweep.
        :return: *data*, or a replacement reference for external objects.
        """
        debug = False
        if debug: print((data, "TYPE", data.__class__.__name__))
        if isinstance(data, DictionaryObject):
            for key, value in list(data.items()):
                origvalue = value
                value = self._sweepIndirectReferences(externMap, value)
                if isinstance(value, StreamObject):
                    # a dictionary value is a stream. streams must be indirect
                    # objects, so we need to change this value.
                    value = self._addObject(value)
                data[key] = value
            return data
        elif isinstance(data, ArrayObject):
            for i in range(len(data)):
                value = self._sweepIndirectReferences(externMap, data[i])
                if isinstance(value, StreamObject):
                    # an array value is a stream. streams must be indirect
                    # objects, so we need to change this value
                    value = self._addObject(value)
                data[i] = value
            return data
        elif isinstance(data, IndirectObject):
            # internal indirect references are fine
            if data.pdf == self:
                if data.idnum in self.stack:
                    # already visited: stop here to avoid infinite recursion
                    return data
                else:
                    self.stack.append(data.idnum)
                    realdata = self.getObject(data)
                    self._sweepIndirectReferences(externMap, realdata)
                    return data
            else:
                newobj = externMap.get(data.pdf, {}).get(data.generation, {}).get(data.idnum, None)
                if newobj == None:
                    try:
                        newobj = data.pdf.getObject(data)
                        # Reserve a slot first so this object's number stays
                        # stable while its own references are being swept.
                        self._objects.append(None) # placeholder
                        idnum = len(self._objects)
                        newobj_ido = IndirectObject(idnum, 0, self)
                        if data.pdf not in externMap:
                            externMap[data.pdf] = {}
                        if data.generation not in externMap[data.pdf]:
                            externMap[data.pdf][data.generation] = {}
                        externMap[data.pdf][data.generation][data.idnum] = newobj_ido
                        newobj = self._sweepIndirectReferences(externMap, newobj)
                        self._objects[idnum-1] = newobj
                        return newobj_ido
                    except ValueError:
                        # Unable to resolve the Object, returning NullObject instead.
                        return NullObject()
                return newobj
        else:
            return data
def getReference(self, obj):
idnum = self._objects.index(obj) + 1
ref = IndirectObject(idnum, 0, self)
assert ref.getObject() == obj
return ref
def getOutlineRoot(self):
if '/Outlines' in self._root_object:
outline = self._root_object['/Outlines']
idnum = self._objects.index(outline) + 1
outlineRef = IndirectObject(idnum, 0, self)
assert outlineRef.getObject() == outline
else:
outline = TreeObject()
outline.update({ })
outlineRef = self._addObject(outline)
self._root_object[NameObject('/Outlines')] = outlineRef
return outline
    def getNamedDestRoot(self):
        """
        Return the array backing the document's named-destinations name tree,
        creating the /Names and /Dests dictionaries on demand.

        :return: the leaf ``/Names`` array (flat ``[name, ref, name, ref, ...]``).
        """
        if '/Names' in self._root_object and isinstance(self._root_object['/Names'], DictionaryObject):
            names = self._root_object['/Names']
            idnum = self._objects.index(names) + 1
            namesRef = IndirectObject(idnum, 0, self)
            assert namesRef.getObject() == names
            if '/Dests' in names and isinstance(names['/Dests'], DictionaryObject):
                dests = names['/Dests']
                idnum = self._objects.index(dests) + 1
                destsRef = IndirectObject(idnum, 0, self)
                assert destsRef.getObject() == dests
                if '/Names' in dests:
                    nd = dests['/Names']
                else:
                    # /Dests exists but has no leaf array yet: create it.
                    nd = ArrayObject()
                    dests[NameObject('/Names')] = nd
            else:
                # /Names exists but /Dests does not: create the dests dict.
                dests = DictionaryObject()
                destsRef = self._addObject(dests)
                names[NameObject('/Dests')] = destsRef
                nd = ArrayObject()
                dests[NameObject('/Names')] = nd
        else:
            # Neither dictionary exists yet: build the whole chain.
            names = DictionaryObject()
            namesRef = self._addObject(names)
            self._root_object[NameObject('/Names')] = namesRef
            dests = DictionaryObject()
            destsRef = self._addObject(dests)
            names[NameObject('/Dests')] = destsRef
            nd = ArrayObject()
            dests[NameObject('/Names')] = nd
        return nd
def addBookmarkDestination(self, dest, parent=None):
destRef = self._addObject(dest)
outlineRef = self.getOutlineRoot()
if parent == None:
parent = outlineRef
parent = parent.getObject()
#print parent.__class__.__name__
parent.addChild(destRef, self)
return destRef
    def addBookmarkDict(self, bookmark, parent=None):
        """
        Add a bookmark described by a plain dictionary to the outline tree.

        :param dict bookmark: bookmark attributes (e.g. ``/Title``, ``/A``).
        :param parent: optional parent bookmark; defaults to the outline root.
        :return: an IndirectObject reference to the new bookmark.
        """
        bookmarkObj = TreeObject()
        for k, v in list(bookmark.items()):
            bookmarkObj[NameObject(str(k))] = v
        # NOTE(review): update() re-copies the original keys over the
        # NameObject keys written above, so raw keys may coexist with their
        # NameObject counterparts — presumably harmless; verify.
        bookmarkObj.update(bookmark)
        if '/A' in bookmark:
            # Actions must be indirect objects: copy and register the dict,
            # then point /A at the registered reference.
            action = DictionaryObject()
            for k, v in list(bookmark['/A'].items()):
                action[NameObject(str(k))] = v
            actionRef = self._addObject(action)
            bookmarkObj[NameObject('/A')] = actionRef
        bookmarkRef = self._addObject(bookmarkObj)
        outlineRef = self.getOutlineRoot()
        if parent == None:
            parent = outlineRef
        parent = parent.getObject()
        parent.addChild(bookmarkRef, self)
        return bookmarkRef
def addBookmark(self, title, pagenum, parent=None, color=None, bold=False, italic=False, fit='/Fit', *args):
"""
Add a bookmark to this PDF file.
:param str title: Title to use for this bookmark.
:param int pagenum: Page number this bookmark will point to.
:param parent: A reference to a parent bookmark to create nested
bookmarks.
:param tuple color: Color of the bookmark as a red, green, blue tuple
from 0.0 to 1.0
:param bool bold: Bookmark is bold
:param bool italic: Bookmark is italic
:param str fit: The fit of the destination page. See
:meth:`addLink()<addLink>` for details.
"""
pageRef = self.getObject(self._pages)['/Kids'][pagenum]
action = DictionaryObject()
zoomArgs = []
for a in args:
if a is not None:
zoomArgs.append(NumberObject(a))
else:
zoomArgs.append(NullObject())
dest = Destination(NameObject("/"+title + " bookmark"), pageRef, NameObject(fit), *zoomArgs)
destArray = dest.getDestArray()
action.update({
NameObject('/D') : destArray,
NameObject('/S') : NameObject('/GoTo')
})
actionRef = self._addObject(action)
outlineRef = self.getOutlineRoot()
if parent == None:
parent = outlineRef
bookmark = TreeObject()
bookmark.update({
NameObject('/A'): actionRef,
NameObject('/Title'): createStringObject(title),
})
if color is not None:
bookmark.update({NameObject('/C'): ArrayObject([FloatObject(c) for c in color])})
format = 0
if italic:
format += 1
if bold:
format += 2
if format:
bookmark.update({NameObject('/F'): NumberObject(format)})
bookmarkRef = self._addObject(bookmark)
parent = parent.getObject()
parent.addChild(bookmarkRef, self)
return bookmarkRef
def addNamedDestinationObject(self, dest):
destRef = self._addObject(dest)
nd = self.getNamedDestRoot()
nd.extend([dest['/Title'], destRef])
return destRef
    def addNamedDestination(self, title, pagenum):
        """
        Add a named destination pointing at *pagenum* with a /FitH fit.

        :param title: name under which the destination is registered.
        :param int pagenum: index of the target page.
        :return: an IndirectObject reference to the new destination.
        """
        pageRef = self.getObject(self._pages)['/Kids'][pagenum]
        dest = DictionaryObject()
        # NOTE(review): the /FitH top coordinate 826 is hard-coded (roughly an
        # A4 page height in points) — verify that this is intentional.
        dest.update({
            NameObject('/D') : ArrayObject([pageRef, NameObject('/FitH'), NumberObject(826)]),
            NameObject('/S') : NameObject('/GoTo')
        })
        destRef = self._addObject(dest)
        nd = self.getNamedDestRoot()
        nd.extend([title, destRef])
        return destRef
def removeLinks(self):
"""
Removes links and annotations from this output.
"""
pages = self.getObject(self._pages)['/Kids']
for page in pages:
pageRef = self.getObject(page)
if "/Annots" in pageRef:
del pageRef['/Annots']
    def removeImages(self, ignoreByteStringObject=False):
        """
        Removes images from this output.

        :param bool ignoreByteStringObject: optional parameter
            to ignore ByteString Objects.
        """
        pages = self.getObject(self._pages)['/Kids']
        for j in range(len(pages)):
            page = pages[j]
            pageRef = self.getObject(page)
            content = pageRef['/Contents'].getObject()
            if not isinstance(content, ContentStream):
                content = ContentStream(content, pageRef)
            _operations = []
            seq_graphics = False
            for operands, operator in content.operations:
                # When ignoreByteStringObject is set, non-TextString operands
                # of the text-showing operators (Tj, ', ", TJ) are blanked.
                if operator == b_('Tj'):
                    text = operands[0]
                    if ignoreByteStringObject:
                        if not isinstance(text, TextStringObject):
                            operands[0] = TextStringObject()
                elif operator == b_("'"):
                    text = operands[0]
                    if ignoreByteStringObject:
                        if not isinstance(text, TextStringObject):
                            operands[0] = TextStringObject()
                elif operator == b_('"'):
                    text = operands[2]
                    if ignoreByteStringObject:
                        if not isinstance(text, TextStringObject):
                            operands[2] = TextStringObject()
                elif operator == b_("TJ"):
                    for i in range(len(operands[0])):
                        if ignoreByteStringObject:
                            if not isinstance(operands[0][i], TextStringObject):
                                operands[0][i] = TextStringObject()
                # q/Q bracket a saved-graphics-state sequence; operators inside
                # it are treated as image/path drawing and dropped below.
                if operator == b_('q'):
                    seq_graphics = True
                if operator == b_('Q'):
                    seq_graphics = False
                if seq_graphics:
                    if operator in [b_('cm'), b_('w'), b_('J'), b_('j'), b_('M'), b_('d'), b_('ri'), b_('i'),
                            b_('gs'), b_('W'), b_('b'), b_('s'), b_('S'), b_('f'), b_('F'), b_('n'), b_('m'), b_('l'),
                            b_('c'), b_('v'), b_('y'), b_('h'), b_('B'), b_('Do'), b_('sh')]:
                        continue
                # Rectangles are dropped everywhere, not only inside q/Q.
                if operator == b_('re'):
                    continue
                _operations.append((operands, operator))
            content.operations = _operations
            pageRef.__setitem__(NameObject('/Contents'), content)
    def removeText(self, ignoreByteStringObject=False):
        """
        Removes text from this output.

        :param bool ignoreByteStringObject: optional parameter
            to ignore ByteString Objects.
        """
        pages = self.getObject(self._pages)['/Kids']
        for j in range(len(pages)):
            page = pages[j]
            pageRef = self.getObject(page)
            content = pageRef['/Contents'].getObject()
            if not isinstance(content, ContentStream):
                content = ContentStream(content, pageRef)
            # Blank the operands of every text-showing operator (Tj, ', ", TJ).
            # NOTE(review): despite the parameter name, ignoreByteStringObject=True
            # causes ByteStringObject operands to be blanked as well — verify.
            for operands,operator in content.operations:
                if operator == b_('Tj'):
                    text = operands[0]
                    if not ignoreByteStringObject:
                        if isinstance(text, TextStringObject):
                            operands[0] = TextStringObject()
                    else:
                        if isinstance(text, TextStringObject) or \
                                isinstance(text, ByteStringObject):
                            operands[0] = TextStringObject()
                elif operator == b_("'"):
                    text = operands[0]
                    if not ignoreByteStringObject:
                        if isinstance(text, TextStringObject):
                            operands[0] = TextStringObject()
                    else:
                        if isinstance(text, TextStringObject) or \
                                isinstance(text, ByteStringObject):
                            operands[0] = TextStringObject()
                elif operator == b_('"'):
                    text = operands[2]
                    if not ignoreByteStringObject:
                        if isinstance(text, TextStringObject):
                            operands[2] = TextStringObject()
                    else:
                        if isinstance(text, TextStringObject) or \
                                isinstance(text, ByteStringObject):
                            operands[2] = TextStringObject()
                elif operator == b_("TJ"):
                    for i in range(len(operands[0])):
                        if not ignoreByteStringObject:
                            if isinstance(operands[0][i], TextStringObject):
                                operands[0][i] = TextStringObject()
                        else:
                            if isinstance(operands[0][i], TextStringObject) or \
                                    isinstance(operands[0][i], ByteStringObject):
                                operands[0][i] = TextStringObject()
            pageRef.__setitem__(NameObject('/Contents'), content)
    def addLink(self, pagenum, pagedest, rect, border=None, fit='/Fit', *args):
        """
        Add an internal link from a rectangular area to the specified page.

        :param int pagenum: index of the page on which to place the link.
        :param int pagedest: index of the page to which the link should go.
        :param rect: :class:`RectangleObject<PyPDF2.generic.RectangleObject>` or array of four
            integers specifying the clickable rectangular area
            ``[xLL, yLL, xUR, yUR]``, or string in the form ``"[ xLL yLL xUR yUR ]"``.
        :param border: if provided, an array describing border-drawing
            properties. See the PDF spec for details. No border will be
            drawn if this argument is omitted.
        :param str fit: Page fit or 'zoom' option (see below). Additional arguments may need
            to be supplied. Passing ``None`` will be read as a null value for that coordinate.

        Valid zoom arguments (see Table 8.2 of the PDF 1.7 reference for details):
             /Fit       No additional arguments
             /XYZ       [left] [top] [zoomFactor]
             /FitH      [top]
             /FitV      [left]
             /FitR      [left] [bottom] [right] [top]
             /FitB      No additional arguments
             /FitBH     [top]
             /FitBV     [left]
        """
        pageLink = self.getObject(self._pages)['/Kids'][pagenum]
        pageDest = self.getObject(self._pages)['/Kids'][pagedest] #TODO: switch for external link
        pageRef = self.getObject(pageLink)
        if border is not None:
            # NOTE(review): border values are wrapped in NameObject here;
            # NumberObject would seem more natural for widths — verify.
            borderArr = [NameObject(n) for n in border[:3]]
            if len(border) == 4:
                dashPattern = ArrayObject([NameObject(n) for n in border[3]])
                borderArr.append(dashPattern)
        else:
            # No border requested: zero-width border triple.
            borderArr = [NumberObject(0)] * 3
        if isString(rect):
            rect = NameObject(rect)
        elif isinstance(rect, RectangleObject):
            pass
        else:
            rect = RectangleObject(rect)
        # None zoom arguments become PDF nulls.
        zoomArgs = []
        for a in args:
            if a is not None:
                zoomArgs.append(NumberObject(a))
            else:
                zoomArgs.append(NullObject())
        dest = Destination(NameObject("/LinkName"), pageDest, NameObject(fit), *zoomArgs) #TODO: create a better name for the link
        destArray = dest.getDestArray()
        lnk = DictionaryObject()
        lnk.update({
            NameObject('/Type'): NameObject('/Annot'),
            NameObject('/Subtype'): NameObject('/Link'),
            NameObject('/P'): pageLink,
            NameObject('/Rect'): rect,
            NameObject('/Border'): ArrayObject(borderArr),
            NameObject('/Dest'): destArray
        })
        lnkRef = self._addObject(lnk)
        # Append to the page's annotation array, creating it if necessary.
        if "/Annots" in pageRef:
            pageRef['/Annots'].append(lnkRef)
        else:
            pageRef[NameObject('/Annots')] = ArrayObject([lnkRef])
_valid_layouts = ['/NoLayout', '/SinglePage', '/OneColumn', '/TwoColumnLeft', '/TwoColumnRight', '/TwoPageLeft', '/TwoPageRight']
def getPageLayout(self):
"""
Get the page layout.
See :meth:`setPageLayout()<PdfFileWriter.setPageLayout>` for a description of valid layouts.
:return: Page layout currently being used.
:rtype: str, None if not specified
"""
try:
return self._root_object['/PageLayout']
except KeyError:
return None
def setPageLayout(self, layout):
"""
Set the page layout
:param str layout: The page layout to be used
Valid layouts are:
/NoLayout Layout explicitly not specified
/SinglePage Show one page at a time
/OneColumn Show one column at a time
/TwoColumnLeft Show pages in two columns, odd-numbered pages on the left
/TwoColumnRight Show pages in two columns, odd-numbered pages on the right
/TwoPageLeft Show two pages at a time, odd-numbered pages on the left
/TwoPageRight Show two pages at a time, odd-numbered pages on the right
"""
if not isinstance(layout, NameObject):
if layout not in self._valid_layouts:
warnings.warn("Layout should be one of: {}".format(', '.join(self._valid_layouts)))
layout = NameObject(layout)
self._root_object.update({NameObject('/PageLayout'): layout})
pageLayout = property(getPageLayout, setPageLayout)
"""Read and write property accessing the :meth:`getPageLayout()<PdfFileWriter.getPageLayout>`
and :meth:`setPageLayout()<PdfFileWriter.setPageLayout>` methods."""
_valid_modes = ['/UseNone', '/UseOutlines', '/UseThumbs', '/FullScreen', '/UseOC', '/UseAttachments']
def getPageMode(self):
"""
Get the page mode.
See :meth:`setPageMode()<PdfFileWriter.setPageMode>` for a description
of valid modes.
:return: Page mode currently being used.
:rtype: str, None if not specified
"""
try:
return self._root_object['/PageMode']
except KeyError:
return None
def setPageMode(self, mode):
"""
Set the page mode.
:param str mode: The page mode to use.
Valid modes are:
/UseNone Do not show outlines or thumbnails panels
/UseOutlines Show outlines (aka bookmarks) panel
/UseThumbs Show page thumbnails panel
/FullScreen Fullscreen view
/UseOC Show Optional Content Group (OCG) panel
/UseAttachments Show attachments panel
"""
if not isinstance(mode, NameObject):
if mode not in self._valid_modes:
warnings.warn("Mode should be one of: {}".format(', '.join(self._valid_modes)))
mode = NameObject(mode)
self._root_object.update({NameObject('/PageMode'): mode})
pageMode = property(getPageMode, setPageMode)
"""Read and write property accessing the :meth:`getPageMode()<PdfFileWriter.getPageMode>`
and :meth:`setPageMode()<PdfFileWriter.setPageMode>` methods."""
class PdfFileReader(object):
    """
    Reads a PDF file.  Construction can take some time, as the PDF stream's
    cross-reference tables are read into memory.

    :param stream: A File object or an object that supports the standard read
        and seek methods similar to a File object. Could also be a
        string representing a path to a PDF file.
    :param bool strict: Determines whether user should be warned of all
        problems and also causes some correctable problems to be fatal.
        Defaults to ``True``.
    :param warndest: Destination for logging warnings (defaults to
        ``sys.stderr``).
    :param bool overwriteWarnings: Determines whether to override Python's
        ``warnings.py`` module with a custom implementation (defaults to
        ``True``).
    """
    def __init__(self, stream, strict=True, warndest = None, overwriteWarnings = True):
        # See the class docstring for parameter documentation.
        if overwriteWarnings:
            # have to dynamically override the default showwarning since there are no
            # public methods that specify the 'file' parameter
            def _showwarning(message, category, filename, lineno, file=warndest, line=None):
                if file is None:
                    file = sys.stderr
                try:
                    file.write(formatWarning(message, category, filename, lineno, line))
                except IOError:
                    # Best effort: a broken warn destination must not abort parsing.
                    pass
            warnings.showwarning = _showwarning
        self.strict = strict
        self.flattenedPages = None  # populated lazily by _flatten()
        self.resolvedObjects = {}  # cache used by cacheGetIndirectObject()/cacheIndirectObject()
        self.xrefIndex = 0
        self._pageId2Num = None # map page IndirectRef number to Page Number
        if hasattr(stream, 'mode') and 'b' not in stream.mode:
            warnings.warn("PdfFileReader stream/file object is not in binary mode. It may not be read correctly.", utils.PdfReadWarning)
        if isString(stream):
            # A path was given: slurp the whole file into memory and parse that.
            fileobj = open(stream, 'rb')
            stream = BytesIO(b_(fileobj.read()))
            fileobj.close()
        self.read(stream)
        self.stream = stream
        # When True, getObject() skips decryption (used to read /Encrypt itself).
        self._override_encryption = False
def getDocumentInfo(self):
"""
Retrieves the PDF file's document information dictionary, if it exists.
Note that some PDF files use metadata streams instead of docinfo
dictionaries, and these metadata streams will not be accessed by this
function.
:return: the document information of this PDF file
:rtype: :class:`DocumentInformation<pdf.DocumentInformation>` or ``None`` if none exists.
"""
if "/Info" not in self.trailer:
return None
obj = self.trailer['/Info']
retval = DocumentInformation()
retval.update(obj)
return retval
documentInfo = property(lambda self: self.getDocumentInfo(), None, None)
"""Read-only property that accesses the :meth:`getDocumentInfo()<PdfFileReader.getDocumentInfo>` function."""
def getXmpMetadata(self):
"""
Retrieves XMP (Extensible Metadata Platform) data from the PDF document
root.
:return: a :class:`XmpInformation<xmp.XmpInformation>`
instance that can be used to access XMP metadata from the document.
:rtype: :class:`XmpInformation<xmp.XmpInformation>` or
``None`` if no metadata was found on the document root.
"""
try:
self._override_encryption = True
return self.trailer["/Root"].getXmpMetadata()
finally:
self._override_encryption = False
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
"""
Read-only property that accesses the
:meth:`getXmpMetadata()<PdfFileReader.getXmpMetadata>` function.
"""
def getNumPages(self):
"""
Calculates the number of pages in this PDF file.
:return: number of pages
:rtype: int
:raises PdfReadError: if file is encrypted and restrictions prevent
this action.
"""
# Flattened pages will not work on an Encrypted PDF;
# the PDF file's page count is used in this case. Otherwise,
# the original method (flattened page count) is used.
if self.isEncrypted:
try:
self._override_encryption = True
self.decrypt('')
return self.trailer["/Root"]["/Pages"]["/Count"]
except:
raise utils.PdfReadError("File has not been decrypted")
finally:
self._override_encryption = False
else:
if self.flattenedPages == None:
self._flatten()
return len(self.flattenedPages)
numPages = property(lambda self: self.getNumPages(), None, None)
"""
Read-only property that accesses the
:meth:`getNumPages()<PdfFileReader.getNumPages>` function.
"""
def getPage(self, pageNumber):
"""
Retrieves a page by number from this PDF file.
:param int pageNumber: The page number to retrieve
(pages begin at zero)
:return: a :class:`PageObject<pdf.PageObject>` instance.
:rtype: :class:`PageObject<pdf.PageObject>`
"""
## ensure that we're not trying to access an encrypted PDF
#assert not self.trailer.has_key("/Encrypt")
if self.flattenedPages == None:
self._flatten()
return self.flattenedPages[pageNumber]
namedDestinations = property(lambda self:
self.getNamedDestinations(), None, None)
"""
Read-only property that accesses the
:meth:`getNamedDestinations()<PdfFileReader.getNamedDestinations>` function.
"""
# A select group of relevant field attributes. For the complete list,
# see section 8.6.2 of the PDF 1.7 reference.
    def getFields(self, tree = None, retval = None, fileobj = None):
        """
        Extracts field data if this PDF contains interactive form fields.

        The *tree* and *retval* parameters are for recursive use.

        :param fileobj: A file object (usually a text file) to write
            a report to on all interactive form fields found.
        :return: A dictionary where each key is a field name, and each
            value is a :class:`Field<PyPDF2.generic.Field>` object. By
            default, the mapping name is used for keys.
        :rtype: dict, or ``None`` if form data could not be located.
        """
        fieldAttributes = {"/FT" : "Field Type", "/Parent" : "Parent",
                           "/T" : "Field Name", "/TU" : "Alternate Field Name",
                           "/TM" : "Mapping Name", "/Ff" : "Field Flags",
                           "/V" : "Value", "/DV" : "Default Value"}
        if retval == None:
            # top-level call: start at the catalog's /AcroForm tree
            retval = {}
            catalog = self.trailer["/Root"]
            # get the AcroForm tree
            if "/AcroForm" in catalog:
                tree = catalog["/AcroForm"]
            else:
                return None
        if tree == None:
            return retval
        self._checkKids(tree, retval, fileobj)
        for attr in fieldAttributes:
            if attr in tree:
                # Tree is a field
                self._buildField(tree, retval, fileobj, fieldAttributes)
                break
        if "/Fields" in tree:
            fields = tree["/Fields"]
            for f in fields:
                field = f.getObject()
                self._buildField(field, retval, fileobj, fieldAttributes)
        return retval
def _buildField(self, field, retval, fileobj, fieldAttributes):
self._checkKids(field, retval, fileobj)
try:
key = field["/TM"]
except KeyError:
try:
key = field["/T"]
except KeyError:
# Ignore no-name field for now
return
if fileobj:
self._writeField(fileobj, field, fieldAttributes)
fileobj.write("\n")
retval[key] = Field(field)
def _checkKids(self, tree, retval, fileobj):
if "/Kids" in tree:
# recurse down the tree
for kid in tree["/Kids"]:
self.getFields(kid.getObject(), retval, fileobj)
def _writeField(self, fileobj, field, fieldAttributes):
order = ["/TM", "/T", "/FT", "/Parent", "/TU", "/Ff", "/V", "/DV"]
for attr in order:
attrName = fieldAttributes[attr]
try:
if attr == "/FT":
# Make the field type value more clear
types = {"/Btn":"Button", "/Tx":"Text", "/Ch": "Choice",
"/Sig":"Signature"}
if field[attr] in types:
fileobj.write(attrName + ": " + types[field[attr]] + "\n")
elif attr == "/Parent":
# Let's just write the name of the parent
try:
name = field["/Parent"]["/TM"]
except KeyError:
name = field["/Parent"]["/T"]
fileobj.write(attrName + ": " + name + "\n")
else:
fileobj.write(attrName + ": " + str(field[attr]) + "\n")
except KeyError:
# Field attribute is N/A or unknown, so don't write anything
pass
def getFormTextFields(self):
''' Retrieves form fields from the document with textual data (inputs, dropdowns)
'''
# Retrieve document form fields
formfields = self.getFields()
return dict(
(formfields[field]['/T'], formfields[field].get('/V')) for field in formfields \
if formfields[field].get('/FT') == '/Tx'
)
    def getNamedDestinations(self, tree=None, retval=None):
        """
        Retrieves the named destinations present in the document.

        The *tree* and *retval* parameters are for recursive use.

        :return: a dictionary which maps names to
            :class:`Destinations<PyPDF2.generic.Destination>`.
        :rtype: dict
        """
        if retval == None:
            retval = {}
            catalog = self.trailer["/Root"]
            # get the name tree
            if "/Dests" in catalog:
                # PDF 1.1-style /Dests dictionary directly on the catalog.
                tree = catalog["/Dests"]
            elif "/Names" in catalog:
                # PDF 1.2+ name tree under /Names -> /Dests.
                names = catalog['/Names']
                if "/Dests" in names:
                    tree = names['/Dests']
        if tree == None:
            return retval
        if "/Kids" in tree:
            # recurse down the tree
            for kid in tree["/Kids"]:
                self.getNamedDestinations(kid.getObject(), retval)
        if "/Names" in tree:
            # Leaf node: /Names is a flat [name, dest, name, dest, ...] array.
            names = tree["/Names"]
            for i in range(0, len(names), 2):
                key = names[i].getObject()
                val = names[i+1].getObject()
                if isinstance(val, DictionaryObject) and '/D' in val:
                    # Destination wrapped in a dictionary: unwrap the array.
                    val = val['/D']
                dest = self._buildDestination(key, val)
                if dest != None:
                    retval[key] = dest
        return retval
outlines = property(lambda self: self.getOutlines(), None, None)
"""
Read-only property that accesses the
:meth:`getOutlines()<PdfFileReader.getOutlines>` function.
"""
    def getOutlines(self, node=None, outlines=None):
        """
        Retrieves the document outline present in the document.

        The *node* and *outlines* parameters are for recursive use.

        :return: a nested list of :class:`Destinations<PyPDF2.generic.Destination>`.
        """
        if outlines == None:
            # top-level call: start from the catalog's /Outlines entry
            outlines = []
            catalog = self.trailer["/Root"]
            # get the outline dictionary and named destinations
            if "/Outlines" in catalog:
                try:
                    lines = catalog["/Outlines"]
                except utils.PdfReadError:
                    # this occurs if the /Outlines object reference is incorrect
                    # for an example of such a file, see https://unglueit-files.s3.amazonaws.com/ebf/7552c42e9280b4476e59e77acc0bc812.pdf
                    # so continue to load the file without the Bookmarks
                    return outlines
                if "/First" in lines:
                    node = lines["/First"]
            self._namedDests = self.getNamedDestinations()
        if node == None:
            return outlines
        # see if there are any more outlines
        while True:
            outline = self._buildOutline(node)
            if outline:
                outlines.append(outline)
            # check for sub-outlines
            if "/First" in node:
                subOutlines = []
                self.getOutlines(node["/First"], subOutlines)
                if subOutlines:
                    # children appear as a nested list right after their parent
                    outlines.append(subOutlines)
            if "/Next" not in node:
                break
            node = node["/Next"]
        return outlines
def _getPageNumberByIndirect(self, indirectRef):
"""Generate _pageId2Num"""
if self._pageId2Num is None:
id2num = {}
for i, x in enumerate(self.pages):
id2num[x.indirectRef.idnum] = i
self._pageId2Num = id2num
if isinstance(indirectRef, int):
idnum = indirectRef
else:
idnum = indirectRef.idnum
ret = self._pageId2Num.get(idnum, -1)
return ret
def getPageNumber(self, page):
"""
Retrieve page number of a given PageObject
:param PageObject page: The page to get page number. Should be
an instance of :class:`PageObject<PyPDF2.pdf.PageObject>`
:return: the page number or -1 if page not found
:rtype: int
"""
indirectRef = page.indirectRef
ret = self._getPageNumberByIndirect(indirectRef)
return ret
def getDestinationPageNumber(self, destination):
"""
Retrieve page number of a given Destination object
:param Destination destination: The destination to get page number.
Should be an instance of
:class:`Destination<PyPDF2.pdf.Destination>`
:return: the page number or -1 if page not found
:rtype: int
"""
indirectRef = destination.page
ret = self._getPageNumberByIndirect(indirectRef)
return ret
def _buildDestination(self, title, array):
page, typ = array[0:2]
array = array[2:]
return Destination(title, page, typ, *array)
    def _buildOutline(self, node):
        """Build a Destination for one outline node, or None if it has no
        usable destination."""
        dest, title, outline = None, None, None
        if "/A" in node and "/Title" in node:
            # Action, section 8.5 (only type GoTo supported)
            title = node["/Title"]
            action = node["/A"]
            if action["/S"] == "/GoTo":
                dest = action["/D"]
        elif "/Dest" in node and "/Title" in node:
            # Destination, section 8.2.1
            title = node["/Title"]
            dest = node["/Dest"]
        # if destination found, then create outline
        if dest:
            if isinstance(dest, ArrayObject):
                outline = self._buildDestination(title, dest)
            elif isString(dest) and dest in self._namedDests:
                # named destination: reuse the parsed Destination, retitled
                outline = self._namedDests[dest]
                outline[NameObject("/Title")] = title
            else:
                raise utils.PdfReadError("Unexpected destination %r" % dest)
        return outline
pages = property(lambda self: ConvertFunctionsToVirtualList(self.getNumPages, self.getPage),
None, None)
"""
Read-only property that emulates a list based upon the
:meth:`getNumPages()<PdfFileReader.getNumPages>` and
:meth:`getPage()<PdfFileReader.getPage>` methods.
"""
def getPageLayout(self):
"""
Get the page layout.
See :meth:`setPageLayout()<PdfFileWriter.setPageLayout>`
for a description of valid layouts.
:return: Page layout currently being used.
:rtype: ``str``, ``None`` if not specified
"""
try:
return self.trailer['/Root']['/PageLayout']
except KeyError:
return None
pageLayout = property(getPageLayout)
"""Read-only property accessing the
:meth:`getPageLayout()<PdfFileReader.getPageLayout>` method."""
def getPageMode(self):
"""
Get the page mode.
See :meth:`setPageMode()<PdfFileWriter.setPageMode>`
for a description of valid modes.
:return: Page mode currently being used.
:rtype: ``str``, ``None`` if not specified
"""
try:
return self.trailer['/Root']['/PageMode']
except KeyError:
return None
pageMode = property(getPageMode)
"""Read-only property accessing the
:meth:`getPageMode()<PdfFileReader.getPageMode>` method."""
    def _flatten(self, pages=None, inherit=None, indirectRef=None):
        """
        Recursively walk the /Pages tree, producing ``self.flattenedPages``.

        Inheritable attributes (/Resources, /MediaBox, /CropBox, /Rotate)
        are pushed down from page-tree nodes onto the leaf pages.
        """
        inheritablePageAttributes = (
            NameObject("/Resources"), NameObject("/MediaBox"),
            NameObject("/CropBox"), NameObject("/Rotate")
        )
        if inherit == None:
            inherit = dict()
        if pages == None:
            # top-level call: start at the catalog's page-tree root
            self.flattenedPages = []
            catalog = self.trailer["/Root"].getObject()
            pages = catalog["/Pages"].getObject()
        t = "/Pages"
        if "/Type" in pages:
            t = pages["/Type"]
        if t == "/Pages":
            # interior node: record inheritable attrs, then recurse into kids
            for attr in inheritablePageAttributes:
                if attr in pages:
                    inherit[attr] = pages[attr]
            for page in pages["/Kids"]:
                addt = {}
                if isinstance(page, IndirectObject):
                    addt["indirectRef"] = page
                self._flatten(page.getObject(), inherit, **addt)
        elif t == "/Page":
            for attr, value in list(inherit.items()):
                # if the page has it's own value, it does not inherit the
                # parent's value:
                if attr not in pages:
                    pages[attr] = value
            pageObj = PageObject(self, indirectRef)
            pageObj.update(pages)
            self.flattenedPages.append(pageObj)
    def _getObjectFromStream(self, indirectReference):
        """
        Fetch an object stored inside a compressed object stream (/ObjStm).

        :param indirectReference: reference whose idnum appears in
            ``self.xref_objStm``.
        :return: the parsed object, or ``NullObject()`` when unreadable in
            non-strict mode.
        """
        # indirect reference to object in object stream
        # read the entire object stream into memory
        debug = False
        stmnum, idx = self.xref_objStm[indirectReference.idnum]
        if debug: print(("Here1: %s %s"%(stmnum, idx)))
        objStm = IndirectObject(stmnum, 0, self).getObject()
        if debug: print(("Here2: objStm=%s.. stmnum=%s data=%s"%(objStm, stmnum, objStm.getData())))
        # This is an xref to a stream, so its type better be a stream
        assert objStm['/Type'] == '/ObjStm'
        # /N is the number of indirect objects in the stream
        assert idx < objStm['/N']
        streamData = BytesIO(b_(objStm.getData()))
        for i in range(objStm['/N']):
            # The stream header is a sequence of "objnum offset" pairs.
            readNonWhitespace(streamData)
            streamData.seek(-1, 1)
            objnum = NumberObject.readFromStream(streamData)
            readNonWhitespace(streamData)
            streamData.seek(-1, 1)
            offset = NumberObject.readFromStream(streamData)
            readNonWhitespace(streamData)
            streamData.seek(-1, 1)
            if objnum != indirectReference.idnum:
                # We're only interested in one object
                continue
            if self.strict and idx != i:
                raise utils.PdfReadError("Object is in wrong index.")
            # /First is the byte offset of the first object's data.
            streamData.seek(objStm['/First']+offset, 0)
            if debug:
                pos = streamData.tell()
                streamData.seek(0, 0)
                lines = streamData.readlines()
                for i in range(0, len(lines)):
                    print((lines[i]))
                streamData.seek(pos, 0)
            try:
                obj = readObject(streamData, self)
            except utils.PdfStreamError as e:
                # Stream object cannot be read. Normally, a critical error, but
                # Adobe Reader doesn't complain, so continue (in strict mode?)
                e = sys.exc_info()[1]
                warnings.warn("Invalid stream (index %d) within object %d %d: %s" % \
                      (i, indirectReference.idnum, indirectReference.generation, e), utils.PdfReadWarning)
                if self.strict:
                    raise utils.PdfReadError("Can't read object stream: %s"%e)
                # Replace with null. Hopefully it's nothing important.
                obj = NullObject()
            return obj
        if self.strict: raise utils.PdfReadError("This is a fatal error in strict mode.")
        return NullObject()
    def getObject(self, indirectReference):
        """
        Resolve an indirect reference to the actual object, using the cache,
        object streams, or the cross-reference table as appropriate.

        :param indirectReference: the IndirectObject to resolve.
        :return: the referenced PdfObject.
        :raises PdfReadError: if the object cannot be located, or the file is
            encrypted and has not been decrypted.
        """
        debug = False
        if debug: print(("looking at:", indirectReference.idnum, indirectReference.generation))
        retval = self.cacheGetIndirectObject(indirectReference.generation,
                                                indirectReference.idnum)
        if retval != None:
            return retval
        if indirectReference.generation == 0 and \
                        indirectReference.idnum in self.xref_objStm:
            # object lives inside a compressed object stream
            retval = self._getObjectFromStream(indirectReference)
        elif indirectReference.generation in self.xref and \
                indirectReference.idnum in self.xref[indirectReference.generation]:
            start = self.xref[indirectReference.generation][indirectReference.idnum]
            if debug: print(("  Uncompressed Object", indirectReference.idnum, indirectReference.generation, ":", start))
            self.stream.seek(start, 0)
            idnum, generation = self.readObjectHeader(self.stream)
            if idnum != indirectReference.idnum and self.xrefIndex:
                # Xref table probably had bad indexes due to not being zero-indexed
                if self.strict:
                    raise utils.PdfReadError("Expected object ID (%d %d) does not match actual (%d %d); xref table not zero-indexed." \
                                     % (indirectReference.idnum, indirectReference.generation, idnum, generation))
                else: pass # xref table is corrected in non-strict mode
            elif idnum != indirectReference.idnum:
                # some other problem
                raise utils.PdfReadError("Expected object ID (%d %d) does not match actual (%d %d)." \
                                         % (indirectReference.idnum, indirectReference.generation, idnum, generation))
            assert generation == indirectReference.generation
            retval = readObject(self.stream, self)
            # override encryption is used for the /Encrypt dictionary
            if not self._override_encryption and self.isEncrypted:
                # if we don't have the encryption key:
                if not hasattr(self, '_decryption_key'):
                    raise utils.PdfReadError("file has not been decrypted")
                # otherwise, decrypt here...
                import struct
                # Per-object RC4 key: file key + low 3 bytes of idnum +
                # low 2 bytes of generation, hashed with MD5.
                pack1 = struct.pack("<i", indirectReference.idnum)[:3]
                pack2 = struct.pack("<i", indirectReference.generation)[:2]
                key = self._decryption_key + pack1 + pack2
                assert len(key) == (len(self._decryption_key) + 5)
                md5_hash = md5(key).digest()
                key = md5_hash[:min(16, len(self._decryption_key) + 5)]
                retval = self._decryptObject(retval, key)
        else:
            warnings.warn("Object %d %d not defined."%(indirectReference.idnum,
                        indirectReference.generation), utils.PdfReadWarning)
            #if self.strict:
            raise utils.PdfReadError("Could not find object.")
        self.cacheIndirectObject(indirectReference.generation,
                indirectReference.idnum, retval)
        return retval
def _decryptObject(self, obj, key):
if isinstance(obj, ByteStringObject) or isinstance(obj, TextStringObject):
obj = createStringObject(utils.RC4_encrypt(key, obj.original_bytes))
elif isinstance(obj, StreamObject):
obj._data = utils.RC4_encrypt(key, obj._data)
elif isinstance(obj, DictionaryObject):
for dictkey, value in list(obj.items()):
obj[dictkey] = self._decryptObject(value, key)
elif isinstance(obj, ArrayObject):
for i in range(len(obj)):
obj[i] = self._decryptObject(obj[i], key)
return obj
    def readObjectHeader(self, stream):
        """
        Parse an ``<idnum> <generation> obj`` header at the current position.

        Tolerates (and, in strict mode, warns about) superfluous whitespace
        that some writers leave around the header.  Leaves the stream
        positioned just before the first non-whitespace byte after ``obj``.

        :return: ``(idnum, generation)`` as ints.
        """
        # Should never be necessary to read out whitespace, since the
        # cross-reference table should put us in the right spot to read the
        # object header.  In reality... some files have stupid cross reference
        # tables that are off by whitespace bytes.
        extra = False
        utils.skipOverComment(stream)
        extra |= utils.skipOverWhitespace(stream); stream.seek(-1, 1)
        idnum = readUntilWhitespace(stream)
        extra |= utils.skipOverWhitespace(stream); stream.seek(-1, 1)
        generation = readUntilWhitespace(stream)
        obj = stream.read(3)  # the literal "obj" keyword
        readNonWhitespace(stream)
        stream.seek(-1, 1)
        if (extra and self.strict):
            #not a fatal error
            warnings.warn("Superfluous whitespace found in object header %s %s" % \
                          (idnum, generation), utils.PdfReadWarning)
        return int(idnum), int(generation)
def cacheGetIndirectObject(self, generation, idnum):
debug = False
out = self.resolvedObjects.get((generation, idnum))
if debug and out: print(("cache hit: %d %d"%(idnum, generation)))
elif debug: print(("cache miss: %d %d"%(idnum, generation)))
return out
def cacheIndirectObject(self, generation, idnum, obj):
# return None # Sometimes we want to turn off cache for debugging.
if (generation, idnum) in self.resolvedObjects:
msg = "Overwriting cache for %s %s"%(generation, idnum)
if self.strict: raise utils.PdfReadError(msg)
else: warnings.warn(msg)
self.resolvedObjects[(generation, idnum)] = obj
return obj
    def read(self, stream):
        """
        Parse the file trailer and all cross-reference tables/streams.

        Walks backwards from the ``%%EOF`` marker to find ``startxref``, then
        follows the ``/Prev`` chain, populating ``self.xref`` (byte offsets by
        generation), ``self.xref_objStm`` (compressed-object locations) and
        ``self.trailer``.  Handles both classic xref tables and PDF 1.5+
        cross-reference streams, plus several observed malformations.

        :raises utils.PdfReadError: on an empty file, a missing EOF marker,
            a missing/undiscoverable xref table, or table read errors.
        """
        debug = False
        if debug: print(">>read", stream)
        # start at the end:
        stream.seek(-1, 2)
        if not stream.tell():
            raise utils.PdfReadError('Cannot read an empty file')
        last1K = stream.tell() - 1024 + 1 # offset of last 1024 bytes of stream
        line = b_('')
        while line[:5] != b_("%%EOF"):
            if stream.tell() < last1K:
                raise utils.PdfReadError("EOF marker not found")
            line = self.readNextEndLine(stream)
            if debug: print(" line:",line)

        # find startxref entry - the location of the xref table
        line = self.readNextEndLine(stream)
        try:
            startxref = int(line)
        except ValueError:
            # 'startxref' may be on the same line as the location
            if not line.startswith(b_("startxref")):
                raise utils.PdfReadError("startxref not found")
            startxref = int(line[9:].strip())
            warnings.warn("startxref on same line as offset")
        else:
            line = self.readNextEndLine(stream)
            if line[:9] != b_("startxref"):
                raise utils.PdfReadError("startxref not found")

        # read all cross reference tables and their trailers
        self.xref = {}
        self.xref_objStm = {}
        self.trailer = DictionaryObject()
        while True:
            # load the xref table
            stream.seek(startxref, 0)
            x = stream.read(1)
            if x == b_("x"):
                # standard cross-reference table
                ref = stream.read(4)
                if ref[:3] != b_("ref"):
                    raise utils.PdfReadError("xref table read error")
                readNonWhitespace(stream)
                stream.seek(-1, 1)
                firsttime = True; # check if the first time looking at the xref table
                while True:
                    num = readObject(stream, self)
                    if firsttime and num != 0:
                         self.xrefIndex = num
                         if self.strict:
                            warnings.warn("Xref table not zero-indexed. ID numbers for objects will be corrected.", utils.PdfReadWarning)
                            #if table not zero indexed, could be due to error from when PDF was created
                            #which will lead to mismatched indices later on, only warned and corrected if self.strict=True
                    firsttime = False
                    readNonWhitespace(stream)
                    stream.seek(-1, 1)
                    size = readObject(stream, self)
                    readNonWhitespace(stream)
                    stream.seek(-1, 1)
                    cnt = 0
                    while cnt < size:
                        line = stream.read(20)

                        # It's very clear in section 3.4.3 of the PDF spec
                        # that all cross-reference table lines are a fixed
                        # 20 bytes (as of PDF 1.7). However, some files have
                        # 21-byte entries (or more) due to the use of \r\n
                        # (CRLF) EOL's. Detect that case, and adjust the line
                        # until it does not begin with a \r (CR) or \n (LF).
                        while line[0] in b_("\x0D\x0A"):
                            stream.seek(-20 + 1, 1)
                            line = stream.read(20)

                        # On the other hand, some malformed PDF files
                        # use a single character EOL without a preceeding
                        # space.  Detect that case, and seek the stream
                        # back one character.  (0-9 means we've bled into
                        # the next xref entry, t means we've bled into the
                        # text "trailer"):
                        if line[-1] in b_("0123456789t"):
                            stream.seek(-1, 1)
                        offset, generation = line[:16].split(b_(" "))
                        offset, generation = int(offset), int(generation)
                        if generation not in self.xref:
                            self.xref[generation] = {}
                        if num in self.xref[generation]:
                            # It really seems like we should allow the last
                            # xref table in the file to override previous
                            # ones. Since we read the file backwards, assume
                            # any existing key is already set correctly.
                            pass
                        else:
                            self.xref[generation][num] = offset
                        cnt += 1
                        num += 1
                    readNonWhitespace(stream)
                    stream.seek(-1, 1)
                    trailertag = stream.read(7)
                    if trailertag != b_("trailer"):
                        # more xrefs!
                        stream.seek(-7, 1)
                    else:
                        break
                readNonWhitespace(stream)
                stream.seek(-1, 1)
                newTrailer = readObject(stream, self)
                # Earlier (later-in-file) trailers win; only fill missing keys.
                for key, value in list(newTrailer.items()):
                    if key not in self.trailer:
                        self.trailer[key] = value
                if "/Prev" in newTrailer:
                    startxref = newTrailer["/Prev"]
                else:
                    break
            elif x.isdigit():
                # PDF 1.5+ Cross-Reference Stream
                stream.seek(-1, 1)
                idnum, generation = self.readObjectHeader(stream)
                xrefstream = readObject(stream, self)
                assert xrefstream["/Type"] == "/XRef"
                self.cacheIndirectObject(generation, idnum, xrefstream)
                streamData = BytesIO(b_(xrefstream.getData()))
                # Index pairs specify the subsections in the dictionary. If
                # none create one subsection that spans everything.
                idx_pairs = xrefstream.get("/Index", [0, xrefstream.get("/Size")])
                if debug: print(("read idx_pairs=%s"%list(self._pairs(idx_pairs))))
                entrySizes = xrefstream.get("/W")
                assert len(entrySizes) >= 3
                if self.strict and len(entrySizes) > 3:
                    raise utils.PdfReadError("Too many entry sizes: %s" %entrySizes)

                def getEntry(i):
                    # Reads the correct number of bytes for each entry. See the
                    # discussion of the W parameter in PDF spec table 17.
                    if entrySizes[i] > 0:
                        d = streamData.read(entrySizes[i])
                        return convertToInt(d, entrySizes[i])

                    # PDF Spec Table 17: A value of zero for an element in the
                    # W array indicates...the default value shall be used
                    if i == 0:  return 1 # First value defaults to 1
                    else:       return 0

                def used_before(num, generation):
                    # We move backwards through the xrefs, don't replace any.
                    return num in self.xref.get(generation, []) or \
                            num in self.xref_objStm

                # Iterate through each subsection
                last_end = 0
                for start, size in self._pairs(idx_pairs):
                    # The subsections must increase
                    assert start >= last_end
                    last_end = start + size
                    for num in range(start, start+size):
                        # The first entry is the type
                        xref_type = getEntry(0)
                        # The rest of the elements depend on the xref_type
                        if xref_type == 0:
                            # linked list of free objects
                            next_free_object = getEntry(1)
                            next_generation = getEntry(2)
                        elif xref_type == 1:
                            # objects that are in use but are not compressed
                            byte_offset = getEntry(1)
                            generation = getEntry(2)
                            if generation not in self.xref:
                                self.xref[generation] = {}
                            if not used_before(num, generation):
                                self.xref[generation][num] = byte_offset
                                if debug: print(("XREF Uncompressed: %s %s"%(
                                    num, generation)))
                        elif xref_type == 2:
                            # compressed objects
                            objstr_num = getEntry(1)
                            obstr_idx = getEntry(2)
                            generation = 0 # PDF spec table 18, generation is 0
                            if not used_before(num, generation):
                                if debug: print(("XREF Compressed: %s %s %s"%(
                                        num, objstr_num, obstr_idx)))
                                self.xref_objStm[num] = (objstr_num, obstr_idx)
                        elif self.strict:
                            raise utils.PdfReadError("Unknown xref type: %s"%
                                                        xref_type)

                trailerKeys = "/Root", "/Encrypt", "/Info", "/ID"
                for key in trailerKeys:
                    if key in xrefstream and key not in self.trailer:
                        self.trailer[NameObject(key)] = xrefstream.raw_get(key)
                if "/Prev" in xrefstream:
                    startxref = xrefstream["/Prev"]
                else:
                    break
            else:
                # bad xref character at startxref.  Let's see if we can find
                # the xref table nearby, as we've observed this error with an
                # off-by-one before.
                stream.seek(-11, 1)
                tmp = stream.read(20)
                xref_loc = tmp.find(b_("xref"))
                if xref_loc != -1:
                    startxref -= (10 - xref_loc)
                    continue
                # No explicit xref table, try finding a cross-reference stream.
                stream.seek(startxref, 0)
                found = False
                for look in range(5):
                    if stream.read(1).isdigit():
                        # This is not a standard PDF, consider adding a warning
                        startxref += look
                        found = True
                        break
                if found:
                    continue
                # no xref table found at specified location
                raise utils.PdfReadError("Could not find xref table at specified location")
        #if not zero-indexed, verify that the table is correct; change it if necessary
        if self.xrefIndex and not self.strict:
            loc = stream.tell()
            for gen in self.xref:
                if gen == 65535: continue
                for id in self.xref[gen]:
                    stream.seek(self.xref[gen][id], 0)
                    try:
                        pid, pgen = self.readObjectHeader(stream)
                    except ValueError:
                        break
                    if pid == id - self.xrefIndex:
                        self._zeroXref(gen)
                        break
                    #if not, then either it's just plain wrong, or the non-zero-index is actually correct
            stream.seek(loc, 0) #return to where it was
def _zeroXref(self, generation):
self.xref[generation] = dict( (k-self.xrefIndex, v) for (k, v) in list(self.xref[generation].items()) )
def _pairs(self, array):
i = 0
while True:
yield array[i], array[i+1]
i += 2
if (i+1) >= len(array):
break
    def readNextEndLine(self, stream):
        """
        Read one line *backwards* from the current stream position.

        Scans byte-by-byte toward the start of the file, accumulating content
        until an EOL (CR, LF or CRLF) is crossed; returns the line content
        (without the EOL) and leaves the stream positioned so that repeated
        calls walk the file tail-first.

        :raises utils.PdfReadError: if the start of the file is reached first.
        """
        debug = False
        if debug: print(">>readNextEndLine")
        line = b_("")
        while True:
            # Prevent infinite loops in malformed PDFs
            if stream.tell() == 0:
                raise utils.PdfReadError("Could not read malformed PDF file")
            x = stream.read(1)
            if debug: print((" x:", x, "%x"%ord(x)))
            if stream.tell() < 2:
                raise utils.PdfReadError("EOL marker not found")
            stream.seek(-2, 1)
            if x == b_('\n') or x == b_('\r'): ## \n = LF; \r = CR
                crlf = False
                # Consume the whole EOL run, remembering whether it was CR+LF.
                while x == b_('\n') or x == b_('\r'):
                    if debug:
                        if ord(x) == 0x0D: print(" x is CR 0D")
                        elif ord(x) == 0x0A: print(" x is LF 0A")
                    x = stream.read(1)
                    if x == b_('\n') or x == b_('\r'): # account for CR+LF
                        stream.seek(-1, 1)
                        crlf = True
                    if stream.tell() < 2:
                        raise utils.PdfReadError("EOL marker not found")
                    stream.seek(-2, 1)
                stream.seek(2 if crlf else 1, 1) #if using CR+LF, go back 2 bytes, else 1
                break
            else:
                if debug: print(" x is neither")
                # Bytes are prepended because we are walking backwards.
                line = x + line
                if debug: print((" RNEL line:", line))
        if debug: print("leaving RNEL")
        return line
def decrypt(self, password):
"""
When using an encrypted / secured PDF file with the PDF Standard
encryption handler, this function will allow the file to be decrypted.
It checks the given password against the document's user password and
owner password, and then stores the resulting decryption key if either
password is correct.
It does not matter which password was matched. Both passwords provide
the correct decryption key that will allow the document to be used with
this library.
:param str password: The password to match.
:return: ``0`` if the password failed, ``1`` if the password matched the user
password, and ``2`` if the password matched the owner password.
:rtype: int
:raises NotImplementedError: if document uses an unsupported encryption
method.
"""
self._override_encryption = True
try:
return self._decrypt(password)
finally:
self._override_encryption = False
    def _decrypt(self, password):
        """
        Try *password* against the user password, then the owner password.

        A user-password match yields the decryption key directly.  Otherwise
        the owner entry (/O) is unwound with RC4 to recover the user password,
        which is then authenticated in turn.

        :return: 1 on user-password match, 2 on owner-password match, 0 on
            failure; stores the key in ``self._decryption_key`` on success.
        :raises NotImplementedError: for non-Standard security handlers or
            algorithm codes (/V) other than 1 and 2.
        """
        encrypt = self.trailer['/Encrypt'].getObject()
        if encrypt['/Filter'] != '/Standard':
            raise NotImplementedError("only Standard PDF encryption handler is available")
        if not (encrypt['/V'] in (1, 2)):
            raise NotImplementedError("only algorithm code 1 and 2 are supported")
        user_password, key = self._authenticateUserPassword(password)
        if user_password:
            self._decryption_key = key
            return 1
        else:
            rev = encrypt['/R'].getObject()
            if rev == 2:
                keylen = 5
            else:
                keylen = encrypt['/Length'].getObject() // 8
            key = _alg33_1(password, rev, keylen)
            real_O = encrypt["/O"].getObject()
            if rev == 2:
                userpass = utils.RC4_encrypt(key, real_O)
            else:
                # rev >= 3: undo the 20 RC4 passes (XOR-varied keys, applied
                # here in reverse) used to strengthen the /O entry.
                val = real_O
                for i in range(19, -1, -1):
                    new_key = b_('')
                    for l in range(len(key)):
                        new_key += b_(chr(utils.ord_(key[l]) ^ i))
                    val = utils.RC4_encrypt(new_key, val)
                userpass = val
            owner_password, key = self._authenticateUserPassword(userpass)
            if owner_password:
                self._decryption_key = key
                return 2
        return 0
    def _authenticateUserPassword(self, password):
        """
        Check *password* against the document's /U entry.

        Dispatches on the security-handler revision (/R): revision 2 uses
        ``_alg34``, revision >= 3 uses ``_alg35``.  For revision >= 3 only the
        first 16 bytes of U are compared.

        :return: ``(matched, key)`` where *matched* is a bool and *key* is the
            file decryption key derived during the check.
        """
        encrypt = self.trailer['/Encrypt'].getObject()
        rev = encrypt['/R'].getObject()
        owner_entry = encrypt['/O'].getObject()
        p_entry = encrypt['/P'].getObject()
        id_entry = self.trailer['/ID'].getObject()
        id1_entry = id_entry[0].getObject()
        real_U = encrypt['/U'].getObject().original_bytes
        if rev == 2:
            U, key = _alg34(password, owner_entry, p_entry, id1_entry)
        elif rev >= 3:
            U, key = _alg35(password, rev,
                    encrypt["/Length"].getObject() // 8, owner_entry,
                    p_entry, id1_entry,
                    encrypt.get("/EncryptMetadata", BooleanObject(False)).getObject())
            # Only the first 16 bytes of /U are significant for rev >= 3.
            U, real_U = U[:16], real_U[:16]
        return U == real_U, key
    def getIsEncrypted(self):
        """Return True when the document trailer carries an /Encrypt entry."""
        return "/Encrypt" in self.trailer

    isEncrypted = property(lambda self: self.getIsEncrypted(), None, None)
    """
    Read-only boolean property showing whether this PDF file is encrypted.
    Note that this property, if true, will remain true even after the
    :meth:`decrypt()<PdfFileReader.decrypt>` method is called.
    """
def getRectangle(self, name, defaults):
    """Fetch rectangle *name* from a page dictionary, falling back through
    *defaults*; resolves indirect references and caches the coerced
    RectangleObject back onto the page before returning it."""
    retval = self.get(name)
    if isinstance(retval, RectangleObject):
        return retval
    # NOTE: '== None' comparisons kept verbatim from the original code.
    if retval == None:
        for fallback_name in defaults:
            retval = self.get(fallback_name)
            if retval != None:
                break
    if isinstance(retval, IndirectObject):
        retval = self.pdf.getObject(retval)
    retval = RectangleObject(retval)
    setRectangle(self, name, retval)
    return retval
def setRectangle(self, name, value):
    """Store *value* under *name*, coercing the key to a NameObject."""
    key = name if isinstance(name, NameObject) else NameObject(name)
    self[key] = value
def deleteRectangle(self, name):
    """Remove the rectangle entry *name* from this dictionary."""
    self.__delitem__(name)
def createRectangleAccessor(name, fallback):
    """Build a property that proxies get/set/delete of rectangle *name*,
    using *fallback* names when the entry is absent."""
    def _get(self):
        return getRectangle(self, name, fallback)

    def _set(self, value):
        setRectangle(self, name, value)

    def _delete(self):
        deleteRectangle(self, name)

    return property(_get, _set, _delete)
class PageObject(DictionaryObject):
"""
This class represents a single page within a PDF file. Typically this
object will be created by accessing the
:meth:`getPage()<PyPDF2.PdfFileReader.getPage>` method of the
:class:`PdfFileReader<PyPDF2.PdfFileReader>` class, but it is
also possible to create an empty page with the
:meth:`createBlankPage()<PageObject.createBlankPage>` static method.
:param pdf: PDF file the page belongs to.
:param indirectRef: Stores the original indirect reference to
this object in its source PDF
"""
def __init__(self, pdf=None, indirectRef=None):
DictionaryObject.__init__(self)
self.pdf = pdf
self.indirectRef = indirectRef
def createBlankPage(pdf=None, width=None, height=None):
"""
Returns a new blank page.
If ``width`` or ``height`` is ``None``, try to get the page size
from the last page of *pdf*.
:param pdf: PDF file the page belongs to
:param float width: The width of the new page expressed in default user
space units.
:param float height: The height of the new page expressed in default user
space units.
:return: the new blank page:
:rtype: :class:`PageObject<PageObject>`
:raises PageSizeNotDefinedError: if ``pdf`` is ``None`` or contains
no page
"""
page = PageObject(pdf)
# Creates a new page (cf PDF Reference 7.7.3.3)
page.__setitem__(NameObject('/Type'), NameObject('/Page'))
page.__setitem__(NameObject('/Parent'), NullObject())
page.__setitem__(NameObject('/Resources'), DictionaryObject())
if width is None or height is None:
if pdf is not None and pdf.getNumPages() > 0:
lastpage = pdf.getPage(pdf.getNumPages() - 1)
width = lastpage.mediaBox.getWidth()
height = lastpage.mediaBox.getHeight()
else:
raise utils.PageSizeNotDefinedError()
page.__setitem__(NameObject('/MediaBox'),
RectangleObject([0, 0, width, height]))
return page
createBlankPage = staticmethod(createBlankPage)
def rotateClockwise(self, angle):
"""
Rotates a page clockwise by increments of 90 degrees.
:param int angle: Angle to rotate the page. Must be an increment
of 90 deg.
"""
assert angle % 90 == 0
self._rotate(angle)
return self
def rotateCounterClockwise(self, angle):
"""
Rotates a page counter-clockwise by increments of 90 degrees.
:param int angle: Angle to rotate the page. Must be an increment
of 90 deg.
"""
assert angle % 90 == 0
self._rotate(-angle)
return self
def _rotate(self, angle):
currentAngle = self.get("/Rotate", 0)
self[NameObject("/Rotate")] = NumberObject(currentAngle + angle)
def _mergeResources(res1, res2, resource):
newRes = DictionaryObject()
newRes.update(res1.get(resource, DictionaryObject()).getObject())
page2Res = res2.get(resource, DictionaryObject()).getObject()
renameRes = {}
for key in list(page2Res.keys()):
if key in newRes and newRes.raw_get(key) != page2Res.raw_get(key):
newname = NameObject(key + str(uuid.uuid4()))
renameRes[key] = newname
newRes[newname] = page2Res[key]
elif key not in newRes:
newRes[key] = page2Res.raw_get(key)
return newRes, renameRes
_mergeResources = staticmethod(_mergeResources)
def _contentStreamRename(stream, rename, pdf):
if not rename:
return stream
stream = ContentStream(stream, pdf)
for operands, operator in stream.operations:
for i in range(len(operands)):
op = operands[i]
if isinstance(op, NameObject):
operands[i] = rename.get(op,op)
return stream
_contentStreamRename = staticmethod(_contentStreamRename)
def _pushPopGS(contents, pdf):
# adds a graphics state "push" and "pop" to the beginning and end
# of a content stream. This isolates it from changes such as
# transformation matricies.
stream = ContentStream(contents, pdf)
stream.operations.insert(0, [[], "q"])
stream.operations.append([[], "Q"])
return stream
_pushPopGS = staticmethod(_pushPopGS)
def _addTransformationMatrix(contents, pdf, ctm):
# adds transformation matrix at the beginning of the given
# contents stream.
a, b, c, d, e, f = ctm
contents = ContentStream(contents, pdf)
contents.operations.insert(0, [[FloatObject(a), FloatObject(b),
FloatObject(c), FloatObject(d), FloatObject(e),
FloatObject(f)], " cm"])
return contents
_addTransformationMatrix = staticmethod(_addTransformationMatrix)
def getContents(self):
"""
Accesses the page contents.
:return: the ``/Contents`` object, or ``None`` if it doesn't exist.
``/Contents`` is optional, as described in PDF Reference 7.7.3.3
"""
if "/Contents" in self:
return self["/Contents"].getObject()
else:
return None
def mergePage(self, page2):
"""
Merges the content streams of two pages into one. Resource references
(i.e. fonts) are maintained from both pages. The mediabox/cropbox/etc
of this page are not altered. The parameter page's content stream will
be added to the end of this page's content stream, meaning that it will
be drawn after, or "on top" of this page.
:param PageObject page2: The page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
"""
self._mergePage(page2)
    def _mergePage(self, page2, page2transformation=None, ctm=None, expand=False):
        """
        Core page-merge implementation shared by all mergeXxxPage variants.

        :param page2: page whose content is drawn on top of this one.
        :param page2transformation: optional callable applied to page2's
            content stream (used to inject a 'cm' transformation matrix).
        :param ctm: the 6-element matrix used by *page2transformation*; needed
            again here to transform page2's corners when *expand* is set.
        :param bool expand: grow this page's mediaBox to enclose page2.
        """
        # First we work on merging the resource dictionaries.  This allows us
        # to find out what symbols in the content streams we might need to
        # rename.

        newResources = DictionaryObject()
        rename = {}
        originalResources = self["/Resources"].getObject()
        page2Resources = page2["/Resources"].getObject()
        newAnnots = ArrayObject()

        # Carry over annotations from both pages.
        for page in (self, page2):
            if "/Annots" in page:
                annots = page["/Annots"]
                if isinstance(annots, ArrayObject):
                    for ref in annots:
                        newAnnots.append(ref)

        for res in "/ExtGState", "/Font", "/XObject", "/ColorSpace", "/Pattern", "/Shading", "/Properties":
            new, newrename = PageObject._mergeResources(originalResources, page2Resources, res)
            if new:
                newResources[NameObject(res)] = new
                rename.update(newrename)

        # Combine /ProcSet sets.
        newResources[NameObject("/ProcSet")] = ArrayObject(
            frozenset(originalResources.get("/ProcSet", ArrayObject()).getObject()).union(
                frozenset(page2Resources.get("/ProcSet", ArrayObject()).getObject())
            )
        )

        newContentArray = ArrayObject()

        originalContent = self.getContents()
        if originalContent is not None:
            newContentArray.append(PageObject._pushPopGS(
                  originalContent, self.pdf))

        page2Content = page2.getContents()
        if page2Content is not None:
            if page2transformation is not None:
                page2Content = page2transformation(page2Content)
            page2Content = PageObject._contentStreamRename(
                page2Content, rename, self.pdf)
            page2Content = PageObject._pushPopGS(page2Content, self.pdf)
            newContentArray.append(page2Content)

        # if expanding the page to fit a new page, calculate the new media box size
        if expand:
            corners1 = [self.mediaBox.getLowerLeft_x().as_numeric(), self.mediaBox.getLowerLeft_y().as_numeric(),
                        self.mediaBox.getUpperRight_x().as_numeric(), self.mediaBox.getUpperRight_y().as_numeric()]
            corners2 = [page2.mediaBox.getLowerLeft_x().as_numeric(), page2.mediaBox.getLowerLeft_y().as_numeric(),
                        page2.mediaBox.getUpperLeft_x().as_numeric(), page2.mediaBox.getUpperLeft_y().as_numeric(),
                        page2.mediaBox.getUpperRight_x().as_numeric(), page2.mediaBox.getUpperRight_y().as_numeric(),
                        page2.mediaBox.getLowerRight_x().as_numeric(), page2.mediaBox.getLowerRight_y().as_numeric()]
            if ctm is not None:
                ctm = [float(x) for x in ctm]
                # Run all four corners of page2 through the matrix.
                new_x = [ctm[0]*corners2[i] + ctm[2]*corners2[i+1] + ctm[4] for i in range(0, 8, 2)]
                new_y = [ctm[1]*corners2[i] + ctm[3]*corners2[i+1] + ctm[5] for i in range(0, 8, 2)]
            else:
                new_x = corners2[0:8:2]
                new_y = corners2[1:8:2]
            lowerleft = [min(new_x), min(new_y)]
            upperright = [max(new_x), max(new_y)]
            lowerleft = [min(corners1[0], lowerleft[0]), min(corners1[1], lowerleft[1])]
            upperright = [max(corners1[2], upperright[0]), max(corners1[3], upperright[1])]

            self.mediaBox.setLowerLeft(lowerleft)
            self.mediaBox.setUpperRight(upperright)

        self[NameObject('/Contents')] = ContentStream(newContentArray, self.pdf)
        self[NameObject('/Resources')] = newResources
        self[NameObject('/Annots')] = newAnnots
def mergeTransformedPage(self, page2, ctm, expand=False):
"""
This is similar to mergePage, but a transformation matrix is
applied to the merged stream.
:param PageObject page2: The page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param tuple ctm: a 6-element tuple containing the operands of the
transformation matrix
:param bool expand: Whether the page should be expanded to fit the dimensions
of the page to be merged.
"""
self._mergePage(page2, lambda page2Content:
PageObject._addTransformationMatrix(page2Content, page2.pdf, ctm), ctm, expand)
def mergeScaledPage(self, page2, scale, expand=False):
"""
This is similar to mergePage, but the stream to be merged is scaled
by appling a transformation matrix.
:param PageObject page2: The page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float scale: The scaling factor
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
# CTM to scale : [ sx 0 0 sy 0 0 ]
return self.mergeTransformedPage(page2, [scale, 0,
0, scale,
0, 0], expand)
def mergeRotatedPage(self, page2, rotation, expand=False):
"""
This is similar to mergePage, but the stream to be merged is rotated
by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float rotation: The angle of the rotation, in degrees
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
rotation = math.radians(rotation)
return self.mergeTransformedPage(page2,
[math.cos(rotation), math.sin(rotation),
-math.sin(rotation), math.cos(rotation),
0, 0], expand)
def mergeTranslatedPage(self, page2, tx, ty, expand=False):
"""
This is similar to mergePage, but the stream to be merged is translated
by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float tx: The translation on X axis
:param float ty: The translation on Y axis
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
return self.mergeTransformedPage(page2, [1, 0,
0, 1,
tx, ty], expand)
def mergeRotatedTranslatedPage(self, page2, rotation, tx, ty, expand=False):
"""
This is similar to mergePage, but the stream to be merged is rotated
and translated by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float tx: The translation on X axis
:param float ty: The translation on Y axis
:param float rotation: The angle of the rotation, in degrees
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
translation = [[1, 0, 0],
[0, 1, 0],
[-tx, -ty, 1]]
rotation = math.radians(rotation)
rotating = [[math.cos(rotation), math.sin(rotation), 0],
[-math.sin(rotation), math.cos(rotation), 0],
[0, 0, 1]]
rtranslation = [[1, 0, 0],
[0, 1, 0],
[tx, ty, 1]]
ctm = utils.matrixMultiply(translation, rotating)
ctm = utils.matrixMultiply(ctm, rtranslation)
return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]], expand)
def mergeRotatedScaledPage(self, page2, rotation, scale, expand=False):
"""
This is similar to mergePage, but the stream to be merged is rotated
and scaled by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float rotation: The angle of the rotation, in degrees
:param float scale: The scaling factor
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
rotation = math.radians(rotation)
rotating = [[math.cos(rotation), math.sin(rotation), 0],
[-math.sin(rotation), math.cos(rotation), 0],
[0, 0, 1]]
scaling = [[scale, 0, 0],
[0, scale, 0],
[0, 0, 1]]
ctm = utils.matrixMultiply(rotating, scaling)
return self.mergeTransformedPage(page2,
[ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]], expand)
def mergeScaledTranslatedPage(self, page2, scale, tx, ty, expand=False):
"""
This is similar to mergePage, but the stream to be merged is translated
and scaled by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float scale: The scaling factor
:param float tx: The translation on X axis
:param float ty: The translation on Y axis
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
translation = [[1, 0, 0],
[0, 1, 0],
[tx, ty, 1]]
scaling = [[scale, 0, 0],
[0, scale, 0],
[0, 0, 1]]
ctm = utils.matrixMultiply(scaling, translation)
return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]], expand)
def mergeRotatedScaledTranslatedPage(self, page2, rotation, scale, tx, ty, expand=False):
"""
This is similar to mergePage, but the stream to be merged is translated,
rotated and scaled by appling a transformation matrix.
:param PageObject page2: the page to be merged into this one. Should be
an instance of :class:`PageObject<PageObject>`.
:param float tx: The translation on X axis
:param float ty: The translation on Y axis
:param float rotation: The angle of the rotation, in degrees
:param float scale: The scaling factor
:param bool expand: Whether the page should be expanded to fit the
dimensions of the page to be merged.
"""
translation = [[1, 0, 0],
[0, 1, 0],
[tx, ty, 1]]
rotation = math.radians(rotation)
rotating = [[math.cos(rotation), math.sin(rotation), 0],
[-math.sin(rotation), math.cos(rotation), 0],
[0, 0, 1]]
scaling = [[scale, 0, 0],
[0, scale, 0],
[0, 0, 1]]
ctm = utils.matrixMultiply(rotating, scaling)
ctm = utils.matrixMultiply(ctm, translation)
return self.mergeTransformedPage(page2, [ctm[0][0], ctm[0][1],
ctm[1][0], ctm[1][1],
ctm[2][0], ctm[2][1]], expand)
##
# Applys a transformation matrix the page.
#
# @param ctm A 6 elements tuple containing the operands of the
# transformation matrix
def addTransformation(self, ctm):
"""
Applies a transformation matrix to the page.
:param tuple ctm: A 6-element tuple containing the operands of the
transformation matrix.
"""
originalContent = self.getContents()
if originalContent is not None:
newContent = PageObject._addTransformationMatrix(
originalContent, self.pdf, ctm)
newContent = PageObject._pushPopGS(newContent, self.pdf)
self[NameObject('/Contents')] = newContent
def scale(self, sx, sy):
"""
Scales a page by the given factors by appling a transformation
matrix to its content and updating the page size.
:param float sx: The scaling factor on horizontal axis.
:param float sy: The scaling factor on vertical axis.
"""
self.addTransformation([sx, 0,
0, sy,
0, 0])
self.mediaBox = RectangleObject([
float(self.mediaBox.getLowerLeft_x()) * sx,
float(self.mediaBox.getLowerLeft_y()) * sy,
float(self.mediaBox.getUpperRight_x()) * sx,
float(self.mediaBox.getUpperRight_y()) * sy])
if "/VP" in self:
viewport = self["/VP"]
if isinstance(viewport, ArrayObject):
bbox = viewport[0]["/BBox"]
else:
bbox = viewport["/BBox"]
scaled_bbox = RectangleObject([
float(bbox[0]) * sx,
float(bbox[1]) * sy,
float(bbox[2]) * sx,
float(bbox[3]) * sy])
if isinstance(viewport, ArrayObject):
self[NameObject("/VP")][NumberObject(0)][NameObject("/BBox")] = scaled_bbox
else:
self[NameObject("/VP")][NameObject("/BBox")] = scaled_bbox
def scaleBy(self, factor):
"""
Scales a page by the given factor by appling a transformation
matrix to its content and updating the page size.
:param float factor: The scaling factor (for both X and Y axis).
"""
self.scale(factor, factor)
def scaleTo(self, width, height):
"""
Scales a page to the specified dimentions by appling a
transformation matrix to its content and updating the page size.
:param float width: The new width.
:param float height: The new heigth.
"""
sx = width / float(self.mediaBox.getUpperRight_x() -
self.mediaBox.getLowerLeft_x ())
sy = height / float(self.mediaBox.getUpperRight_y() -
self.mediaBox.getLowerLeft_y ())
self.scale(sx, sy)
def compressContentStreams(self):
"""
Compresses the size of this page by joining all content streams and
applying a FlateDecode filter.
However, it is possible that this function will perform no action if
content stream compression becomes "automatic" for some reason.
"""
content = self.getContents()
if content is not None:
if not isinstance(content, ContentStream):
content = ContentStream(content, self.pdf)
self[NameObject("/Contents")] = content.flateEncode()
def extractText(self):
"""
Locate all text drawing commands, in the order they are provided in the
content stream, and extract the text. This works well for some PDF
files, but poorly for others, depending on the generator used. This will
be refined in the future. Do not rely on the order of text coming out of
this function, as it will change if this function is made more
sophisticated.
:return: a unicode string object.
"""
text = u_("")
content = self["/Contents"].getObject()
if not isinstance(content, ContentStream):
content = ContentStream(content, self.pdf)
# Note: we check all strings are TextStringObjects. ByteStringObjects
# are strings where the byte->string encoding was unknown, so adding
# them to the text here would be gibberish.
for operands, operator in content.operations:
if operator == b_("Tj"):
_text = operands[0]
if isinstance(_text, TextStringObject):
text += _text
elif operator == b_("T*"):
text += "\n"
elif operator == b_("'"):
text += "\n"
_text = operands[0]
if isinstance(_text, TextStringObject):
text += operands[0]
elif operator == b_('"'):
_text = operands[2]
if isinstance(_text, TextStringObject):
text += "\n"
text += _text
elif operator == b_("TJ"):
for i in operands[0]:
if isinstance(i, TextStringObject):
text += i
text += "\n"
return text
mediaBox = createRectangleAccessor("/MediaBox", ())
"""
A :class:`RectangleObject<PyPDF2.generic.RectangleObject>`, expressed in default user space units,
defining the boundaries of the physical medium on which the page is
intended to be displayed or printed.
"""
cropBox = createRectangleAccessor("/CropBox", ("/MediaBox",))
"""
A :class:`RectangleObject<PyPDF2.generic.RectangleObject>`, expressed in default user space units,
defining the visible region of default user space. When the page is
displayed or printed, its contents are to be clipped (cropped) to this
rectangle and then imposed on the output medium in some
implementation-defined manner. Default value: same as :attr:`mediaBox<mediaBox>`.
"""
bleedBox = createRectangleAccessor("/BleedBox", ("/CropBox", "/MediaBox"))
"""
A :class:`RectangleObject<PyPDF2.generic.RectangleObject>`, expressed in default user space units,
defining the region to which the contents of the page should be clipped
when output in a production enviroment.
"""
trimBox = createRectangleAccessor("/TrimBox", ("/CropBox", "/MediaBox"))
"""
A :class:`RectangleObject<PyPDF2.generic.RectangleObject>`, expressed in default user space units,
defining the intended dimensions of the finished page after trimming.
"""
artBox = createRectangleAccessor("/ArtBox", ("/CropBox", "/MediaBox"))
"""
A :class:`RectangleObject<PyPDF2.generic.RectangleObject>`, expressed in default user space units,
defining the extent of the page's meaningful content as intended by the
page's creator.
"""
class ContentStream(DecodedStreamObject):
    """
    A decoded PDF content stream, parsed into an ``operations`` list of
    ``(operands, operator)`` tuples.

    ``stream`` may be a single StreamObject or an ArrayObject of
    StreamObjects whose data is concatenated before parsing.
    """
    def __init__(self, stream, pdf):
        self.pdf = pdf
        # List of (operands, operator) tuples, in content-stream order.
        self.operations = []
        # stream may be a StreamObject or an ArrayObject containing
        # multiple StreamObjects to be cat'd together.
        stream = stream.getObject()
        if isinstance(stream, ArrayObject):
            data = b_("")
            for s in stream:
                data += s.getObject().getData()
            stream = BytesIO(b_(data))
        else:
            stream = BytesIO(b_(stream.getData()))
        self.__parseContentStream(stream)

    def __parseContentStream(self, stream):
        """Tokenize the raw content stream bytes into self.operations."""
        stream.seek(0, 0)
        operands = []
        while True:
            peek = readNonWhitespace(stream)
            if peek == b_('') or ord_(peek) == 0:
                # End of stream (or a NUL byte, treated as terminator).
                break
            stream.seek(-1, 1)
            if peek.isalpha() or peek == b_("'") or peek == b_('"'):
                # An operator: consume it and flush the pending operands.
                operator = utils.readUntilRegex(stream,
                        NameObject.delimiterPattern, True)
                if operator == b_("BI"):
                    # begin inline image - a completely different parsing
                    # mechanism is required, of course... thanks buddy...
                    assert operands == []
                    ii = self._readInlineImage(stream)
                    self.operations.append((ii, b_("INLINE IMAGE")))
                else:
                    self.operations.append((operands, operator))
                    operands = []
            elif peek == b_('%'):
                # If we encounter a comment in the content stream, we have to
                # handle it here.  Typically, readObject will handle
                # encountering a comment -- but readObject assumes that
                # following the comment must be the object we're trying to
                # read.  In this case, it could be an operator instead.
                while peek not in (b_('\r'), b_('\n')):
                    peek = stream.read(1)
            else:
                # Anything else is an operand for the next operator.
                operands.append(readObject(stream, None))

    def _readInlineImage(self, stream):
        """Parse a BI ... ID ... EI inline image; return settings + raw data."""
        # begin reading just after the "BI" - begin image
        # first read the dictionary of settings.
        settings = DictionaryObject()
        while True:
            tok = readNonWhitespace(stream)
            stream.seek(-1, 1)
            if tok == b_("I"):
                # "ID" - begin of image data
                break
            key = readObject(stream, self.pdf)
            tok = readNonWhitespace(stream)
            stream.seek(-1, 1)
            value = readObject(stream, self.pdf)
            settings[key] = value
        # left at beginning of ID
        tmp = stream.read(3)
        assert tmp[:2] == b_("ID")
        data = b_("")
        while True:
            # Read the inline image, while checking for EI (End Image) operator.
            tok = stream.read(1)
            if tok == b_("E"):
                # Check for End Image
                tok2 = stream.read(1)
                if tok2 == b_("I"):
                    # Data can contain EI, so check for the Q operator.
                    tok3 = stream.read(1)
                    info = tok + tok2
                    # We need to find whitespace between EI and Q.
                    has_q_whitespace = False
                    while tok3 in utils.WHITESPACES:
                        has_q_whitespace = True
                        info += tok3
                        tok3 = stream.read(1)
                    if tok3 == b_("Q") and has_q_whitespace:
                        stream.seek(-1, 1)
                        break
                    else:
                        # False alarm: "EI" was part of the image data.
                        stream.seek(-1, 1)
                        data += info
                else:
                    stream.seek(-1, 1)
                    data += tok
            else:
                data += tok
        return {"settings": settings, "data": data}

    def _getData(self):
        """Re-serialize self.operations back into raw content-stream bytes."""
        newdata = BytesIO()
        for operands, operator in self.operations:
            if operator == b_("INLINE IMAGE"):
                newdata.write(b_("BI"))
                dicttext = BytesIO()
                operands["settings"].writeToStream(dicttext, None)
                # Drop the first and last two bytes of the serialized
                # settings dictionary before embedding it.
                newdata.write(dicttext.getvalue()[2:-2])
                newdata.write(b_("ID "))
                newdata.write(operands["data"])
                newdata.write(b_("EI"))
            else:
                for op in operands:
                    op.writeToStream(newdata, None)
                    newdata.write(b_(" "))
                newdata.write(b_(operator))
            newdata.write(b_("\n"))
        return newdata.getvalue()

    def _setData(self, value):
        """Replace self.operations by re-parsing raw content-stream bytes."""
        self.__parseContentStream(BytesIO(b_(value)))

    # Raw stream data, kept in sync with self.operations via the
    # accessors above.
    _data = property(_getData, _setData)
class DocumentInformation(DictionaryObject):
    """
    A class representing the basic document metadata provided in a PDF File.
    This class is accessible through
    :meth:`getDocumentInfo()<PyPDF2.PdfFileReader.getDocumentInfo()>`

    All text properties of the document metadata have *two* properties, eg.
    author and author_raw.  The non-raw property will always return a
    ``TextStringObject``, making it ideal for a case where the metadata is
    being displayed.  The raw property can sometimes return a
    ``ByteStringObject``, if PyPDF2 was unable to decode the string's text
    encoding; this requires additional safety in the caller and therefore
    is not as commonly accessed.
    """

    def __init__(self):
        DictionaryObject.__init__(self)

    def getText(self, key):
        """Return the value for *key* only if it decoded to text, else None."""
        value = self.get(key, None)
        return value if isinstance(value, TextStringObject) else None

    @property
    def title(self):
        """Read-only property accessing the document's **title**.
        Returns a unicode string (``TextStringObject``) or ``None``
        if the title is not specified."""
        return self.getText("/Title")

    @property
    def title_raw(self):
        """The "raw" version of title; can return a ``ByteStringObject``."""
        return self.get("/Title")

    @property
    def author(self):
        """Read-only property accessing the document's **author**.
        Returns a unicode string (``TextStringObject``) or ``None``
        if the author is not specified."""
        return self.getText("/Author")

    @property
    def author_raw(self):
        """The "raw" version of author; can return a ``ByteStringObject``."""
        return self.get("/Author")

    @property
    def subject(self):
        """Read-only property accessing the document's **subject**.
        Returns a unicode string (``TextStringObject``) or ``None``
        if the subject is not specified."""
        return self.getText("/Subject")

    @property
    def subject_raw(self):
        """The "raw" version of subject; can return a ``ByteStringObject``."""
        return self.get("/Subject")

    @property
    def creator(self):
        """Read-only property accessing the document's **creator**. If the
        document was converted to PDF from another format, this is the name of
        the application (e.g. OpenOffice) that created the original document
        from which it was converted. Returns a unicode string
        (``TextStringObject``) or ``None`` if the creator is not specified."""
        return self.getText("/Creator")

    @property
    def creator_raw(self):
        """The "raw" version of creator; can return a ``ByteStringObject``."""
        return self.get("/Creator")

    @property
    def producer(self):
        """Read-only property accessing the document's **producer**.
        If the document was converted to PDF from another format, this is
        the name of the application (for example, OSX Quartz) that converted
        it to PDF. Returns a unicode string (``TextStringObject``)
        or ``None`` if the producer is not specified."""
        return self.getText("/Producer")

    @property
    def producer_raw(self):
        """The "raw" version of producer; can return a ``ByteStringObject``."""
        return self.get("/Producer")
def convertToInt(d, size):
    """
    Interpret the last (up to 8) bytes of *d*, left-padded with NUL bytes,
    as a big-endian signed 64-bit integer.

    :raises PdfReadError: if *size* is greater than 8 bytes.
    """
    if size > 8:
        raise utils.PdfReadError("invalid size in convertToInt")
    padded = b_("\x00\x00\x00\x00\x00\x00\x00\x00") + b_(d)
    return struct.unpack(">q", padded[-8:])[0]
# The fixed 32-byte password padding string used to pad/truncate user and
# owner passwords.  ref: pdf1.8 spec section 3.5.2 algorithm 3.2
_encryption_padding = b_('\x28\xbf\x4e\x5e\x4e\x75\x8a\x41\x64\x00\x4e\x56') + \
        b_('\xff\xfa\x01\x08\x2e\x2e\x00\xb6\xd0\x68\x3e\x80\x2f\x0c') + \
        b_('\xa9\xfe\x64\x53\x69\x7a')
# Implementation of algorithm 3.2 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt=True):
    """
    Derive the RC4 encryption key from a user password -- algorithm 3.2 of
    the PDF standard security handler (PDF 1.6 reference, section 3.5.2).

    :param password: the user password.
    :param int rev: the /R revision entry of the encryption dictionary.
    :param int keylen: length of the key to derive, in bytes.
    :param owner_entry: the /O entry of the encryption dictionary.
    :param int p_entry: the /P permissions entry.
    :param id1_entry: first element of the document's file identifier array.
    :param bool metadata_encrypt: whether document metadata is encrypted
        (only consulted for revision 3 or greater).
    :return: the first *keylen* bytes of the final MD5 digest.
    """
    # Hoisted to the top of the function instead of being buried between
    # the algorithm steps below (it was originally above step 2).
    import struct
    # 1. Pad or truncate the password string to exactly 32 bytes. If the
    # password string is more than 32 bytes long, use only its first 32 bytes;
    # if it is less than 32 bytes long, pad it by appending the required number
    # of additional bytes from the beginning of the padding string
    # (_encryption_padding).
    password = b_((str_(password) + str_(_encryption_padding))[:32])
    # 2. Initialize the MD5 hash function and pass the result of step 1 as
    # input to this function.
    m = md5(password)
    # 3. Pass the value of the encryption dictionary's /O entry to the MD5 hash
    # function.
    m.update(owner_entry.original_bytes)
    # 4. Treat the value of the /P entry as an unsigned 4-byte integer and pass
    # these bytes to the MD5 hash function, low-order byte first.
    p_entry = struct.pack('<i', p_entry)
    m.update(p_entry)
    # 5. Pass the first element of the file's file identifier array to the MD5
    # hash function.
    m.update(id1_entry.original_bytes)
    # 6. (Revision 3 or greater) If document metadata is not being encrypted,
    # pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
    if rev >= 3 and not metadata_encrypt:
        m.update(b_("\xff\xff\xff\xff"))
    # 7. Finish the hash.
    md5_hash = m.digest()
    # 8. (Revision 3 or greater) Do the following 50 times: Take the output
    # from the previous MD5 hash and pass the first n bytes of the output as
    # input into a new MD5 hash, where n is the number of bytes of the
    # encryption key as defined by the value of the encryption dictionary's
    # /Length entry.
    if rev >= 3:
        for i in range(50):
            md5_hash = md5(md5_hash[:keylen]).digest()
    # 9. Set the encryption key to the first n bytes of the output from the
    # final MD5 hash, where n is always 5 for revision 2 but, for revision 3 or
    # greater, depends on the value of the encryption dictionary's /Length
    # entry.
    return md5_hash[:keylen]
# Implementation of algorithm 3.3 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg33(owner_pwd, user_pwd, rev, keylen):
    """
    Compute the /O (owner password) entry value -- algorithm 3.3 of the PDF
    standard security handler (PDF 1.6 reference, section 3.5.2).

    :param owner_pwd: the owner password.
    :param user_pwd: the user password.
    :param int rev: the /R revision entry of the encryption dictionary.
    :param int keylen: length of the RC4 key, in bytes.
    :return: the value to store as /O in the encryption dictionary.
    """
    # steps 1 - 4
    key = _alg33_1(owner_pwd, rev, keylen)
    # 5. Pad or truncate the user password string as described in step 1 of
    # algorithm 3.2.
    user_pwd = b_((user_pwd + str_(_encryption_padding))[:32])
    # 6. Encrypt the result of step 5, using an RC4 encryption function with
    # the encryption key obtained in step 4.
    val = utils.RC4_encrypt(key, user_pwd)
    # 7. (Revision 3 or greater) Do the following 19 times: Take the output
    # from the previous invocation of the RC4 function and pass it as input to
    # a new invocation of the function; use an encryption key generated by
    # taking each byte of the encryption key obtained in step 4 and performing
    # an XOR operation between that byte and the single-byte value of the
    # iteration counter (from 1 to 19).
    if rev >= 3:
        for i in range(1, 20):
            # Accumulate the XOR'd key as bytes via b_(), matching the
            # identical loop in _alg35; the original accumulated a text
            # str ('') here, inconsistent with its sibling.
            new_key = b_('')
            for l in range(len(key)):
                new_key += b_(chr(ord_(key[l]) ^ i))
            val = utils.RC4_encrypt(new_key, val)
    # 8. Store the output from the final invocation of the RC4 as the value of
    # the /O entry in the encryption dictionary.
    return val
# Steps 1-4 of algorithm 3.3
def _alg33_1(password, rev, keylen):
    """
    Steps 1-4 of algorithm 3.3: derive the RC4 key used to produce the /O
    entry.  If there is no owner password, the caller passes the user
    password instead.
    """
    # 1. Pad or truncate the password (algorithm 3.2, step 1).
    padded = b_((password + str_(_encryption_padding))[:32])
    # 2. Hash the padded password with MD5.
    md5_hash = md5(padded).digest()
    # 3. (Revision 3 or greater) re-hash the digest 50 times.
    if rev >= 3:
        for _ in range(50):
            md5_hash = md5(md5_hash).digest()
    # 4. The RC4 key is the first keylen bytes of the final digest
    # (keylen is always 5 for revision 2; for revision 3 or greater it
    # comes from the encryption dictionary's /Length entry).
    return md5_hash[:keylen]
# Implementation of algorithm 3.4 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg34(password, owner_entry, p_entry, id1_entry):
    """
    Compute the /U (user password) entry value for revision 2 -- algorithm
    3.4 of the PDF standard security handler (PDF 1.6 reference, section
    3.5.2).

    :return: a ``(U, key)`` tuple: the /U entry value and the RC4 key.
    """
    # 1. Derive the encryption key from the user password (algorithm 3.2;
    # revision 2 always uses a 5-byte key).
    rc4_key = _alg32(password, 2, 5, owner_entry, p_entry, id1_entry)
    # 2-3. Encrypt the standard 32-byte padding string with that key; the
    # result is stored as /U in the encryption dictionary.
    U = utils.RC4_encrypt(rc4_key, _encryption_padding)
    return U, rc4_key
# Implementation of algorithm 3.4 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg35(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt):
    """
    Compute the /U (user password) entry value for revision 3 or greater --
    algorithm 3.5 of the PDF standard security handler (PDF 1.6 reference,
    section 3.5.2).

    :return: a ``(U, key)`` tuple: the /U entry value and the RC4 key.
    """
    # 1. Derive the encryption key from the user password (algorithm 3.2).
    key = _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry)
    # 2-3. MD5-hash the standard 32-byte padding string followed by the
    # first element of the file's identifier array (the /ID entry of the
    # trailer dictionary).
    hasher = md5()
    hasher.update(_encryption_padding)
    hasher.update(id1_entry.original_bytes)
    digest = hasher.digest()
    # 4. Encrypt the 16-byte digest with the key from step 1.
    cipher = utils.RC4_encrypt(key, digest)
    # 5. Re-encrypt 19 times; each round uses the original key with every
    # byte XOR'd against the iteration counter (1..19).
    for counter in range(1, 20):
        xor_key = b_('')
        for idx in range(len(key)):
            xor_key += b_(chr(ord_(key[idx]) ^ counter))
        cipher = utils.RC4_encrypt(xor_key, cipher)
    # 6. Append 16 bytes of padding to reach the 32-byte /U value.
    # (Implementor note: the spec says "arbitrary padding"; null bytes are
    # used here, which matches several other implementations.)
    return cipher + (b_('\x00') * 16), key
|
{
"content_hash": "a0c8189559966a46e9c05b1f5e98a3f8",
"timestamp": "",
"source": "github",
"line_count": 2971,
"max_line_length": 137,
"avg_line_length": 41.97542914843487,
"alnum_prop": 0.5595025218709155,
"repo_name": "Ashaba/rms",
"id": "9979414ff021c6bcfc9839e9f1575f3ffff8ecba",
"size": "126327",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rmslocalenv/lib/python2.7/site-packages/PyPDF2/pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "650487"
},
{
"name": "HTML",
"bytes": "2109946"
},
{
"name": "JavaScript",
"bytes": "3041523"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "10446887"
},
{
"name": "Shell",
"bytes": "3332"
}
],
"symlink_target": ""
}
|
"""Example script to create a exe of the breakout example using py2exe.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import os
from distutils.core import setup
import py2exe
import pymunk
# Fix to make py2exe include some dlls it needs but doesnt include by default.
origIsSystemDLL = py2exe.build_exe.isSystemDLL # save the orginal before we edit it
def isSystemDLL(pathname):
# checks if the freetype and ogg dll files are being included
if os.path.basename(pathname).lower() in ("libfreetype-6.dll", "libogg-0.dll", "sdl_ttf.dll"):
return 0
return origIsSystemDLL(pathname) # return the orginal function
py2exe.build_exe.isSystemDLL = isSystemDLL # override the default function with this one
pymunk_dir = os.path.dirname(pymunk.__file__)
setup(
console=['breakout.py']
, data_files = [os.path.join(pymunk_dir, 'chipmunk.dll')]
, zipfile = None
, options = {"py2exe":{"bundle_files":1}}
)
|
{
"content_hash": "27eab9438ab00a787e95a2d5f989f0ff",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 98,
"avg_line_length": 31.225806451612904,
"alnum_prop": 0.7004132231404959,
"repo_name": "saintdragon2/python-3-lecture-2015",
"id": "903f0c250f1d9a7c30367c76f93d80abb6e0c50c",
"size": "968",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "civil_mid_mid/pymunk-4.0.0/examples/py2exe_setup__breakout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5095"
},
{
"name": "C",
"bytes": "460322"
},
{
"name": "C++",
"bytes": "104809"
},
{
"name": "CSS",
"bytes": "18133"
},
{
"name": "HTML",
"bytes": "1403533"
},
{
"name": "Java",
"bytes": "11586"
},
{
"name": "JavaScript",
"bytes": "57050"
},
{
"name": "Makefile",
"bytes": "5563"
},
{
"name": "PHP",
"bytes": "10268"
},
{
"name": "Python",
"bytes": "4462240"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.