from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
# The next two lines enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'', include('main_site.urls', namespace="main_site", app_name="main_site")),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name='login'),
url(r'administration/', include(admin.site.urls), name="admin"),
url(r'^fonts/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': "%s/main_site/fonts" % settings.STATIC_ROOT,
}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
|
{
"content_hash": "8cc1924f942338cf313bb6c7d363bf0f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 35.095238095238095,
"alnum_prop": 0.6540027137042063,
"repo_name": "skoczen/artechetype",
"id": "a847a12e26a980f57e66be6daaf57641e11d2d13",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "68881"
},
{
"name": "JavaScript",
"bytes": "645"
},
{
"name": "Python",
"bytes": "44194"
}
],
"symlink_target": ""
}
|
import os
_ = gettext = lambda s: s
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__)) + '/../'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Warsaw'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pl'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '50qwz*@_w**hcydw&o&tlbw%041gs_ftw!qo%dog%)_0py#w9&'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH) + '/templates/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'south',
'{{ project_name }}.apps.common',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
)
LANGUAGES = [
('pl', 'Polski'),
]
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
|
{
"content_hash": "14c8d10717e3cab2aff02f56245f57f0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 75,
"avg_line_length": 30.84269662921348,
"alnum_prop": 0.7260473588342441,
"repo_name": "makimo/django-project-template",
"id": "078fef86c27f7047c6fbf8ba9ad5c501692dca45",
"size": "2770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "3238"
},
{
"name": "Python",
"bytes": "7136"
}
],
"symlink_target": ""
}
|
process_name = 'tornadoircd'
##
# Local time
##
import pytz, datetime
localtime = pytz.timezone('America/Sao_Paulo')
now = datetime.datetime.now(localtime)
##
# IRCd
##
from datetime import datetime
ircd = {
'name': 'irc.testnetwork.org',
'date': str(now),
'listen': [('127.0.0.1', 6667)],
'motd': ['Welcome to my humble server!',
'This server is a test version of tornadoirc',
'If you find any issues, please report to the developer.'],
'nicklen': 30,
'chanlen': 50,
'pinginterval': 120,
'pingtimeout': 60,
'chanlimit': 100,
}
##
# Profiling
##
profiling = True
|
{
"content_hash": "8851ebbdd6521badcf3420e20b40551d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 20.258064516129032,
"alnum_prop": 0.6114649681528662,
"repo_name": "leandropls/tornadoirc",
"id": "debdcb16ce865ea1ff1e0a1318c86d24fe350d80",
"size": "661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "66818"
}
],
"symlink_target": ""
}
|
"""Pipelines for mapreduce library."""
__all__ = [
"MapperPipeline",
]
from mapreduce import control
from mapreduce import model
from mapreduce import parameters
from mapreduce import pipeline_base
# pylint: disable=g-bad-name
# pylint: disable=protected-access
class MapperPipeline(pipeline_base._OutputSlotsMixin,
pipeline_base.PipelineBase):
"""Pipeline wrapper for mapper job.
Args:
job_name: mapper job name as string
handler_spec: mapper handler specification as string.
input_reader_spec: input reader specification as string.
output_writer_spec: output writer specification as string.
params: mapper parameters for input reader and output writer as dict.
shards: number of shards in the job as int.
Returns:
default: the list of filenames produced by the mapper if there was any
output and the map was completed successfully.
result_status: one of model.MapreduceState._RESULTS.
job_id: mr id that can be used to query model.MapreduceState. Available
immediately after run returns.
"""
async = True
# TODO(user): we probably want to output counters too.
# Might also need to double filenames as named output.
output_names = [
# Job ID. MapreduceState.get_by_job_id can be used to load
# mapreduce state.
"job_id",
# Dictionary of final counter values. Filled when job is completed.
"counters"] + pipeline_base._OutputSlotsMixin.output_names
def run(self,
job_name,
handler_spec,
input_reader_spec,
output_writer_spec=None,
params=None,
shards=None,
base_path=None):
"""Start a mapreduce job.
Args:
job_name: mapreduce name. Only for display purposes.
handler_spec: fully qualified name to your map function/class.
input_reader_spec: fully qualified name to input reader class.
output_writer_spec: fully qualified name to output writer class.
params: a dictionary of parameters for input reader and output writer
initialization.
shards: number of shards. This provides a guide to mapreduce. The real
number of shards is determined by how the input is split.
"""
if shards is None:
shards = parameters.config.SHARD_COUNT
if base_path is None:
base_path = parameters.config.BASE_PATH
mapreduce_id = control.start_map(
job_name,
handler_spec,
input_reader_spec,
params or {},
mapreduce_parameters={
"done_callback": self.get_callback_url(),
"done_callback_method": "GET",
"pipeline_id": self.pipeline_id,
"base_path": base_path,
},
shard_count=shards,
output_writer_spec=output_writer_spec,
queue_name=self.queue_name,
)
self.fill(self.outputs.job_id, mapreduce_id)
self.set_status(console_url="%s/detail?mapreduce_id=%s" % (
(base_path, mapreduce_id)))
def try_cancel(self):
"""Always allow mappers to be canceled and retried."""
return True
def callback(self):
"""Callback after this async pipeline finishes."""
if self.was_aborted:
return
mapreduce_id = self.outputs.job_id.value
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state.result_status != model.MapreduceState.RESULT_SUCCESS:
self.retry("Job %s had status %s" % (
mapreduce_id, mapreduce_state.result_status))
return
mapper_spec = mapreduce_state.mapreduce_spec.mapper
outputs = []
output_writer_class = mapper_spec.output_writer_class()
if (output_writer_class and
mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
outputs = output_writer_class.get_filenames(mapreduce_state)
self.fill(self.outputs.result_status, mapreduce_state.result_status)
self.fill(self.outputs.counters, mapreduce_state.counters_map.to_dict())
self.complete(outputs)
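# A rough usage sketch of the pipeline above (the handler and input reader
# specs below are illustrative placeholders, not values from this repository):
#
#   pipe = MapperPipeline(
#       "word count",                                     # job_name, display only
#       "myapp.mappers.count_words",                      # handler_spec
#       "mapreduce.input_readers.DatastoreInputReader",   # input_reader_spec
#       params={"entity_kind": "myapp.models.Document"},
#       shards=8)
#   pipe.start()
#   # once the job completes, pipe.outputs.job_id, .counters and .result_status
#   # are filled as described in the class docstring.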
|
{
"content_hash": "7498d71a564e335f5394ce6cd8c71ce5",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 34.26495726495727,
"alnum_prop": 0.6727363432277376,
"repo_name": "Candreas/mapreduce",
"id": "3aad27fbd8520e8dda59668b7f3822df0366630a",
"size": "4628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/mapreduce/mapper_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3112"
},
{
"name": "HTML",
"bytes": "11973"
},
{
"name": "Java",
"bytes": "1042871"
},
{
"name": "JavaScript",
"bytes": "58331"
},
{
"name": "Python",
"bytes": "1101302"
},
{
"name": "Shell",
"bytes": "2416"
}
],
"symlink_target": ""
}
|
from argus.scenarios import base
class CloudScenario(base.BaseScenario):
"""Base scenario class for testing Cloudbase-Init."""
service_type = 'http'
@classmethod
def prepare_recipe(cls):
"""Prepare the underlying recipe.
Prepare the recipe using custom behavior tailored to Cloudbase-Init.
"""
return cls.recipe.prepare(service_type=cls.service_type)
|
{
"content_hash": "bd4795f16f506f7e6b5873609c93b1e5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 24.88235294117647,
"alnum_prop": 0.6572104018912529,
"repo_name": "stefan-caraiman/cloudbase-init-ci",
"id": "472ac48c5e5669f8ee8349998f57e066dee4fe8b",
"size": "1075",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "argus/scenarios/cloud/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "292"
},
{
"name": "PowerShell",
"bytes": "17366"
},
{
"name": "Python",
"bytes": "529051"
}
],
"symlink_target": ""
}
|
def value(colors):
pass
|
{
"content_hash": "4d114af24d5365afdba38856f81583ba",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 18,
"avg_line_length": 14,
"alnum_prop": 0.6428571428571429,
"repo_name": "exercism/xpython",
"id": "f83a1c237cb31b7325a803fc7d971f24f566623c",
"size": "28",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "exercises/resistor-color-duo/resistor_color_duo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "268401"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
}
|
"""Tests for `models.py` (model cloning, mainly)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import metrics
from tensorflow.python.keras import models
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class TestModel(keras.Model):
"""A model subclass."""
def __init__(self, n_outputs=4, trainable=True):
"""A test class with one dense layer and number of outputs as a variable."""
super(TestModel, self).__init__()
self.layer1 = keras.layers.Dense(n_outputs)
self.n_outputs = resource_variable_ops.ResourceVariable(
n_outputs, trainable=trainable)
def call(self, x):
return self.layer1(x)
class TestModelCloning(test.TestCase):
def test_clone_sequential_model(self):
with self.cached_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.cached_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
# update ops from batch norm need to be included
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new tensor
input_a = keras.Input(shape=(4,))
new_model = keras.models.clone_model(
model, input_tensors=input_a)
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new, non-Keras tensor
input_a = keras.backend.variable(val_a)
new_model = keras.models.clone_model(
model, input_tensors=input_a)
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_clone_functional_model(self):
with self.cached_session():
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_a = keras.layers.BatchNormalization()(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# Everything should work in a new session.
keras.backend.clear_session()
with self.cached_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
self.assertEquals(len(new_model.get_updates_for(new_model.inputs)), 2)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
@test_util.run_in_graph_and_eager_modes
def test_clone_functional_model_with_masking(self):
with self.cached_session():
x = np.array([[[1], [1]], [[0], [0]]])
inputs = keras.Input((2, 1))
outputs = keras.layers.Masking(mask_value=0)(inputs)
outputs = keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one'))(outputs)
model = keras.Model(inputs, outputs)
model = keras.models.clone_model(model)
model.compile(loss='mse', optimizer=adam.AdamOptimizer(0.01))
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_optimizer_dependency(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=(4,)))
opt = adam.AdamOptimizer(0.01)
model.compile(optimizer=opt, loss='mse')
model.fit(x=np.array([[1., 2., 3., 4.]]), y=[1.], epochs=2)
save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
beta1_power, _ = opt._get_beta_accumulators()
self.evaluate(beta1_power.assign(12.))
model.save_weights(save_prefix)
self.evaluate(beta1_power.assign(13.))
model.load_weights(save_prefix)
self.assertEqual(12., self.evaluate(beta1_power))
class TestModelBackend(test.TestCase):
def test_model_backend_float64_use_cases(self):
# Test case for GitHub issue 19318
floatx = keras.backend.floatx()
keras.backend.set_floatx('float64')
x = keras.Input((5,))
y = keras.layers.Dense(1)(x)
model = keras.models.Model(x, y)
model.compile('rmsprop', 'mse')
keras.backend.set_floatx(floatx)
class TestModelDeepCopy(test.TestCase):
def test_deep_copy_eager_mode_trainable(self):
with context.eager_mode():
x = random_ops.random_normal((32, 4))
model = TestModel(trainable=True)
model(x) # Initialize Variables.
model_copy = copy.deepcopy(model)
self.assertEqual(len(model_copy.trainable_variables), 3)
model_copy.n_outputs.assign(1200)
self.assertFalse(
np.allclose(model_copy.n_outputs.numpy(),
model.n_outputs.numpy()))
def test_deep_copy_eager_mode_not_trainable(self):
with context.eager_mode():
x = random_ops.random_normal((32, 4))
model = TestModel(trainable=False)
model(x)
model_copy = copy.deepcopy(model)
self.assertEqual(len(model_copy.trainable_variables), 2)
weights = model_copy.get_weights()
weights = [w * 4 for w in weights]
model_copy.set_weights(weights)
self.assertFalse(
np.allclose(model.get_weights()[0],
model_copy.get_weights()[0]))
class TestCloneAndBuildModel(test.TestCase):
def test_clone_and_build_non_compiled_model(self):
with self.cached_session():
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.cached_session():
# With placeholder creation
new_model = models.clone_and_build_model(model, compile_clone=True)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.evaluate(inp, out)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.train_on_batch(inp, out)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(inp, out)
# Create new tensors for inputs and targets
input_a = keras.Input(shape=(4,))
target_a = keras.Input(shape=(4,))
new_model = models.clone_and_build_model(model, input_tensors=input_a,
target_tensors=[target_a],
compile_clone=True)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.evaluate(inp, out)
with self.assertRaisesRegexp(RuntimeError, 'must compile'):
new_model.train_on_batch(inp, out)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(inp, out)
def _assert_same_compile_params(self, model):
"""Assert that two models have the same compile parameters."""
self.assertEqual('mse', model.loss)
self.assertTrue(
isinstance(model.optimizer, keras.optimizers.RMSprop))
self.assertEqual(['acc', metrics.categorical_accuracy], model.metrics)
def _clone_and_build_test_helper(self, model, is_subclassed=False):
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.cached_session():
# With placeholder creation
new_model = models.clone_and_build_model(
model, compile_clone=True, in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
# Create new tensors for inputs and targets
input_a = keras.Input(shape=(4,), name='a')
new_model = models.clone_and_build_model(
model, input_tensors=input_a, compile_clone=True,
in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
target_a = keras.Input(shape=(4,), name='b')
new_model = models.clone_and_build_model(
model, input_tensors=input_a, target_tensors=[target_a],
compile_clone=True, in_place_reset=is_subclassed)
self._assert_same_compile_params(new_model)
new_model.train_on_batch(inp, out)
new_model.evaluate(inp, out)
def test_clone_and_build_compiled_sequential_model(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
model.compile('rmsprop', 'mse',
metrics=['acc', metrics.categorical_accuracy])
self._clone_and_build_test_helper(model)
def test_clone_and_build_functional_model(self):
with self.cached_session():
input_a = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_a = keras.layers.BatchNormalization()(x_a)
x_a = dense_2(x_a)
model = keras.models.Model(input_a, x_a)
model.compile('rmsprop', 'mse',
metrics=['acc', metrics.categorical_accuracy])
self._clone_and_build_test_helper(model)
def test_clone_and_build_subclassed_model(self):
class SubclassedModel(keras.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.layer1 = keras.layers.Dense(4)
self.layer2 = keras.layers.Dense(4)
def call(self, inp):
out = self.layer1(inp)
out = keras.layers.BatchNormalization()(out)
out = keras.layers.Dropout(0.5)(out)
out = self.layer2(out)
return out
with self.cached_session():
model = SubclassedModel()
model.compile('rmsprop', 'mse',
metrics=['acc', metrics.categorical_accuracy])
self._clone_and_build_test_helper(model, True)
def assert_optimizer_iterations_increases(self, optimizer):
with self.cached_session():
input_a = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_a = keras.layers.BatchNormalization()(x_a)
x_a = dense_2(x_a)
model = keras.models.Model(input_a, x_a)
model.compile(optimizer, 'mse',
metrics=['acc', metrics.categorical_accuracy])
global_step = keras.backend.variable(123, dtype=dtypes.int64)
clone_model = models.clone_and_build_model(
model, compile_clone=True, optimizer_iterations=global_step)
inp = np.random.random((10, 4))
out = np.random.random((10, 4))
clone_model.train_on_batch(inp, out)
self.assertEqual(K.eval(global_step), 124)
def test_replace_tf_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))
def test_replace_keras_optimizer_iterations_variable(self):
self.assert_optimizer_iterations_increases('adam')
if __name__ == '__main__':
test.main()
|
{
"content_hash": "14adcefabfcd39af76b3b8bee248be6a",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 80,
"avg_line_length": 36.2609819121447,
"alnum_prop": 0.6508943205301788,
"repo_name": "girving/tensorflow",
"id": "c550caeb80a2471d9b35e3a28213e2068890c63c",
"size": "14722",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import sys
from tieslib.tieslib import (
prompt, setup_ties_environment
)
from interp.interp import newenv_with_preload, driver_loop
def input_prompt():
return '%s\n> ' % prompt()
### Load and save history file ############################
import os
import readline
histfile = os.path.join(os.path.expanduser('~'), '.ties_history')
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
def write_history():
try:
readline.write_history_file(histfile)
except IOError:
pass
atexit.register(write_history)
###########################################################
newenv = newenv_with_preload(setup_ties_environment, sys.argv[1:])
print 'TiES Interpreter Version 0.0'
print 'Copyleft (c) balabala'
print
driver_loop(newenv, input_prompt)
|
{
"content_hash": "4b57eb44539e9fcaa8e5d627d17b36a5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 66,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.6381660470879802,
"repo_name": "sKabYY/es_utilities",
"id": "8f7333ef10149226ba0c840567a50587cb65913d",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/core",
"path": "tiesh.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63184"
},
{
"name": "Scheme",
"bytes": "305"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
from typing import Any, Dict
from .model import Error
class ISignError(Exception):
def __init__(self, method: str, path: str, status_code: int, response: Dict[str, Any]) -> None:
super(ISignError, self).__init__()
self.method = method
self.path = path
self.status_code = status_code
self.error = Error(response)
def __str__(self) -> str:
return f"ISignError: {self.method} {self.path} -> {self.status_code}: {self.error}"
|
{
"content_hash": "df1de09e2fb59c0385cea0891fc48e2b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 99,
"avg_line_length": 32.13333333333333,
"alnum_prop": 0.6161825726141079,
"repo_name": "Paulius-Maruska/python-isign",
"id": "28503165d0748c1d208569a33d6bd23b18dba408",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/isign/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70183"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import unittest
import sys
from IECore import *
import os
class TestEXRReader(unittest.TestCase):
def testFactoryConstruction( self ) :
r = Reader.create( "test/IECore/data/exrFiles/AllHalfValues.exr" )
self.assertEqual( type( r ), EXRImageReader )
def testCanReadAndIsComplete( self ) :
self.assert_( EXRImageReader.canRead( "test/IECore/data/exrFiles/AllHalfValues.exr" ) )
self.assert_( not EXRImageReader.canRead( "thisFileDoesntExist.exr" ) )
r = EXRImageReader( "test/IECore/data/exrFiles/AllHalfValues.exr" )
self.assert_( r.isComplete() )
r = EXRImageReader( "test/IECore/data/exrFiles/incomplete.exr" )
self.assert_( not r.isComplete() )
r = EXRImageReader( "thisFileDoesntExist.exr" )
self.assert_( not r.isComplete() )
def testChannelNames( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/AllHalfValues.exr" )
c = r.channelNames()
self.assert_( c.staticTypeId()==StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 3 )
self.assert_( "R" in c )
self.assert_( "G" in c )
self.assert_( "B" in c )
r = EXRImageReader( "test/IECore/data/exrFiles/manyChannels.exr" )
c = r.channelNames()
self.assert_( c.staticTypeId()==StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 7 )
self.assert_( "R" in c )
self.assert_( "G" in c )
self.assert_( "B" in c )
self.assert_( "A" in c )
self.assert_( "diffuse.red" in c )
self.assert_( "diffuse.green" in c )
self.assert_( "diffuse.blue" in c )
r = EXRImageReader( "thisFileDoesntExist.exr" )
self.assertRaises( Exception, r.channelNames )
def testReadHeader( self ):
r = EXRImageReader( "test/IECore/data/exrFiles/manyChannels.exr" )
h = r.readHeader()
c = h['channelNames']
self.assert_( c.staticTypeId()==StringVectorData.staticTypeId() )
self.assertEqual( len( c ), 7 )
self.assert_( "R" in c )
self.assert_( "G" in c )
self.assert_( "B" in c )
self.assert_( "A" in c )
self.assert_( "diffuse.red" in c )
self.assert_( "diffuse.green" in c )
self.assert_( "diffuse.blue" in c )
self.assertEqual( h['displayWindow'], Box2iData( Box2i( V2i(0,0), V2i(255,255) ) ) )
self.assertEqual( h['dataWindow'], Box2iData( Box2i( V2i(0,0), V2i(255,255) ) ) )
def testDataAndDisplayWindows( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/AllHalfValues.exr" )
self.assertEqual( r.dataWindow(), Box2i( V2i( 0 ), V2i( 255 ) ) )
self.assertEqual( r.displayWindow(), Box2i( V2i( 0 ), V2i( 255 ) ) )
r = EXRImageReader( "test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr" )
self.assertEqual( r.dataWindow(), Box2i( V2i( 25 ), V2i( 49 ) ) )
self.assertEqual( r.displayWindow(), Box2i( V2i( 0 ), V2i( 99 ) ) )
r = EXRImageReader( "thisFileDoesntExist.exr" )
self.assertRaises( Exception, r.dataWindow )
self.assertRaises( Exception, r.displayWindow )
def testReadImage( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.256x256.exr" )
i = r.read()
self.assertEqual( i.typeId(), ImagePrimitive.staticTypeId() )
self.assertEqual( i.dataWindow, Box2i( V2i( 0 ), V2i( 255 ) ) )
self.assertEqual( i.displayWindow, Box2i( V2i( 0 ), V2i( 255 ) ) )
self.assert_( i.arePrimitiveVariablesValid() )
self.assertEqual( len( i ), 3 )
for c in ["R", "G", "B"] :
self.assertEqual( i[c].data.typeId(), FloatVectorData.staticTypeId() )
r = i["R"].data
self.assertEqual( r[0], 0 )
self.assertEqual( r[-1], 1 )
g = i["G"].data
self.assertEqual( g[0], 0 )
self.assertEqual( g[-1], 1 )
for b in i["B"].data :
self.assertEqual( b, 0 )
def testReadIndividualChannels( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.256x256.exr" )
i = r.read()
for c in ["R", "G", "B"] :
cd = r.readChannel( c )
self.assertEqual( i[c].data, cd )
def testReadWithChangedDisplayWindow( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.256x256.exr" )
i1 = r.read()
r.parameters()["displayWindow"].setTypedValue( Box2i( V2i( -1000, -10 ), V2i( 1000, 10 ) ) )
i2 = r.read()
self.assertEqual( i2.displayWindow, Box2i( V2i( -1000, -10 ), V2i( 1000, 10 ) ) )
self.assertEqual( i2.dataWindow, Box2i( V2i( 0 ), V2i( 255 ) ) )
i2.displayWindow = i1.displayWindow
self.assertEqual( i1, i2 )
def testReadInvalidDataWindow( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.512x256.exr" )
r.parameters()["dataWindow"].setTypedValue( Box2i( V2i( -1 ), V2i( 511, 255 ) ) )
self.assertRaises( Exception, r.read )
def testReadHorizontalSlices( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.512x256.exr" )
iWhole = r.read()
# read and test a horizontal slice starting at y==0
r.parameters()["dataWindow"].setTypedValue( Box2i( V2i( 0, 0 ), V2i( 511, 100 ) ) )
iSliced = r.read()
self.assertEqual( iSliced.dataWindow, Box2i( V2i( 0, 0 ), V2i( 511, 100 ) ) )
self.assertEqual( iSliced.displayWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
self.assert_( iSliced.arePrimitiveVariablesValid() )
for i in range( 0, len( iSliced["R"].data ) ) :
self.assertEqual( iSliced["R"].data[i], iWhole["R"].data[i] )
self.assertEqual( iSliced["G"].data[i], iWhole["G"].data[i] )
self.assertEqual( iSliced["B"].data[i], iWhole["B"].data[i] )
# read and test a horizontal slice ending at the end of the image
r.parameters()["dataWindow"].setTypedValue( Box2i( V2i( 0, 200 ), V2i( 511, 255 ) ) )
iSliced = r.read()
self.assertEqual( iSliced.dataWindow, Box2i( V2i( 0, 200 ), V2i( 511, 255 ) ) )
self.assertEqual( iSliced.displayWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
for i in range( -1, -len( iSliced["R"].data ) - 1, -1 ) :
self.assertEqual( iSliced["R"].data[i], iWhole["R"].data[i] )
self.assertEqual( iSliced["G"].data[i], iWhole["G"].data[i] )
self.assertEqual( iSliced["B"].data[i], iWhole["B"].data[i] )
def testReadArbitrarySlice( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMap.512x256.exr" )
iWhole = r.read()
r.parameters()["dataWindow"].setTypedValue( Box2i( V2i( 10, 10 ), V2i( 100, 150 ) ) )
iSliced = r.read()
self.assertEqual( iSliced.displayWindow, Box2i( V2i( 0, 0 ), V2i( 511, 255 ) ) )
self.assertEqual( iSliced.dataWindow, Box2i( V2i( 10, 10 ), V2i( 100, 150 ) ) )
self.assert_( iSliced.arePrimitiveVariablesValid() )
wholeEvaluator = PrimitiveEvaluator.create( iWhole )
slicedEvaluator = PrimitiveEvaluator.create( iSliced )
wholeResult = wholeEvaluator.createResult()
slicedResult = slicedEvaluator.createResult()
wholeR = wholeEvaluator.R()
wholeG = wholeEvaluator.G()
wholeB = wholeEvaluator.B()
slicedR = slicedEvaluator.R()
slicedG = slicedEvaluator.G()
slicedB = slicedEvaluator.B()
for x in range( 10, 101 ) :
for y in range( 10, 151 ) :
wholeEvaluator.pointAtPixel( V2i( x, y ), wholeResult )
slicedEvaluator.pointAtPixel( V2i( x, y ), slicedResult )
self.assertEqual( wholeResult.floatPrimVar( wholeR ), slicedResult.floatPrimVar( slicedR ) )
self.assertEqual( wholeResult.floatPrimVar( wholeG ), slicedResult.floatPrimVar( slicedG ) )
self.assertEqual( wholeResult.floatPrimVar( wholeB ), slicedResult.floatPrimVar( slicedB ) )
def testNonZeroDataWindowOrigin( self ) :
r = EXRImageReader( "test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr" )
i = r.read()
self.assertEqual( i.dataWindow, Box2i( V2i( 25 ), V2i( 49 ) ) )
self.assertEqual( i.displayWindow, Box2i( V2i( 0 ), V2i( 99 ) ) )
self.assert_( i.arePrimitiveVariablesValid() )
r.parameters()["dataWindow"].setTypedValue( Box2i( V2i( 30 ), V2i( 40 ) ) )
iSliced = r.read()
self.assertEqual( iSliced.dataWindow, Box2i( V2i( 30 ), V2i( 40 ) ) )
self.assertEqual( iSliced.displayWindow, Box2i( V2i( 0 ), V2i( 99 ) ) )
wholeEvaluator = PrimitiveEvaluator.create( i )
slicedEvaluator = PrimitiveEvaluator.create( iSliced )
wholeResult = wholeEvaluator.createResult()
slicedResult = slicedEvaluator.createResult()
wholeR = wholeEvaluator.R()
wholeG = wholeEvaluator.G()
wholeB = wholeEvaluator.B()
slicedR = slicedEvaluator.R()
slicedG = slicedEvaluator.G()
slicedB = slicedEvaluator.B()
for x in range( 30, 41 ) :
for y in range( 30, 41 ) :
wholeEvaluator.pointAtPixel( V2i( x, y ), wholeResult )
slicedEvaluator.pointAtPixel( V2i( x, y ), slicedResult )
self.assertAlmostEqual( wholeResult.floatPrimVar( wholeR ), slicedResult.floatPrimVar( slicedR ), 4 )
self.assertAlmostEqual( wholeResult.floatPrimVar( wholeG ), slicedResult.floatPrimVar( slicedG ), 4 )
self.assertAlmostEqual( wholeResult.floatPrimVar( wholeB ), slicedResult.floatPrimVar( slicedB ), 4 )
def testOrientation( self ) :
img = Reader.create( "test/IECore/data/exrFiles/uvMap.512x256.exr" ).read()
ipe = PrimitiveEvaluator.create( img )
r = ipe.createResult()
pointColors = {
V2i(0, 0) : V3f( 0, 0, 0 ),
V2i(511, 0) : V3f( 1, 0, 0 ),
V2i(0, 255) : V3f( 0, 1, 0 ),
V2i(511, 255) : V3f( 1, 1, 0 ),
}
for point, expectedColor in pointColors.items() :
ipe.pointAtPixel( point, r )
color = V3f( r.floatPrimVar( ipe.R() ), r.floatPrimVar( ipe.G() ), r.floatPrimVar( ipe.B() ) )
self.assert_( ( color - expectedColor).length() < 1.e-6 )
def testReadIncompleteImage( self ) :
with CapturingMessageHandler() as m :
r = EXRImageReader( "test/IECore/data/exrFiles/incomplete.exr" )
i = r.read()
# check image is valid even if not all data is present
self.assertEqual( len( i ), 3 )
self.assert_( "R" in i )
self.assert_( "G" in i )
self.assert_( "B" in i )
self.assert_( i.arePrimitiveVariablesValid() )
# check a warning message has been output for each channel
self.assertEqual( len( m.messages ), 3 )
self.assertEqual( m.messages[0].level, Msg.Level.Warning )
self.assertEqual( m.messages[1].level, Msg.Level.Warning )
self.assertEqual( m.messages[2].level, Msg.Level.Warning )
def testHeaderToBlindData( self ) :
dictHeader = {
'screenWindowCenter': V2fData( V2f(0,0) ),
'displayWindow': Box2iData( Box2i( V2i(0,0), V2i(511,255) ) ),
'dataWindow': Box2iData( Box2i( V2i(0,0), V2i(511,255) ) ),
'pixelAspectRatio': FloatData( 1 ),
'screenWindowWidth': FloatData( 1 ),
}
r = Reader.create( "test/IECore/data/exrFiles/uvMap.512x256.exr" )
header = r.readHeader()
del header['channelNames']
self.assertEqual( header, CompoundObject(dictHeader) )
img = r.read()
self.assertEqual( img.blindData(), CompoundData(dictHeader) )
def testTimeCodeInHeader( self ) :
r = Reader.create( "test/IECore/data/exrFiles/uvMap.512x256.exr" )
header = r.readHeader()
self.failUnless( "timeCode" not in header )
img = r.read()
self.failUnless( "timeCode" not in img.blindData() )
td = TimeCodeData( TimeCode( 12, 5, 3, 15, dropFrame = True, bgf1 = True, binaryGroup6 = 12 ) )
img2 = img.copy()
img2.blindData()["timeCode"] = td
w = Writer.create( img2, "test/IECore/data/exrFiles/testTimeCode.exr" )
w.write()
r2 = Reader.create( "test/IECore/data/exrFiles/testTimeCode.exr" )
header = r2.readHeader()
self.failUnless( "timeCode" in header )
self.assertEqual( header["timeCode"], td )
img3 = r2.read()
self.failUnless( "timeCode" in img3.blindData() )
self.assertEqual( img3.blindData()["timeCode"], td )
self.assertEqual( img2.blindData(), img3.blindData() )
del img3.blindData()["timeCode"]
self.assertEqual( img.blindData(), img3.blindData() )
if os.path.isfile( "test/IECore/data/exrFiles/testTimeCode.exr" ) :
os.remove( "test/IECore/data/exrFiles/testTimeCode.exr" )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "ed8a9117bcf3911c03f73dddadaf388b",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 105,
"avg_line_length": 34.39823008849557,
"alnum_prop": 0.6724123145527828,
"repo_name": "tectronics/cortex-vfx",
"id": "69a076a183b37c011d00187f25e0b35c5445de21",
"size": "13450",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/IECore/EXRImageReader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "65905"
},
{
"name": "C++",
"bytes": "10534690"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31102"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "1859"
},
{
"name": "Python",
"bytes": "4463622"
},
{
"name": "Slash",
"bytes": "7896"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
import copy
from collections import namedtuple
import ctypes
import re
import numpy as np
from numba.core.typing.templates import ConcreteTemplate
from numba.core import types, config, compiler
from .hlc import hlc
from .hsadrv import devices, driver, enums, drvapi
from .hsadrv.error import HsaKernelLaunchError
from numba.roc import gcn_occupancy
from numba.roc.hsadrv.driver import hsa, dgpu_present
from .hsadrv import devicearray
from numba.core.typing.templates import AbstractTemplate
from numba.core.compiler_lock import global_compiler_lock
@global_compiler_lock
def compile_hsa(pyfunc, return_type, args, debug):
# First compilation will trigger the initialization of the HSA backend.
from .descriptor import HSATargetDesc
typingctx = HSATargetDesc.typingctx
targetctx = HSATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.no_compile = True
flags.no_cpython_wrapper = True
flags.no_cfunc_wrapper = True
flags.nrt = False
# Run compilation pipeline
cres = compiler.compile_extra(typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={})
# Link dependent libraries
# targetctx.link_dependencies(cres.llvm_module, cres.target_context.linking)
library = cres.library
library.finalize()
return cres
def compile_kernel(pyfunc, args, debug=False):
cres = compile_hsa(pyfunc, types.void, args, debug=debug)
func = cres.library.get_function(cres.fndesc.llvm_func_name)
kernel = cres.target_context.prepare_hsa_kernel(func, cres.signature.args)
hsakern = HSAKernel(llvm_module=kernel.module,
name=kernel.name,
argtypes=cres.signature.args)
return hsakern
def compile_device(pyfunc, return_type, args, debug=False):
cres = compile_hsa(pyfunc, return_type, args, debug=debug)
func = cres.library.get_function(cres.fndesc.llvm_func_name)
cres.target_context.mark_hsa_device(func)
devfn = DeviceFunction(cres)
class device_function_template(ConcreteTemplate):
key = devfn
cases = [cres.signature]
cres.typing_context.insert_user_function(devfn, device_function_template)
libs = [cres.library]
cres.target_context.insert_user_function(devfn, cres.fndesc, libs)
return devfn
def compile_device_template(pyfunc):
"""Compile a DeviceFunctionTemplate
"""
from .descriptor import HSATargetDesc
dft = DeviceFunctionTemplate(pyfunc)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args)
typingctx = HSATargetDesc.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
class DeviceFunctionTemplate(object):
"""Unmaterialized device function
"""
def __init__(self, pyfunc, debug=False):
self.py_func = pyfunc
self.debug = debug
# self.inline = inline
self._compileinfos = {}
def compile(self, args):
"""Compile the function for the given argument types.
Each signature is compiled once by caching the compiled function inside
this object.
"""
if args not in self._compileinfos:
cres = compile_hsa(self.py_func, None, args, debug=self.debug)
func = cres.library.get_function(cres.fndesc.llvm_func_name)
cres.target_context.mark_hsa_device(func)
first_definition = not self._compileinfos
self._compileinfos[args] = cres
libs = [cres.library]
if first_definition:
# First definition
cres.target_context.insert_user_function(self, cres.fndesc,
libs)
else:
cres.target_context.add_user_function(self, cres.fndesc, libs)
else:
cres = self._compileinfos[args]
return cres.signature
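# Illustration of the per-signature cache above (``my_device_fn`` is a
# hypothetical Python function; ``types.float32`` is a real numba type):
#
#   dft = compile_device_template(my_device_fn)
#   sig_a = dft.compile((types.float32, types.float32))  # compiles and caches
#   sig_b = dft.compile((types.float32, types.float32))  # cache hit, same signature
#   assert sig_a is sig_b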
class DeviceFunction(object):
def __init__(self, cres):
self.cres = cres
def _ensure_list(val):
if not isinstance(val, (tuple, list)):
return [val]
else:
return list(val)
def _ensure_size_or_append(val, size):
n = len(val)
for _ in range(n, size):
val.append(1)
class HSAKernelBase(object):
"""Define interface for configurable kernels
"""
def __init__(self):
self.global_size = (1,)
self.local_size = (1,)
self.stream = None
def copy(self):
return copy.copy(self)
def configure(self, global_size, local_size=None, stream=None):
"""Configure the OpenCL kernel
local_size can be None
"""
global_size = _ensure_list(global_size)
if local_size is not None:
local_size = _ensure_list(local_size)
size = max(len(global_size), len(local_size))
_ensure_size_or_append(global_size, size)
_ensure_size_or_append(local_size, size)
clone = self.copy()
clone.global_size = tuple(global_size)
clone.local_size = tuple(local_size) if local_size else None
clone.stream = stream
return clone
def forall(self, nelem, local_size=64, stream=None):
"""Simplified configuration for 1D kernel launch
"""
return self.configure(nelem, min(nelem, local_size), stream=stream)
def __getitem__(self, args):
"""Mimick CUDA python's square-bracket notation for configuration.
This assumes a the argument to be:
`griddim, blockdim, stream`
The blockdim maps directly to local_size.
The actual global_size is computed by multiplying the local_size to
griddim.
"""
griddim = _ensure_list(args[0])
blockdim = _ensure_list(args[1])
size = max(len(griddim), len(blockdim))
_ensure_size_or_append(griddim, size)
_ensure_size_or_append(blockdim, size)
# Compute global_size
gs = [g * l for g, l in zip(griddim, blockdim)]
return self.configure(gs, blockdim, *args[2:])
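# Worked example of the notation described above (``kern`` stands for any
# configured HSAKernelBase subclass instance):
#
#   cfg = kern[(4, 2), (64, 8)]
#   # blockdim (64, 8) becomes local_size; global_size = (4*64, 2*8) = (256, 16)
#   assert cfg.local_size == (64, 8) and cfg.global_size == (256, 16)
#
#   cfg = kern.forall(1000)   # 1D shortcut: global_size (1000,), local_size (64,)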
_CacheEntry = namedtuple("_CachedEntry", ['symbol', 'executable',
'kernarg_region'])
class _CachedProgram(object):
def __init__(self, entry_name, binary):
self._entry_name = entry_name
self._binary = binary
# key: hsa context
self._cache = {}
def get(self):
ctx = devices.get_context()
result = self._cache.get(ctx)
# The program does not exist as GCN yet.
if result is None:
# generate GCN
symbol = '{0}'.format(self._entry_name)
agent = ctx.agent
ba = bytearray(self._binary)
bblob = ctypes.c_byte * len(self._binary)
bas = bblob.from_buffer(ba)
code_ptr = drvapi.hsa_code_object_t()
driver.hsa.hsa_code_object_deserialize(
ctypes.addressof(bas),
len(self._binary),
None,
ctypes.byref(code_ptr)
)
code = driver.CodeObject(code_ptr)
ex = driver.Executable()
ex.load(agent, code)
ex.freeze()
symobj = ex.get_symbol(agent, symbol)
regions = agent.regions.globals
for reg in regions:
if reg.host_accessible:
if reg.supports(enums.HSA_REGION_GLOBAL_FLAG_KERNARG):
kernarg_region = reg
break
assert kernarg_region is not None
# Cache the GCN program
result = _CacheEntry(symbol=symobj, executable=ex,
kernarg_region=kernarg_region)
self._cache[ctx] = result
return ctx, result
class HSAKernel(HSAKernelBase):
"""
A HSA kernel object
"""
def __init__(self, llvm_module, name, argtypes):
super(HSAKernel, self).__init__()
self._llvm_module = llvm_module
self.assembly, self.binary = self._generateGCN()
self.entry_name = name
self.argument_types = tuple(argtypes)
self._argloc = []
# cached program
self._cacheprog = _CachedProgram(entry_name=self.entry_name,
binary=self.binary)
self._parse_kernel_resource()
def _parse_kernel_resource(self):
"""
Temporary workaround for register limit
"""
m = re.search(r"\bwavefront_sgpr_count\s*=\s*(\d+)", self.assembly)
self._wavefront_sgpr_count = int(m.group(1))
m = re.search(r"\bworkitem_vgpr_count\s*=\s*(\d+)", self.assembly)
self._workitem_vgpr_count = int(m.group(1))
def _sentry_resource_limit(self):
# only check resource factors if either sgpr or vgpr is non-zero
#if (self._wavefront_sgpr_count > 0 or self._workitem_vgpr_count > 0):
group_size = np.prod(self.local_size)
limits = gcn_occupancy.get_limiting_factors(
group_size=group_size,
vgpr_per_workitem=self._workitem_vgpr_count,
sgpr_per_wave=self._wavefront_sgpr_count)
if limits.reasons:
fmt = 'insufficient resources to launch kernel due to:\n{}'
msg = fmt.format('\n'.join(limits.suggestions))
raise HsaKernelLaunchError(msg)
def _generateGCN(self):
hlcmod = hlc.Module()
hlcmod.load_llvm(str(self._llvm_module))
return hlcmod.generateGCN()
def bind(self):
"""
Bind kernel to device
"""
ctx, entry = self._cacheprog.get()
if entry.symbol.kernarg_segment_size > 0:
sz = ctypes.sizeof(ctypes.c_byte) *\
entry.symbol.kernarg_segment_size
kernargs = entry.kernarg_region.allocate(sz)
else:
kernargs = None
return ctx, entry.symbol, kernargs, entry.kernarg_region
def __call__(self, *args):
self._sentry_resource_limit()
ctx, symbol, kernargs, kernarg_region = self.bind()
# Unpack pyobject values into ctypes scalar values
expanded_values = []
# contains lambdas to execute on return
retr = []
for ty, val in zip(self.argument_types, args):
_unpack_argument(ty, val, expanded_values, retr)
# Insert kernel arguments
base = 0
for av in expanded_values:
# Adjust for alignment
align = ctypes.sizeof(av)
pad = _calc_padding_for_alignment(align, base)
base += pad
# Move to offset
offseted = kernargs.value + base
asptr = ctypes.cast(offseted, ctypes.POINTER(type(av)))
# Assign value
asptr[0] = av
# Increment offset
base += align
# Actual Kernel launch
qq = ctx.default_queue
if self.stream is None:
hsa.implicit_sync()
# Dispatch
signal = None
if self.stream is not None:
signal = hsa.create_signal(1)
qq.insert_barrier(self.stream._get_last_signal())
qq.dispatch(symbol, kernargs, workgroup_size=self.local_size,
grid_size=self.global_size, signal=signal)
if self.stream is not None:
self.stream._add_signal(signal)
# retrieve auto converted arrays
for wb in retr:
wb()
# Free kernel region
if kernargs is not None:
if self.stream is None:
kernarg_region.free(kernargs)
else:
self.stream._add_callback(lambda: kernarg_region.free(kernargs))
def _unpack_argument(ty, val, kernelargs, retr):
"""
Convert arguments to ctypes and append to kernelargs
"""
if isinstance(ty, types.Array):
c_intp = ctypes.c_ssize_t
# if a dgpu is present, move the data to the device.
if dgpu_present:
devary, conv = devicearray.auto_device(val, devices.get_context())
if conv:
retr.append(lambda: devary.copy_to_host(val))
data = devary.device_ctypes_pointer
else:
data = ctypes.c_void_p(val.ctypes.data)
meminfo = parent = ctypes.c_void_p(0)
nitems = c_intp(val.size)
itemsize = c_intp(val.dtype.itemsize)
kernelargs.append(meminfo)
kernelargs.append(parent)
kernelargs.append(nitems)
kernelargs.append(itemsize)
kernelargs.append(data)
for ax in range(val.ndim):
kernelargs.append(c_intp(val.shape[ax]))
for ax in range(val.ndim):
kernelargs.append(c_intp(val.strides[ax]))
elif isinstance(ty, types.Integer):
cval = getattr(ctypes, "c_%s" % ty)(val)
kernelargs.append(cval)
elif ty == types.float64:
cval = ctypes.c_double(val)
kernelargs.append(cval)
elif ty == types.float32:
cval = ctypes.c_float(val)
kernelargs.append(cval)
elif ty == types.boolean:
cval = ctypes.c_uint8(int(val))
kernelargs.append(cval)
elif ty == types.complex64:
kernelargs.append(ctypes.c_float(val.real))
kernelargs.append(ctypes.c_float(val.imag))
elif ty == types.complex128:
kernelargs.append(ctypes.c_double(val.real))
kernelargs.append(ctypes.c_double(val.imag))
else:
raise NotImplementedError(ty, val)
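# For reference: a 1-D float32 array of length 4 expands (via the Array branch
# above) into seven ctypes values: meminfo, parent, nitems=4, itemsize=4, the
# data pointer, shape[0]=4 and strides[0]=4. Scalar types such as types.float32
# contribute a single ctypes value each.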
def _calc_padding_for_alignment(align, base):
"""
Returns byte padding required to move the base pointer into proper alignment
"""
rmdr = int(base) % align
if rmdr == 0:
return 0
else:
return align - rmdr
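# Worked example: _calc_padding_for_alignment(8, 12) == 4, since 12 % 8 == 4 and
# 8 - 4 = 4 bytes of padding move the offset to 16, the next 8-byte boundary.
# An already-aligned base (e.g. 16) yields 0.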
class AutoJitHSAKernel(HSAKernelBase):
def __init__(self, func):
super(AutoJitHSAKernel, self).__init__()
self.py_func = func
self.definitions = {}
from .descriptor import HSATargetDesc
self.typingctx = HSATargetDesc.typingctx
def __call__(self, *args):
kernel = self.specialize(*args)
cfg = kernel.configure(self.global_size, self.local_size, self.stream)
cfg(*args)
def specialize(self, *args):
argtypes = tuple([self.typingctx.resolve_argument_type(a)
for a in args])
kernel = self.definitions.get(argtypes)
if kernel is None:
kernel = compile_kernel(self.py_func, argtypes)
self.definitions[argtypes] = kernel
return kernel
|
{
"content_hash": "eb88ef85eddf230b10f5d9f23b2fc5c4",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 80,
"avg_line_length": 32.18103448275862,
"alnum_prop": 0.5918162335922851,
"repo_name": "stonebig/numba",
"id": "c2efd690c243c199ebb212fff69265bcadf0d1dc",
"size": "14932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/roc/compiler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "228078"
},
{
"name": "C++",
"bytes": "18847"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "2965893"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
from .django_settings import *
INSTALLED_APPS += (
# 'corsheaders',
'wooey',
)
# MIDDLEWARE_CLASSES = [[i] if i == 'django.middleware.common.CommonMiddleware' else ['corsheaders.middleware.CorsMiddleware',i] for i in MIDDLEWARE_CLASSES]
MIDDLEWARE_CLASSES = list(MIDDLEWARE_CLASSES)
MIDDLEWARE_CLASSES.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
PROJECT_NAME = "{{ project_name }}"
WOOEY_CELERY_APP_NAME = 'wooey.celery'
WOOEY_CELERY_TASKS = 'wooey.tasks'
|
{
"content_hash": "7c57104d176472380ec468f067db8335",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 157,
"avg_line_length": 35,
"alnum_prop": 0.7346938775510204,
"repo_name": "hottwaj/Wooey",
"id": "7ac7eef603016179dbaacc89ffe9392a39d2bc18",
"size": "490",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wooey/conf/project_template/settings/wooey_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1924"
},
{
"name": "HTML",
"bytes": "73002"
},
{
"name": "JavaScript",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "359"
},
{
"name": "Python",
"bytes": "189794"
}
],
"symlink_target": ""
}
|
"""Support code for OAuth, including webhook support."""
from __future__ import absolute_import
import logging
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from readthedocs.integrations.models import Integration
from readthedocs.oauth.services import registry, GitHubService, BitbucketService
log = logging.getLogger(__name__)
SERVICE_MAP = {
Integration.GITHUB_WEBHOOK: GitHubService,
Integration.BITBUCKET_WEBHOOK: BitbucketService,
}
def attach_webhook(project, request=None):
"""Add post-commit hook on project import
This is a brute force approach to adding a webhook to a repository. We try
all accounts until we set up a webhook. This should remain around for legacy
connections -- that is, projects that do not have a remote repository attached to them
and were not set up with a VCS provider.
"""
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls
break
else:
messages.error(
request,
_('Webhook activation failed. '
'There are no connected services for this project.')
)
return None
user_accounts = service.for_user(request.user)
for account in user_accounts:
success, __ = account.setup_webhook(project)
if success:
messages.success(request, _('Webhook activated'))
project.has_valid_webhook = True
project.save()
return True
# No valid account found
if user_accounts:
messages.error(
request,
_('Webhook activation failed. Make sure you have permissions to set it.')
)
else:
messages.error(
request,
_('No accounts available to set webhook on. '
'Please connect your {network} account.'.format(
network=service.adapter(request).get_provider().name
))
)
return False
def update_webhook(project, integration, request=None):
"""Update a specific project integration instead of brute forcing"""
service_cls = SERVICE_MAP.get(integration.integration_type)
if service_cls is None:
return None
account = project.remote_repository.account
service = service_cls(request.user, account)
updated, __ = service.update_webhook(project, integration)
if updated:
messages.success(request, _('Webhook activated'))
project.has_valid_webhook = True
project.save()
return True
messages.error(
request,
_('Webhook activation failed. '
'Make sure you have the necessary permissions.')
)
project.has_valid_webhook = False
project.save()
return False
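# Rough usage sketch (the surrounding view wiring is hypothetical; only the two
# helpers above are from this module):
#
#   success = attach_webhook(project, request=request)                 # try every connected account
#   updated = update_webhook(project, integration, request=request)    # target one integration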
|
{
"content_hash": "b1ecf07153ab0ede1bb50b83b55f730c",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 85,
"avg_line_length": 33.30952380952381,
"alnum_prop": 0.647962830593281,
"repo_name": "pombredanne/readthedocs.org",
"id": "e3a75ed7c65a53012894bc3c65d11253f9a0a0c3",
"size": "2798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/oauth/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66514"
},
{
"name": "HTML",
"bytes": "205587"
},
{
"name": "JavaScript",
"bytes": "444672"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1175310"
}
],
"symlink_target": ""
}
|
from rest_framework import filters
from rest_framework.viewsets import ModelViewSet
from .models import Tag
from .serializers import TagSerializer
class TagViewSet(ModelViewSet):
# model = Tag
queryset = Tag.objects.all()
serializer_class = TagSerializer
filter_backends = (filters.OrderingFilter,)
ordering_fields = ('name',)
ordering = ('name',)
|
{
"content_hash": "c538ea958e1b2febd657ac6a5336a5b9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 26.785714285714285,
"alnum_prop": 0.7333333333333333,
"repo_name": "ychab/mymoney-server",
"id": "b71f1c28c308d6a3956601c5769270f2ffab2432",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mymoney/tags/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "273138"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
}
|
"""
A collection of methods that load the MNIST data set or load and save neural networks to a file.
"""
import cPickle, gzip, pdb
import tarfile
import ntpath
from neural_network import *
data_dir = "."
def load_digit_data():
print "Opening MNIST dataset"
f = gzip.open(data_dir + "/mnist.pkl.gz", "rb")
training_set, validation_set, test_set = cPickle.load(f)
f.close()
print "Finished opening MNIST dataset"
return training_set, validation_set, test_set
def convert_data_set_into_data_points_and_labels(data_set):
data_x, data_y = data_set
return zip(data_x, data_y)
def load_neural_network(neural_network_file_path):
return cPickle.load(open(neural_network_file_path, "rb"))
def save_neural_network(neural_network_file_path, neural_network):
cPickle.dump(neural_network, open(neural_network_file_path, "wb"))
def load_cifar_10_dataset():
"""
Loads the CIFAR-10 dataset and returns a dictionary mapping dataset file names to datasets a key "label_names" mapping to a list of label names where the index of the array is the associated value in the dataset.
"""
print "Opening CIFAR 10 dataset"
dataset = {}
with tarfile.open(data_dir + "/cifar-10-python.tar.gz", "r:gz") as tar:
for member in tar.getmembers():
if member.isfile():
if "_batch" in member.name:
file_name = ntpath.basename(member.name)
f = tar.extractfile(member)
batch_dataset = cPickle.load(f)
dataset[file_name] = batch_dataset
elif member.name.endswith("batches.meta"):
f = tar.extractfile(member)
label_names = cPickle.load(f)
dataset["meta"] = label_names
print "Finished opening CIFAR 10 dataset"
return dataset
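
# --- Hedged usage sketch (not part of the original module) ---
# A caller might pair MNIST images with their labels like this; it assumes
# mnist.pkl.gz has already been downloaded into data_dir.
#
#   training_set, validation_set, test_set = load_digit_data()
#   training_points = convert_data_set_into_data_points_and_labels(training_set)
#   image, label = training_points[0]
#   print "first training label:", label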
|
{
"content_hash": "70998bf25515471f545c592f3e798080",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 216,
"avg_line_length": 37.18,
"alnum_prop": 0.6395911780527165,
"repo_name": "ashleyjsands/machine-learning",
"id": "a5306a8269e22314d3288aa3d7b8917a1cbce2c8",
"size": "1859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "11689911"
},
{
"name": "Python",
"bytes": "34718"
}
],
"symlink_target": ""
}
|
"""
Install wagtail-relevancy using setuptools
"""
from wagtailrelevancy import __version__
with open('README.rst', 'r') as f:
readme = f.read()
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='wagtail-relevancy',
version=__version__,
description="A plugin for assisting editors with keeping their content up to date.",
long_description=readme,
author='Liam Brenner',
author_email='liam@takeflight.com.au',
url='https://github.com/takeflight/wagtail-relevancy',
install_requires=[
'wagtail>=1.0',
],
zip_safe=False,
license='BSD License',
packages=find_packages(),
include_package_data=True,
package_data={},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
],
)
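
# --- Hedged usage note (not part of the original file) ---
# With this setup.py, the package is typically installed from a source
# checkout with `pip install .`, or `pip install -e .` for an editable
# development install; the exact workflow used by the project is an assumption.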
|
{
"content_hash": "9c93eb97874ea43d85086c80248e7c79",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 88,
"avg_line_length": 24.711111111111112,
"alnum_prop": 0.6492805755395683,
"repo_name": "takeflight/wagtail-relevancy",
"id": "e6139856601fd8c35deeb1a1f7b9693937df9b67",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7257"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "10707"
}
],
"symlink_target": ""
}
|
from nailgun.objects.plugin import PluginCollection
from nailgun.plugins.attr_plugin import ClusterAttributesPlugin


class PluginManager(object):

    @classmethod
    def process_cluster_attributes(cls, cluster, attrs, query=None):
        if query is None:
            query = PluginCollection.all()
        for plugin_db in query:
            attr_plugin = ClusterAttributesPlugin(plugin_db)
            attr_plugin.process_cluster_attributes(cluster, attrs)

    @classmethod
    def get_plugin_attributes(cls, cluster):
        plugins_attrs = {}
        for plugin_db in PluginCollection.all_newest():
            attr_plugin = ClusterAttributesPlugin(plugin_db)
            attrs = attr_plugin.get_plugin_attributes(cluster)
            plugins_attrs.update(attrs)
        return plugins_attrs

    @classmethod
    def get_cluster_plugins_with_tasks(cls, cluster):
        attr_plugins = []
        for plugin_db in cluster.plugins:
            attr_pl = ClusterAttributesPlugin(plugin_db)
            attr_pl.set_cluster_tasks(cluster)
            attr_plugins.append(attr_pl)
        return attr_plugins
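
# --- Hedged usage sketch (not part of the original module) ---
# The classmethods above expect a nailgun cluster object; `cluster` and the
# `editable_attrs` dict below are assumptions that stand in for real objects.
#
#   editable_attrs.update(PluginManager.get_plugin_attributes(cluster))
#   PluginManager.process_cluster_attributes(cluster, editable_attrs)
#   plugins = PluginManager.get_cluster_plugins_with_tasks(cluster)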
|
{
"content_hash": "239c3dbba1a3f362f162ac2b4e7d06f3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 68,
"avg_line_length": 34.875,
"alnum_prop": 0.6630824372759857,
"repo_name": "andrei4ka/fuel-web-redhat",
"id": "3aab2e72914994b2b70454c8c2f1b6c6cf44f4eb",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/plugins/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100524"
},
{
"name": "JavaScript",
"bytes": "639783"
},
{
"name": "Makefile",
"bytes": "5891"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3206343"
},
{
"name": "Ruby",
"bytes": "33423"
},
{
"name": "Shell",
"bytes": "31460"
}
],
"symlink_target": ""
}
|
from postcards.postcards import Postcards
from postcards.plugin_folder.slice_image import make_tiles, store_tiles

import sys
import os
import random
import ntpath
from PIL import Image
from time import gmtime, strftime


class PostcardsFolder(Postcards):
    """
    Send postcards with images from a local folder
    """

    supported_ext = ['.jpg', '.jpeg', '.png']
    high_prio_folder = '.priority'

    def can_handle_command(self, command):
        return True if command in ['slice'] else False

    def handle_command(self, command, args):
        if command == 'slice':
            self.slice_image(source_image=self._make_absolute_path(args.picture),
                             tile_width=args.width, tile_height=args.height)

    def build_plugin_subparser(self, subparsers):
        parser_slice = subparsers.add_parser('slice', help='slice an image into tiles',
                                             description='slice an image into tiles to create a poster. \n'
                                                         + 'tiles need to be a multiple of 154x111 pixels '
                                                         + 'in order not to be cropped.')
        parser_slice.add_argument('picture',
                                  type=str,
                                  help='path to a picture to slice into tiles')
        parser_slice.add_argument('width',
                                  type=int,
                                  help='tile width')
        parser_slice.add_argument('height',
                                  type=int,
                                  help='tile height')

    def get_img_and_text(self, payload, cli_args):
        if not payload.get('folder'):
            self.logger.error("no folder set in configuration")
            exit(1)

        folder = self._make_absolute_path(payload.get('folder'))
        img_path = self._pick_image(folder)
        if not img_path:
            self.logger.error("no images in folder " + folder)
            exit(1)

        move_info = 'moving to sent folder' if payload.get('move') else 'no move'
        self.logger.info('choosing image {} ({})'.format(img_path, move_info))

        file = open(img_path, 'rb')
        if payload.get('move'):
            self._move_to_sent(folder, img_path)

        return {
            'img': file,
            'text': ''
        }

    def slice_image(self, source_image, tile_width, tile_height):
        if not os.path.isfile(source_image):
            self.logger.error('file {} does not exist'.format(source_image))
            exit(1)

        file = open(source_image, 'rb')
        with Image.open(file) as image:
            cwd = os.getcwd()
            basename = strftime("slice_%Y-%m-%d_%H-%M-%S", gmtime())
            directory = os.path.join(cwd, basename)

            self.logger.info('slicing picture {} into tiles'.format(source_image))
            tiles = make_tiles(image, tile_width=tile_width, tile_height=tile_height)
            store_tiles(tiles, directory)
            self.logger.info('picture sliced into {} tiles {}'.format(len(tiles), directory))

    def _pick_image(self, folder):
        candidates = []
        high_prio = os.path.join(folder, self.high_prio_folder)
        if os.path.exists(high_prio):
            for file in os.listdir(high_prio):
                for ext in self.supported_ext:
                    if file.lower().endswith(ext):
                        candidates.append(os.path.join(self.high_prio_folder, file))

        if not candidates:
            for file in os.listdir(folder):
                for ext in self.supported_ext:
                    if file.lower().endswith(ext):
                        candidates.append(file)

        if not candidates:
            self.logger.error("no images in folder " + folder)
            exit(1)

        img_name = random.choice(candidates)
        img_path = os.path.join(folder, img_name)
        return img_path

    def _move_to_sent(self, picture_folder, image_path):
        sent_folder = os.path.join(picture_folder, 'sent')
        if not os.path.exists(sent_folder):
            os.makedirs(sent_folder)
            self.logger.debug('creating folder {}'.format(sent_folder))

        img_name = self._get_filename(image_path)
        sent_img_path = os.path.join(sent_folder, img_name)
        os.rename(image_path, sent_img_path)
        self.logger.debug('moving image from {} to {}'.format(image_path, sent_img_path))

    def _get_filename(self, path):
        head, tail = ntpath.split(path)
        return tail or ntpath.basename(head)

    def _make_absolute_path(self, path):
        if not os.path.isabs(path):
            return os.path.join(os.getcwd(), path)
        else:
            return path


def main():
    PostcardsFolder().main(sys.argv[1:])


if __name__ == '__main__':
    main()
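
# --- Hedged CLI sketch (not part of the original module) ---
# The plugin is driven through the Postcards command line; the file name and
# tile size below are assumptions. 308x222 is a multiple of the 154x111 tile
# unit mentioned in the subparser description, so the tiles are not cropped.
#
#   python postcards_folder.py slice picture.jpg 308 222
#
# This writes the tiles into a timestamped slice_* directory under the
# current working directory.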
|
{
"content_hash": "d0728071d5ceb65dc21d1b18b865b785",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 107,
"avg_line_length": 36.17037037037037,
"alnum_prop": 0.5535531435592873,
"repo_name": "abertschi/postcards",
"id": "6f40212b6336178c8350a4788cf9c5af73234d57",
"size": "4924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postcards/plugin_folder/postcards_folder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8082"
},
{
"name": "JavaScript",
"bytes": "6977"
},
{
"name": "Python",
"bytes": "62395"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig


class ScenesConfig(AppConfig):
    name = 'scenes'
|
{
"content_hash": "52cb69635aa59285b8d6d7cf2ec9a6f2",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.4,
"alnum_prop": 0.7471264367816092,
"repo_name": "jordifierro/abidria-api",
"id": "9ca11bbf7ca0b7d63bf22faf4e5a1668e87ded5a",
"size": "87",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenes/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "391"
},
{
"name": "Python",
"bytes": "303127"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('tocayoapp', '0004_desig_uid'),
    ]

    operations = [
        migrations.RenameField('Token', 'token', 'name'),
    ]
|
{
"content_hash": "eb44d3eac9dabffca1a45259a97a89cd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 57,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.6343283582089553,
"repo_name": "philpot/tocayo",
"id": "28bb29230a687e389ce0413959293e3f1696b308",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tocayoproj/tocayoapp/migrations/0005_auto_20151210_0452.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "187326"
},
{
"name": "CLIPS",
"bytes": "159266"
},
{
"name": "Common Lisp",
"bytes": "970346"
},
{
"name": "Groff",
"bytes": "23493"
},
{
"name": "HTML",
"bytes": "10146"
},
{
"name": "Makefile",
"bytes": "2911"
},
{
"name": "Python",
"bytes": "408154"
},
{
"name": "Shell",
"bytes": "2020"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import os
import random
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base as checkpointable_base
from tensorflow.python.training.checkpointable import tracking as checkpointable_tracking
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
class SaverTest(test.TestCase):
def basicSaveRestore(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
# Initialize all variables
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer(), v2_init])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"v2": v2.saveable
}, restore_sequentially=True)
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Assert that the variables are not initialized.
if not context.executing_eagerly():
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variable_op(1000.0, name="v0")
v1_2 = variable_op(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2_2.insert("k1000", 3000.0)
# Check that the parameter nodes have been initialized.
if not context.executing_eagerly():
init_all_op = [variables.global_variables_initializer(), v2_init]
self.evaluate(init_all_op)
# TODO(xpan): Why _mutable_hash_table_v2 doesn't create empty
# table as it claims in eager mode?
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
# Restore the values saved earlier in the parameter nodes.
save2 = saver_module.Saver({"v0": v0_2, "v1": v1_2, "v2": v2_2.saveable})
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def testBasic(self):
self.basicSaveRestore(variables.Variable)
@test_util.run_in_graph_and_eager_modes
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
@test_util.run_deprecated_v1
def testResourceColocation(self):
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
with ops_lib.device("/job:ps/device:GPU:0"):
v = variable_scope.get_variable("v0",
shape=[10, 2],
partitioner=partitioner,
use_resource=True)
saver_module.Saver({"v0": v}).build()
save_op = None
for op in ops_lib.get_default_graph().get_operations():
if op.type == "SaveV2":
save_op = op
break
assert save_op is not None
for save_inp in save_op.inputs[3:]:
# Input to SaveV2 op is placed on CPU of the same device as the Variable.
self.assertEqual("/job:ps/device:CPU:0", save_inp.device)
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
for _ in range(num_graphs):
with ops_lib.Graph().as_default() as g:
for i in range(20):
resource_variable_ops.ResourceVariable(i, name="var%s" % i)
saver_module.Saver()
graph_defs.append(g.as_graph_def())
for i in range(num_graphs - 1):
self.assertEqual(graph_defs[i], graph_defs[i + 1])
def testEagerBasic(self):
with context.eager_mode():
ckpt_prefix = os.path.join(self.get_temp_dir(), "ckpt")
v1 = resource_variable_ops.ResourceVariable(3.14, name="v1")
v2 = resource_variable_ops.ResourceVariable([1, 2], name="v2")
save = saver_module.Saver([v1, v2])
save.save(None, ckpt_prefix)
v1.assign(0.0)
v2.assign([0, 0])
self.assertNear(0.0, self.evaluate(v1), 1e-5)
self.assertAllEqual([0, 0], self.evaluate(v2))
save.restore(None, ckpt_prefix)
self.assertNear(3.14, self.evaluate(v1), 1e-5)
self.assertAllEqual([1, 2], self.evaluate(v2))
def testEagerGraphCompatibility(self):
# Save from graph mode and restore from eager mode.
graph_ckpt_prefix = os.path.join(self.get_temp_dir(), "graph_ckpt")
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
# Create a graph model and save the checkpoint.
w1 = resource_variable_ops.ResourceVariable(1.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(2.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
self.evaluate(variables.global_variables_initializer())
graph_saver.save(sess, graph_ckpt_prefix)
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w1 = resource_variable_ops.ResourceVariable(0.0, name="w1")
w2 = resource_variable_ops.ResourceVariable(0.0, name="w2")
graph_saver = saver_module.Saver([w1, w2])
graph_saver.restore(None, graph_ckpt_prefix)
self.assertAllEqual(self.evaluate(w1), 1.0)
self.assertAllEqual(self.evaluate(w2), 2.0)
# Save from eager mode and restore from graph mode.
eager_ckpt_prefix = os.path.join(self.get_temp_dir(), "eager_ckpt")
with context.eager_mode():
ops_lib._default_graph_stack.reset() # pylint: disable=protected-access
ops_lib.reset_default_graph()
w3 = resource_variable_ops.ResourceVariable(3.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(4.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
graph_saver.save(None, eager_ckpt_prefix)
with context.graph_mode():
with self.session(graph=ops_lib.Graph()) as sess:
w3 = resource_variable_ops.ResourceVariable(0.0, name="w3")
w4 = resource_variable_ops.ResourceVariable(0.0, name="w4")
graph_saver = saver_module.Saver([w3, w4])
self.evaluate(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
@test_util.run_in_graph_and_eager_modes
def testResourceSaveRestoreCachingDevice(self):
save_path = os.path.join(self.get_temp_dir(), "resource_cache")
with self.session(graph=ops_lib.Graph()) as sess:
v = resource_variable_ops.ResourceVariable([1], caching_device="/cpu:0",
name="v")
if context.executing_eagerly():
sess = None
else:
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver([v])
save.save(sess, save_path)
save2 = saver_module.Saver([v])
save2.restore(sess, save_path)
self.assertEquals(self.evaluate(v), [1])
def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
with ops_lib.Graph().as_default() as g:
v = resource_variable_ops.ResourceVariable(1.0, name="v")
with ops_lib.name_scope("saver1"):
saver_module.Saver()
with ops_lib.name_scope("saver2"):
saver_module.Saver({"name": v})
ops_in_saver1_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver1/") and
not op.name.startswith("saver1/save/"))]
self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])
ops_in_saver2_scope_but_not_save_scope = [
op for op in g.get_operations()
if (op.name.startswith("saver2/") and
not op.name.startswith("saver2/save/"))]
self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])
@test_util.run_deprecated_v1
def testSaveCopyRestoreWithSaveRelativePaths(self):
"""Save, copy checkpoint dir and restore from copied dir.
This only works for save_relative_paths=True.
"""
save_dir1 = os.path.join(self.get_temp_dir(), "save_dir1")
os.mkdir(save_dir1)
save_path1 = os.path.join(save_dir1, "save_copy_restore")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver(
var_list={
"v0": v0,
"v1": v1,
"v2": v2.saveable},
restore_sequentially=True,
save_relative_paths=True)
init_all_op = [variables.global_variables_initializer(), v2_init]
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path1)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path1, val)
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir1), save_path1)
save_dir2 = os.path.join(self.get_temp_dir(), "save_dir2")
os.renames(save_dir1, save_dir2)
save_path2 = os.path.join(save_dir2, "save_copy_restore")
self.assertEqual(
checkpoint_management.latest_checkpoint(save_dir2), save_path2)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.cached_session() as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver({"v0": v0, "v1": v1, "v2": v2.saveable})
# Assert that the variables are not initialized.
self.assertEqual(
len(variables.report_uninitialized_variables().eval()), 2)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path2)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
@test_util.run_deprecated_v1
def testFilenameTensor(self):
v0 = variables.VariableV1(0, name="v0")
filename = b"somerandomfilename"
save = saver_module.Saver({"v0": v0}, filename=filename)
with self.cached_session() as sess:
tensor = sess.graph.get_tensor_by_name(
save.saver_def.filename_tensor_name)
self.assertEqual(self.evaluate(tensor), filename)
def testInvalidPath(self):
v0 = variables.VariableV1(0, name="v0")
for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
with self.cached_session() as sess:
save = saver_module.Saver({"v0": v0}, write_version=ver)
with self.assertRaisesRegexp(
ValueError, "The passed save_path is not a valid checkpoint:"):
save.restore(sess, "invalid path")
@test_util.run_v1_only("b/120545219")
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.cached_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = variables.VariableV1(np.int64(15), name="v")
save = saver_module.Saver({"v": v}, restore_sequentially=True)
self.evaluate(variables.global_variables_initializer())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.cached_session() as sess:
v = variables.VariableV1(np.int64(-1), name="v")
save = saver_module.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v" in e.message):
self.evaluate(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), self.evaluate(v))
def testSomeErrors(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
saver_module.Saver([v0, v1, v2])
# The names are different and will work.
saver_module.Saver({"vee1": v1, "other": [v2]})
# Partitioned variables also cause name conflicts.
p_v1 = variable_scope.get_variable(
"p_v1",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2 = variable_scope.get_variable(
"p_v2",
shape=[4, 5],
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
p_v2._name = "p_v1"
with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
saver_module.Saver([p_v1, p_v2])
def testSameName(self):
with ops_lib.Graph().as_default():
v0 = variables.VariableV1([10.0], name="v0")
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saving one variable under two names raises an error.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v0"):
saver_module.Saver({"v0": v0, "v0too": v0})
# Ditto for custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
@test_util.run_v1_only("b/120545219")
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
v2_init = v2.insert("k1", 30.0)
save = saver_module.Saver([v0, v1, v2.saveable])
self.evaluate(variables.global_variables_initializer())
v2_init.run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variables.VariableV1(-1.0, name="v0")
v1 = variables.VariableV1(-1.0, name="v1")
v2 = saver_test_utils.CheckpointedOp(name="v2")
save = saver_module.Saver([v0, v1, v2.saveable])
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
self.evaluate(v0)
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
self.assertEqual(0, len(self.evaluate(v2.keys())))
self.assertEqual(0, len(self.evaluate(v2.values())))
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(30.0, self.evaluate(v2.values()))
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.session(graph=ops_lib.Graph()) as sess:
v0_2 = variables.VariableV1(1000.0, name="v0")
v1_2 = variables.VariableV1(2000.0, name="v1")
v2_2 = saver_test_utils.CheckpointedOp(name="v2")
save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
v2_2.insert("k1000", 3000.0).run()
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, self.evaluate(v0_2))
self.assertEqual(2000.0, self.evaluate(v1_2))
self.assertEqual(b"k1000", self.evaluate(v2_2.keys()))
self.assertEqual(3000.0, self.evaluate(v2_2.values()))
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0_2))
self.assertEqual(20.0, self.evaluate(v1_2))
self.assertEqual(b"k1", self.evaluate(v2_2.keys()))
self.assertEqual(30.0, self.evaluate(v2_2.values()))
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
save = saver_module.Saver({var_name: var})
if not context.executing_eagerly():
self.evaluate(var.initializer)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.session(graph=ops_lib.Graph()) as sess:
var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
save = saver_module.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
@test_util.run_deprecated_v1
def testAllowEmpty(self):
save_path = os.path.join(self.get_temp_dir(), "allow_empty")
with self.cached_session() as sess:
_ = constant_op.constant(1)
save = saver_module.Saver(allow_empty=True)
val = save.save(sess, save_path)
self.assertIsNone(val)
with self.cached_session() as sess:
save = saver_module.Saver(allow_empty=True)
save.restore(sess, save_path)
def testGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1})
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2})
self.evaluate(variables.global_variables_initializer())
def testSharedServerOnGPU(self):
if not test.is_gpu_available():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_1 = variables.VariableV1(123.45)
save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
with sess.graph.device(test.gpu_device_name()):
v0_2 = variables.VariableV1(543.21)
save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
self.evaluate(variables.global_variables_initializer())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
twos = variables.VariableV1([2.0, 2.0, 2.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
v2.insert("k1", 3.0).run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
v2 = saver_test_utils.CheckpointedOp(name="v2")
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
self.assertEqual(b"k1", self.evaluate(v2.keys()))
self.assertEqual(3.0, self.evaluate(v2.values()))
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.VariableV1(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
with ops_lib.Graph().as_default(), session.Session() as sess:
variables.VariableV1(1.0)
saver = saver_module.Saver(defer_build=True)
with self.assertRaisesRegexp(RuntimeError, "build"):
saver.save(sess, save_path)
def testDeferredBuild(self):
save_path = os.path.join(self.get_temp_dir(), "deferred_build")
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(1.0)
save = saver_module.Saver(defer_build=True)
# if build is not deferred, saver cannot save the `twos`.
twos = variables.VariableV1([2.0, 2.0, 2.0])
init = variables.global_variables_initializer()
save.build()
init.run()
save.save(sess, save_path)
with session.Session("", graph=ops_lib.Graph()) as sess:
one = variables.VariableV1(0.0)
twos = variables.VariableV1([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = saver_module.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, self.evaluate(one))
self.assertAllClose([2.0, 2.0, 2.0], self.evaluate(twos))
@test_util.run_v1_only("b/120545219")
def testReshape(self):
save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
init = variables.global_variables_initializer()
save = saver_module.Saver()
init.run()
save.save(sess, save_path)
# Error when restoring with default reshape=False
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Assign requires shapes of both tensors to match."):
save.restore(sess, save_path)
# Restored to new shape with reshape=True
with session.Session("", graph=ops_lib.Graph()) as sess:
var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
save = saver_module.Saver(reshape=True)
save.restore(sess, save_path)
self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testSaveWithGlobalStep(self, pad_step_number=False):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.session(graph=ops_lib.Graph()):
var = resource_variable_ops.ResourceVariable(1.0, name="var0")
save = saver_module.Saver(
{
var._shared_name: var
}, pad_step_number=pad_step_number)
if context.executing_eagerly():
sess = None
else:
self.evaluate(var.initializer)
sess = ops_lib.get_default_session()
if use_tensor:
global_step = constant_op.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
if pad_step_number:
expected_save_path = "%s-%s" % (save_path,
"{:08d}".format(global_step_int))
else:
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
def testSaveWithGlobalStepWithPadding(self):
self.testSaveWithGlobalStep(pad_step_number=True)
def testSaveToNonexistingPath(self):
file_io.write_string_to_file(
os.path.join(self.get_temp_dir(), "actually_a_file"), "")
paths = [
os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
os.path.join(self.get_temp_dir(), "actually_a_file/path"),
]
for save_path in paths:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
# In the case where the parent directory doesn't exist, whether or not the
# save succeeds or fails is implementation dependent. Therefore we allow
# both cases.
try:
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the graph.
save.save(sess, save_path)
with self.cached_session() as sess:
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
except ValueError as exc:
error_msg_template = "Parent directory of {} doesn't exist, can't save."
self.assertEqual(error_msg_template.format(save_path), str(exc))
def testSaveToURI(self):
# ParseURI functions don't work on Windows yet.
# TODO(jhseu): Remove this check when it works.
if os.name == "nt":
self.skipTest("Local URI support doesn't work on Windows")
save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variables.VariableV1(10.0, name="v0")
v1 = variables.VariableV1(20.0, name="v1")
save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initialize all variables
self.evaluate(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
save.save(sess, save_path)
def testSaveRestoreAndValidateVariableDtype(self):
for variable_op in [
variables.Variable, resource_variable_ops.ResourceVariable
]:
save_path = os.path.join(self.get_temp_dir(), "basic_save_restore")
# Build the first session.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(10.0, name="v0", dtype=dtypes.float32)
if not context.executing_eagerly():
self.evaluate([variables.global_variables_initializer()])
save = saver_module.Saver({"v0": v0})
save.save(sess, save_path)
# Start a second session.
with self.session(graph=ops_lib.Graph()) as sess:
v0_wrong_dtype = variable_op(1, name="v0", dtype=dtypes.int32)
# Restore the saved value with different dtype
# in the parameter nodes.
save = saver_module.Saver({"v0": v0_wrong_dtype})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"original dtype"):
save.restore(sess, save_path)
# Test restoring large tensors (triggers a thread pool)
def testRestoreLargeTensors(self):
save_dir = self.get_temp_dir()
def _model():
small_v = [variable_scope.get_variable(
"small%d" % i, shape=[10, 2], use_resource=True) for i in range(5)]
large_v = [variable_scope.get_variable(
"large%d" % i, shape=[32000, 1000], use_resource=True)
for i in range(3)]
return small_v + large_v
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
orig_vars = _model()
self.evaluate(variables.global_variables_initializer())
save = saver_module.Saver(max_to_keep=1)
self.evaluate(variables.global_variables_initializer())
save.save(sess, save_dir)
orig_vals = self.evaluate(orig_vars)
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
restored_vars = _model()
save = saver_module.Saver(max_to_keep=1)
save.restore(sess, save_dir)
restored_vals = self.evaluate(restored_vars)
for orig, restored in zip(orig_vals, restored_vals):
self.assertAllEqual(orig, restored)
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = checkpoint_management.meta_graph_filename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.VariableV1(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t1.insert("k22", 44.0).run()
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
@test_util.run_deprecated_v1
def testSaverDef(self):
with self.cached_session():
v0 = variables.VariableV1(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
# Allows save/restore mechanism to work w/ different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(partitioner=None):
with self.session(graph=ops_lib.Graph()) as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variables.VariableV1(rnd, name=var_name)]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({var_name: vs[0]})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(partitioner=None):
with self.session(graph=ops_lib.Graph()) as sess:
if partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variables.VariableV1(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: new_vs[0]
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
else:
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into the same number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores PartitionedVariable.
saved_full = _save()
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
@test_util.run_deprecated_v1
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
@test_util.run_deprecated_v1
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
class SaveRestoreShardedTestV2(SaveRestoreShardedTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
class MaxToKeepTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def assertCheckpointState(self, model_checkpoint_path,
all_model_checkpoint_paths, save_dir):
checkpoint_state = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(checkpoint_state.model_checkpoint_path,
model_checkpoint_path)
self.assertEqual(checkpoint_state.all_model_checkpoint_paths,
all_model_checkpoint_paths)
def testMaxToKeepEager(self):
with context.eager_mode():
save_dir = self._get_test_dir("max_to_keep_eager")
v = variable_scope.variable(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
self.assertEqual([], save.last_checkpoints)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(None, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver({"v": v}, max_to_keep=2)
save2.set_last_checkpoints(save.last_checkpoints)
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(None, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
s2 = save2.save(None, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
@test_util.run_deprecated_v1
def testNonSharded(self):
save_dir = self._get_test_dir("max_to_keep_non_sharded")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v}, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s1],
save_dir=save_dir)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s1, s2],
save_dir=save_dir)
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertCheckpointState(
model_checkpoint_path=s3,
all_model_checkpoint_paths=[s2, s3],
save_dir=save_dir)
# Create a second helper, identical to the first.
save2 = saver_module.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = saver_module.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s3, s2],
save_dir=save_dir)
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
# Deleted by the first helper.
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
# Even though the file for s1 exists, this saver isn't aware of it, which
# is why it doesn't end up in the checkpoint state.
self.assertCheckpointState(
model_checkpoint_path=s2,
all_model_checkpoint_paths=[s2],
save_dir=save_dir)
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(checkpoint_management.checkpoint_exists(s3))
self.assertFalse(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s3)))
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s2)))
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertTrue(
checkpoint_management.checkpoint_exists(
checkpoint_management.meta_graph_filename(s1)))
self.assertCheckpointState(
model_checkpoint_path=s1,
all_model_checkpoint_paths=[s2, s1],
save_dir=save_dir)
def testSharded(self):
save_dir = self._get_test_dir("max_to_keep_sharded")
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.VariableV1(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = variables.VariableV1(222, name="v1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, sharded=True, max_to_keep=2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
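# With sharding, one logical checkpoint spans several files: the V1 format
# writes one data shard per device, while the V2 format writes per-shard data
# files plus an index file (and a .meta file alongside), hence the different
# glob counts checked below.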
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s1)))
else:
self.assertEqual(4, len(gfile.Glob(s1 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1 + "*")))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s2)))
else:
self.assertEqual(4, len(gfile.Glob(s2 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s2)))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(2, len(gfile.Glob(s3)))
else:
self.assertEqual(4, len(gfile.Glob(s3 + "*")))
self.assertTrue(
gfile.Exists(checkpoint_management.meta_graph_filename(s3)))
def testNoMaxToKeep(self):
save_dir = self._get_test_dir("no_max_to_keep")
save_dir2 = self._get_test_dir("max_to_keep_0")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
self.evaluate(variables.global_variables_initializer())
# Test max_to_keep being None.
save = saver_module.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
# Test max_to_keep being 0.
save2 = saver_module.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(s2))
def testNoMetaGraph(self):
save_dir = self._get_test_dir("no_meta_graph")
with self.cached_session() as sess:
v = variables.VariableV1(10.0, name="v")
save = saver_module.Saver({"v": v})
self.evaluate(variables.global_variables_initializer())
s1 = save.save(sess, os.path.join(save_dir, "s1"), write_meta_graph=False)
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(
gfile.Exists(checkpoint_management.meta_graph_filename(s1)))
class KeepCheckpointEveryNHoursTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(saver_module, "time")
def testNonSharded(self, mock_time):
save_dir = self._get_test_dir("keep_checkpoint_every_n_hours")
with self.cached_session() as sess:
v = variable_scope.variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
self.evaluate(variables.global_variables_initializer())
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
mock_time.time.return_value = start_time
save = saver_module.Saver(
{
"v": v
}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
# Advance the mocked clock by 1 second so s1 will be old enough to keep.
mock_time.time.return_value = start_time + 1.0
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
# We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
# would normally delete s1, because max_to_keep is 2. However, s1 is
# older than 0.7s, so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
# s1 should still be here; we do not check now, to reduce timing
# variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
# call to Save() will delete s2, because max_to_keep is 2 and because
# we already kept the old s1. s2 is very close in time to s1, so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(checkpoint_management.checkpoint_exists(s1))
self.assertFalse(checkpoint_management.checkpoint_exists(s2))
self.assertTrue(checkpoint_management.checkpoint_exists(s3))
self.assertTrue(checkpoint_management.checkpoint_exists(s4))
class SaveRestoreWithVariableNameMap(test.TestCase):
def _testNonReshape(self, variable_op):
save_path = os.path.join(self.get_temp_dir(), "non_reshape")
with self.session(graph=ops_lib.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = variable_op(10.0, name="v0")
v1 = variable_op(20.0, name="v1")
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
self.evaluate(variables.global_variables_initializer())
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the saved file.
save = saver_module.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the saved file and can be
# restored using the remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="v0")
v1 = variable_op(-1.0, name="v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
if not context.executing_eagerly():
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.session(graph=ops_lib.Graph()) as sess:
v0 = variable_op(-1.0, name="restore_prefix/v0")
v1 = variable_op(-1.0, name="restore_prefix/v1")
if not context.executing_eagerly():
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v0)
with self.assertRaisesOpError("uninitialized"):
self.evaluate(v1)
# Restore the saved values in the parameter nodes.
save = saver_module.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
@test_util.run_in_graph_and_eager_modes
def testNonReshapeResourceVariable(self):
self._testNonReshape(resource_variable_ops.ResourceVariable)
def testNonReshapeVariable(self):
self._testNonReshape(variables.Variable)
class MetaGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
@test_util.run_v1_only("b/120545219")
def testAddCollectionDef(self):
test_dir = self._get_test_dir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(1.0, name="v0")
control_flow_ops.cond(
math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),
lambda: math_ops.subtract(v0, 1))
control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),
lambda i: math_ops.add(i, 1), [v0])
var = variables.VariableV1(constant_op.constant(0, dtype=dtypes.int64))
count_up_to = var.count_up_to(3)
input_queue = data_flow_ops.FIFOQueue(
30, dtypes.float32, shared_name="collection_queue")
qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])
variables.global_variables_initializer()
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Adds a set of collections.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("float_collection", 3.5)
ops_lib.add_to_collection("string_collection", "hello")
ops_lib.add_to_collection("variable_collection", v0)
# Add QueueRunners.
queue_runner_impl.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
self.assertTrue(meta_graph_def.HasField("meta_info_def"))
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, "")
self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,
"")
collection_def = meta_graph_def.collection_def
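# The 12 collections come from the global and trainable variable collections,
# the cond and while control-flow contexts, the queue runners, and the seven
# collections added explicitly above.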
self.assertEqual(len(collection_def), 12)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(
self, meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(10.0, name="v0")
# Creates a saver.
save = saver_module.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that a collection with an unsupported key will not be added.
ops_lib.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that a collection whose item type does not match the expected
# type will not be added.
ops_lib.add_to_collection("int_collection", 3)
ops_lib.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is a saver_def in both meta_graph_def0 and meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies the SAVERS collection is saved as a bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies the SAVERS collection is saved as a bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Imports from meta_graph.
saver_module.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = ops_lib.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
self.evaluate(v0))
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
self.evaluate(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testMultiSaverCollection(self):
test_dir = self._get_test_dir("saver_collection")
self._testMultiSaverCollectionSave(test_dir)
self._testMultiSaverCollectionRestore(test_dir)
@test_util.run_v1_only("b/120545219")
def testClearExtraneousSavers(self):
test_dir = self._get_test_dir("clear_extraneous_savers")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Creates a graph.
v0 = variables.VariableV1([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name="v0")
v1 = variables.VariableV1(11.0, name="v1")
# Creates 2 savers.
saver0 = saver_module.Saver({"v0": v0}, name="saver0")
saver1 = saver_module.Saver({"v1": v1}, name="saver1")
ops_lib.add_to_collection("savers", saver0)
ops_lib.add_to_collection("savers", saver1)
self.evaluate(variables.global_variables_initializer())
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = saver_module.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is a saver_def in both meta_graph_def0 and meta_graph_def1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies the SAVERS collection is saved as a bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies the SAVERS collection is saved as a bytes_list for meta_graph_def1.
collection_def = meta_graph_def1.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there is 1 entry in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(1, len(savers.value))
# Verifies that saver0's graph nodes are omitted from the saver1 export.
self.assertEqual(33, len(meta_graph_def0.graph_def.node))
self.assertEqual(21, len(meta_graph_def1.graph_def.node))
@test_util.run_deprecated_v1
def testBinaryAndTextFormat(self):
test_dir = self._get_test_dir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.session(graph=ops_lib.Graph()):
# Creates a graph.
variables.VariableV1(10.0, name="v0")
# Exports the graph as binary format.
saver_module.export_meta_graph(filename, as_text=False)
with self.session(graph=ops_lib.Graph()):
# Imports the binary format graph.
saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.session(graph=ops_lib.Graph()):
# Imports the text format graph.
saver_module.import_meta_graph(filename)
# Writes wrong contents to the file.
graph_io.write_graph(saver.as_saver_def(),
os.path.dirname(filename),
os.path.basename(filename))
with self.session(graph=ops_lib.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "Cannot parse file"):
saver_module.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(IOError,
lambda e: "does not exist"):
saver_module.import_meta_graph(filename)
@test_util.run_v1_only("b/120545219")
def testSliceVariable(self):
test_dir = self._get_test_dir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.cached_session():
v1 = variables.VariableV1([20.0], name="v1")
v2 = variables.VariableV1([20.0], name="v2")
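# Mark v2 as a slice of a larger variable named "v1": full shape [1],
# offset [0], slice shape [1] (SaveSliceInfo(full_name, full_shape,
# var_offset, var_shape)).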
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different, so this will work.
slice_saver = saver_module.Saver({"first": v1, "second": v2})
self.evaluate(variables.global_variables_initializer())
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with ops_lib.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
test_util.assert_meta_graph_protos_equal(self, meta_graph_def,
new_meta_graph_def)
def _testGraphExtensionSave(self, test_dir):
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])
with ops_lib.name_scope("hidden1"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
# The use of control_flow_ops.cond here is purely for adding test coverage
# of the save and restore of control flow contexts (which doesn't make any
# sense here from a machine learning perspective). Typically, biases would
# be a simple Variable without the condition.
biases = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage of the save and restore of control flow contexts (which doesn't
# make any sense here from a machine learning perspective). Typically,
# biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(
loop_cond, loop_body,
[constant_op.constant(0),
variables.VariableV1(array_ops.zeros([32]))])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
init_all_op = variables.global_variables_initializer()
with self.cached_session() as sess:
# Initializes all the variables.
self.evaluate(init_all_op)
# Runs to logit.
self.evaluate(logits)
# Creates a saver.
saver0 = saver_module.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self, test_dir):
filename = os.path.join(test_dir, "metafile")
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
# Adds loss and train.
labels = constant_op.constant(0, dtypes.int32, shape=[100], name="labels")
batch_size = array_ops.size(labels)
labels = array_ops.expand_dims(labels, 1)
indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)
concated = array_ops.concat([indices, labels], 1)
onehot_labels = sparse_ops.sparse_to_dense(
concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)
logits = ops_lib.get_collection("logits")[0]
cross_entropy = nn_ops.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
loss = math_ops.reduce_mean(cross_entropy, name="xentropy_mean")
summary.scalar("loss", loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
# Runs train_op.
train_op = optimizer.minimize(loss)
ops_lib.add_to_collection("train_op", train_op)
# Runs train_op.
self.evaluate(train_op)
# Generates MetaGraphDef.
saver_module.export_meta_graph(train_filename)
def _testRestoreFromTrainGraphWithControlContext(self, test_dir):
train_filename = os.path.join(test_dir, "train_metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.session(graph=ops_lib.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = saver_module.import_meta_graph(train_filename)
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
train_op = ops_lib.get_collection("train_op")[0]
self.evaluate(train_op)
@test_util.run_deprecated_v1
def testGraphExtension(self):
test_dir = self._get_test_dir("graph_extension")
self._testGraphExtensionSave(test_dir)
self._testGraphExtensionRestore(test_dir)
self._testRestoreFromTrainGraphWithControlContext(test_dir)
def _testGradientSerDes(self, graph_fn):
"""Tests that gradients can be computed after exporting and importing.
Builds a graph, exports it, and verifies that it can be imported and the
gradient can be built and run correctly.
Args:
graph_fn: a function that takes a single float Tensor as input and returns
a single Tensor.
"""
test_dir = self._get_test_dir("nested_control_flow")
filename = os.path.join(test_dir, "metafile")
saver_ckpt = os.path.join(test_dir, "saver.ckpt")
# Build the graph from `graph_fn` on top of a variable input.
with ops_lib.Graph().as_default():
var = variables.VariableV1(0.0)
var_name = var.name
output = graph_fn(var)
output_name = output.name
init_op = variables.global_variables_initializer()
# Generate a MetaGraphDef containing the while loop.
with session.Session() as sess:
self.evaluate(init_op)
self.evaluate(output)
saver = saver_module.Saver()
saver.save(sess, saver_ckpt)
saver.export_meta_graph(filename)
# Build and run the gradients of the exported graph. We use the result below
# to verify that gradients built from the imported MetaGraphDef match.
grad = gradients_impl.gradients([output], [var])
# Turn off constant folding to avoid breaking testNestedControlFlowSerDes.
# It appears that a missing control dependency in the gradient graph
# causes the fetch node to not be triggered.
no_constfold_config = config_pb2.ConfigProto()
no_constfold_config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
expected_grad_value = self.evaluate(grad)
# Restore the MetaGraphDef into a new Graph.
with ops_lib.Graph().as_default():
with session.Session() as sess:
saver = saver_module.import_meta_graph(filename)
saver.restore(sess, saver_ckpt)
# Make sure we can still build gradients and get the same result.
var = ops_lib.get_default_graph().get_tensor_by_name(var_name)
output = ops_lib.get_default_graph().get_tensor_by_name(output_name)
grad = gradients_impl.gradients([output], [var])
init_op = variables.global_variables_initializer()
with session.Session(config=no_constfold_config) as sess:
self.evaluate(init_op)
actual_grad_value = self.evaluate(grad)
self.assertEqual(expected_grad_value, actual_grad_value)
def _testWhileLoopAndGradientSerDes(self, outer_body_fn):
# Build a while loop with `outer_body_fn`, export it, and verify that it can
# be imported and the gradient can be built and run correctly.
# pylint: disable=g-long-lambda
return self._testGradientSerDes(
lambda x: control_flow_ops.while_loop(
lambda i, y: i < 5, outer_body_fn, [0, x])[1])
# pylint: enable=g-long-lambda
def testNestedWhileLoopsSerDes(self):
# Test two simple nested while loops.
def body(i, x):
_, r = control_flow_ops.while_loop(lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])
return i + 1, x + r
self._testWhileLoopAndGradientSerDes(body)
def testNestedControlFlowSerDes(self):
# Test while loop in a cond in a while loop.
# pylint: disable=g-long-lambda
def body(i, x):
cond_result = control_flow_ops.cond(
i > 0,
lambda: control_flow_ops.while_loop(
lambda j, y: j < 3,
lambda j, y: (j + 1, y + x),
[0, 0.0])[1],
lambda: x)
return i + 1, cond_result
# pylint: enable=g-long-lambda
self._testWhileLoopAndGradientSerDes(body)
def testNestedCondsSerDes(self):
# Test conds in a cond.
# pylint: disable=g-long-lambda
self._testGradientSerDes(lambda x: control_flow_ops.cond(
x > 0,
lambda: control_flow_ops.cond(x > 3,
lambda: array_ops.identity(x),
lambda: math_ops.multiply(x, 2.0)),
lambda: control_flow_ops.cond(x < -3,
lambda: constant_op.constant(1.0),
lambda: math_ops.multiply(x, -1.0))))
# pylint: enable=g-long-lambda
@test_util.run_v1_only("b/120545219")
def testStrippedOpListDef(self):
with self.cached_session():
# Creates a graph.
v0 = variables.VariableV1(0.0)
var = variables.VariableV1(10.0)
math_ops.add(v0, var)
@function.Defun(dtypes.float32)
def minus_one(x):
return x - 1
minus_one(array_ops.identity(v0))
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveSlices", "Sub",
"VariableV2"
])
else:
self.assertEqual(ops, [
"Add", "Assign", "Const", "Identity", "NoOp",
"PlaceholderWithDefault", "RestoreV2", "SaveV2", "Sub", "VariableV2"
])
# Test calling stripped_op_list_for_graph directly
op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
@test_util.run_deprecated_v1
def testStripDefaultValuedAttrs(self):
"""Verifies that default valued attrs are stripped, unless disabled."""
# With strip_default_attrs enabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must be removed.
with self.cached_session():
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=True)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# With strip_default_attrs disabled, attributes "T" (float32) and "Tout"
# (complex64) in the "Complex" op must *not* be removed, even if they map
# to their defaults.
with self.session(graph=ops_lib.Graph()):
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
save = saver_module.Saver({"real_num": real_num, "imag_num": imag_num})
variables.global_variables_initializer()
meta_graph_def = save.export_meta_graph(strip_default_attrs=False)
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
@test_util.run_deprecated_v1
def testImportIntoNamescope(self):
# Test that we can import a meta graph into a namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph, import_scope="new_model")
new_saver.restore(sess, filename)
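# import_scope="new_model" prefixes every imported node name, so both the
# fetched train op and the fed placeholders are addressed with the
# "new_model/" prefix.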
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testImportIntoNamescopeWithoutVariables(self):
# Save a simple graph that contains no variables into a checkpoint.
test_dir = self._get_test_dir("no_vars_graph")
filename = os.path.join(test_dir, "ckpt")
graph_1 = ops_lib.Graph()
with session.Session(graph=graph_1) as sess:
constant_op.constant([1, 2, 3], name="x")
constant_op.constant([1, 2, 3], name="y")
saver = saver_module.Saver(allow_empty=True)
saver.save(sess, filename)
# Create a fresh graph.
graph_2 = ops_lib.Graph()
with session.Session(graph=graph_2) as sess:
# Restore the above checkpoint under scope "subgraph_1".
new_saver_1 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_1")
# There are no variables to restore, so import_meta_graph should not
# return a Saver.
self.assertIsNone(new_saver_1)
# Create a variable in graph_2 under scope "my_scope".
variables.VariableV1(array_ops.zeros([10]), name="my_scope/my_var")
self.evaluate(variables.global_variables_initializer())
# Restore the checkpoint into a different scope "subgraph_2".
new_saver_2 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="subgraph_2")
# Because the variable does not live in scope "subgraph_2",
# import_meta_graph should not attempt to restore the variable. So,
# import_meta_graph still won't return a Saver instance.
self.assertIsNone(new_saver_2)
# However, if we restore the checkpoint under scope "my_scope",
# import_meta_graph will detect the variable and return a Saver for
# restoring it. This should happen even when the variable does not
# originate from graph_1.
new_saver_3 = saver_module.import_meta_graph(
filename + ".meta", graph=graph_2, import_scope="my_scope")
self.assertIsInstance(new_saver_3, saver_module.Saver)
@test_util.run_deprecated_v1
def testImportIntoImplicitNamescope(self):
# Test that we can import a meta graph into an implicit namescope.
test_dir = self._get_test_dir("import_into_namescope")
filename = os.path.join(test_dir, "ckpt")
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
with session.Session() as sess:
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name="logits")
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit, name="cost")
adam.AdamOptimizer().minimize(cost, name="optimize")
saver = saver_module.Saver()
self.evaluate(variables.global_variables_initializer())
saver.save(sess, filename)
graph = ops_lib.Graph()
with session.Session(graph=graph) as sess:
with ops_lib.name_scope("new_model"):
new_saver = saver_module.import_meta_graph(
filename + ".meta", graph=graph)
new_saver.restore(sess, filename)
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnImport(self):
# Test that we can import a graph without its devices and run it successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph()
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=False, import_scope="new_model")
# Device refers to GPU, which is not available here.
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(variables.global_variables_initializer())
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(
meta_graph_def, clear_devices=True, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testClearDevicesOnExport(self):
# Test that we can export a graph without its devices and run it successfully.
with ops_lib.Graph().as_default():
with ops_lib.device("/job:ps/replica:0/task:0/device:GPU:0"):
image = array_ops.placeholder(dtypes.float32, [None, 784], name="image")
label = array_ops.placeholder(dtypes.float32, [None, 10], name="label")
weights = variables.VariableV1(
random_ops.random_uniform([784, 10]), name="weights")
bias = variables.VariableV1(array_ops.zeros([10]), name="bias")
logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)
nn_ops.softmax(logit, name="prediction")
cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,
logits=logit)
adam.AdamOptimizer().minimize(cost, name="optimize")
meta_graph_def = saver_module.export_meta_graph(clear_devices=True)
graph_io.write_graph(meta_graph_def, self.get_temp_dir(),
"meta_graph.pbtxt")
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
sess.run(["new_model/optimize"], {
"new_model/image:0": np.random.random([1, 784]),
"new_model/label:0": np.random.randint(
10, size=[1, 10])
})
def testPreserveDatasetAndFunctions(self):
with ops_lib.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
_ = array_ops.identity(next_element, name="output")
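# Dataset.map registers a function in the graph's function library; the
# exported MetaGraphDefs below must preserve both the dataset ops and that
# function for the imported iterator to keep working.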
# Generate three MetaGraphDef protos using different code paths.
meta_graph_def_simple = saver_module.export_meta_graph()
meta_graph_def_devices_cleared = saver_module.export_meta_graph(
clear_devices=True)
meta_graph_def_from_graph_def = saver_module.export_meta_graph(
clear_devices=True, graph_def=g.as_graph_def())
for meta_graph_def in [meta_graph_def_simple,
meta_graph_def_devices_cleared,
meta_graph_def_from_graph_def]:
with session.Session(graph=ops_lib.Graph()) as sess:
saver_module.import_meta_graph(meta_graph_def, import_scope="new_model")
self.evaluate(variables.global_variables_initializer())
for i in range(10):
self.assertEqual(i * i, sess.run("new_model/output:0"))
with self.assertRaises(errors.OutOfRangeError):
sess.run("new_model/output:0")
class CheckpointReaderTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
@test_util.run_deprecated_v1
def testDebugString(self):
# Builds a graph.
v0 = variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
v1 = variables.VariableV1(
[[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name="v1")
init_all_op = variables.global_variables_initializer()
save = saver_module.Saver(
{
"v0": v0,
"v1": v1
}, write_version=self._WRITE_VERSION)
save_path = os.path.join(self.get_temp_dir(),
"ckpt_for_debug_string" + str(self._WRITE_VERSION))
with self.cached_session() as sess:
self.evaluate(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that the debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEqual([2, 3], var_map["v0"])
self.assertEqual([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
pywrap_tensorflow.NewCheckpointReader("non-existent")
class CheckpointReaderForV2Test(CheckpointReaderTest):
_WRITE_VERSION = saver_pb2.SaverDef.V2
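# Re-runs all CheckpointReaderTest cases against the V2 checkpoint format.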
class WriteGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testWriteGraph(self):
test_dir = self._get_test_dir("write_graph_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
path = graph_io.write_graph(ops_lib.get_default_graph(),
os.path.join(test_dir, "l1"), "graph.pbtxt")
truth = os.path.join(test_dir, "l1", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
def testRecursiveCreate(self):
test_dir = self._get_test_dir("deep_dir")
variables.VariableV1(
[[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name="v0")
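# write_graph should create the intermediate directories l1/l2/l3 on demand.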
path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),
os.path.join(test_dir, "l1", "l2", "l3"),
"graph.pbtxt")
truth = os.path.join(test_dir, "l1", "l2", "l3", "graph.pbtxt")
self.assertEqual(path, truth)
self.assertTrue(os.path.exists(path))
class ScopedGraphTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):
graph = ops_lib.Graph()
with graph.as_default():
# Creates an inference graph.
# Hidden 1
images = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
with ops_lib.name_scope("hidden1"):
weights1 = variables.VariableV1(
random_ops.truncated_normal(
[28, 128], stddev=1.0 / math.sqrt(float(28))),
name="weights")
# The use of control_flow_ops.cond here is purely for adding test
# coverage of the save and restore of control flow contexts (which doesn't
# make any sense here from a machine learning perspective). Typically,
# biases would be a simple Variable without the condition.
biases1 = variables.VariableV1(
control_flow_ops.cond(
math_ops.less(random.random(), 0.5),
lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),
name="biases")
hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights2 = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage of the save and restore of control flow contexts (which doesn't
# make any sense here from a machine learning perspective). Typically,
# biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases2):
biases2 += constant_op.constant(0.1, shape=[32])
return it + 1, biases2
_, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights3 = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases3 = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights3) + biases3
ops_lib.add_to_collection("logits", logits)
# Adds user_defined proto in three formats: string, bytes and Any.
# Any proto should just pass through.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
ops_lib.add_to_collection("user_defined_string_collection",
str(queue_runner))
ops_lib.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
ops_lib.add_to_collection("user_defined_any_collection", any_buf)
_, var_list = meta_graph.export_scoped_meta_graph(
filename=os.path.join(test_dir, exported_filename),
graph=ops_lib.get_default_graph(),
export_scope="hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
with self.session(graph=graph) as sess:
self.evaluate(variables.global_variables_initializer())
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)
def _testScopedRestore(self, test_dir, exported_filename,
new_exported_filename, ckpt_filename):
graph = ops_lib.Graph()
# Create all the missing inputs.
with graph.as_default():
new_image = constant_op.constant(
1.2, dtypes.float32, shape=[100, 28], name="images")
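# The exported scope referenced an "images" tensor from outside "hidden1";
# export_scoped_meta_graph turned it into a "$unbound_inputs_images"
# placeholder, which we rebind to the new constant via input_map.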
var_list = meta_graph.import_scoped_meta_graph(
os.path.join(test_dir, exported_filename),
graph=graph,
input_map={"$unbound_inputs_images": new_image},
import_scope="new_hidden1")
self.assertEqual(["biases:0", "weights:0"], sorted(var_list.keys()))
hidden1 = graph.as_graph_element("new_hidden1/Relu:0")
weights1 = graph.as_graph_element("new_hidden1/weights:0")
biases1 = graph.as_graph_element("new_hidden1/biases:0")
with graph.as_default():
# Hidden 2
with ops_lib.name_scope("hidden2"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[128, 32], stddev=1.0 / math.sqrt(float(128))),
name="weights")
# The use of control_flow_ops.while_loop here is purely for adding test
# coverage of the save and restore of control flow contexts (which doesn't
# make any sense here from a machine learning perspective). Typically,
# biases would be a simple Variable without the loop.
def loop_cond(it, _):
return it < 2
def loop_body(it, biases):
biases += constant_op.constant(0.1, shape=[32])
return it + 1, biases
_, biases = control_flow_ops.while_loop(loop_cond, loop_body, [
constant_op.constant(0), variables.VariableV1(array_ops.zeros([32]))
])
hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)
# Linear
with ops_lib.name_scope("softmax_linear"):
weights = variables.VariableV1(
random_ops.truncated_normal(
[32, 10], stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = variables.VariableV1(array_ops.zeros([10]), name="biases")
logits = math_ops.matmul(hidden2, weights) + biases
ops_lib.add_to_collection("logits", logits)
# The rest of the variables.
rest_variables = list(
set(variables.global_variables()) - set(var_list.keys()))
init_rest_op = variables.variables_initializer(rest_variables)
with self.session(graph=graph) as sess:
saver = saver_module.Saver(var_list=var_list, max_to_keep=1)
saver.restore(sess, os.path.join(test_dir, ckpt_filename))
# Verify that we have restored weights1 and biases1.
self.evaluate([weights1, biases1])
# Initialize the rest of the variables and run logits.
self.evaluate(init_rest_op)
self.evaluate(logits)
# Verifies that we can save the subgraph under "hidden1" and restore it
# into "new_hidden1" in the new graph.
@test_util.run_deprecated_v1
def testScopedSaveAndRestore(self):
test_dir = self._get_test_dir("scoped_export_import")
ckpt_filename = "ckpt"
self._testScopedSave(test_dir, "exported_hidden1.pbtxt", ckpt_filename)
self._testScopedRestore(test_dir, "exported_hidden1.pbtxt",
"exported_new_hidden1.pbtxt", ckpt_filename)
# Verifies that we can copy the subgraph under "hidden1" to a different
# name scope in the same graph or in a different graph.
@test_util.run_deprecated_v1
def testCopyScopedGraph(self):
test_dir = self._get_test_dir("scoped_copy")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies copy to the same graph with the same name fails.
with graph1.as_default():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "need to be different" in str(e)):
meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden1")
# Verifies copy to the same graph.
with graph1.as_default():
var_list_2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1", to_scope="hidden2")
with self.session(graph=graph1) as sess:
saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver1.restore(sess, saver0_ckpt)
saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)
saver2.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("hidden1/relu:0"))
self.assertAllClose(expected, sess.run("hidden2/relu:0"))
# Verifies copy to a different graph.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
@test_util.run_deprecated_v1
def testExportGraphDefWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
graph1 = ops_lib.Graph()
with graph1.as_default():
with ops_lib.name_scope("hidden1"):
images = constant_op.constant(
1.0, dtypes.float32, shape=[3, 2], name="images")
weights1 = variables.VariableV1(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name="weights")
biases1 = variables.VariableV1([0.1] * 3, name="biases")
nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name="relu")
# Run the graph and save scoped checkpoint.
with self.session(graph=graph1) as sess:
self.evaluate(variables.global_variables_initializer())
_, var_list_1 = meta_graph.export_scoped_meta_graph(
graph_def=graph1.as_graph_def(), export_scope="hidden1")
saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)
saver.save(sess, saver0_ckpt, write_state=False)
expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))
# Verifies that we can run successfully after restoring.
graph2 = ops_lib.Graph()
new_var_list_1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph1,
to_graph=graph2)
with self.session(graph=graph2) as sess:
saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)
saver3.restore(sess, saver0_ckpt)
self.assertAllClose(expected, sess.run("new_hidden1/relu:0"))
@test_util.run_deprecated_v1
def testSerializeSaverWithScope(self):
test_dir = self._get_test_dir("export_graph_def")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
saver2_ckpt = os.path.join(test_dir, "saver2.ckpt")
graph = ops_lib.Graph()
with graph.as_default():
with ops_lib.name_scope("hidden1"):
variable1 = variables.VariableV1([1.0], name="variable1")
saver1 = saver_module.Saver(var_list=[variable1])
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)
with ops_lib.name_scope("hidden2"):
variable2 = variables.VariableV1([2.0], name="variable2")
saver2 = saver_module.Saver(var_list=[variable2], name="hidden2/")
graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)
with self.session(graph=graph) as sess:
self.evaluate(variables.global_variables_initializer())
saver1.save(sess, saver1_ckpt, write_state=False)
saver2.save(sess, saver2_ckpt, write_state=False)
graph1 = ops_lib.Graph()
var_dict1 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden1",
to_scope="new_hidden1",
from_graph=graph,
to_graph=graph1)
self.assertEqual(1, len(var_dict1))
saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list1))
with self.session(graph=graph1) as sess:
saver_list1[0].restore(sess, saver1_ckpt)
self.assertEqual(1.0, self.evaluate(var_dict1["variable1:0"]))
graph2 = ops_lib.Graph()
var_dict2 = meta_graph.copy_scoped_meta_graph(
from_scope="hidden2",
to_scope="new_hidden2",
from_graph=graph,
to_graph=graph2)
self.assertEqual(1, len(var_dict2))
saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)
self.assertEqual(1, len(saver_list2))
with self.session(graph=graph2) as sess:
saver_list2[0].restore(sess, saver2_ckpt)
self.assertEqual(2.0, self.evaluate(var_dict2["variable2:0"]))
class _OwnsAVariableSimple(checkpointable_base.Checkpointable):
"""A Checkpointable object which can be saved using a tf.train.Saver."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
def _gather_saveables_for_checkpoint(self):
return {checkpointable_base.VARIABLE_VALUE_KEY: self.non_dep_variable}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class _MirroringSaveable(
saver_module.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
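# Only the primary variable's value is written to the checkpoint under
# `name`; restore() below fans that value out to both variables.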
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable_base.Checkpointable):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
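# Returning a factory (rather than a constructed SaveableObject) lets the
# checkpointing machinery build the saveable lazily under the name it picks.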
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {checkpointable_base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class NonLayerCheckpointable(checkpointable_tracking.AutoCheckpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class CheckpointableCompatibilityTests(test.TestCase):
# TODO(allenl): Track down python3 reference cycles in these tests.
@test_util.run_in_graph_and_eager_modes
def testNotSaveableButIsCheckpointable(self):
v = _OwnsAVariableSimple()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
for saver in (saver_module.Saver(var_list=[v]),
saver_module.Saver(var_list={"v": v})):
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
def testSingleTensorEvaluation(self):
class _CountingSaveable(saver_module.BaseSaverBuilder.SaveableObject):
def __init__(self, name):
self.eval_count = 0
def _tensor():
self.eval_count += 1
return constant_op.constant([1.])
dummy_op = constant_op.constant([2.])
super(_CountingSaveable, self).__init__(
dummy_op,
[saver_module.BaseSaverBuilder.SaveSpec(
_tensor, "", name, dtype=dummy_op.dtype)],
name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
pass
with context.eager_mode():
v = _CountingSaveable("foo")
saver = saver_module.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
save_path = saver.save(sess, prefix)
self.assertEqual(1, v.eval_count)
saver.restore(sess, save_path)
self.assertEqual(1, v.eval_count)
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def testVariableNotFoundErrorRaised(self):
# Restore does some tricky exception handling to figure out if it should
# load an object-based checkpoint. Tests that the exception handling isn't
# too broad.
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
a = resource_variable_ops.ResourceVariable(1., name="a")
b = resource_variable_ops.ResourceVariable(1., name="b")
a_saver = saver_module.Saver([a])
b_saver = saver_module.Saver([b])
with self.cached_session() as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with self.assertRaisesRegexp(
errors.NotFoundError, "Key b not found in checkpoint"):
b_saver.restore(sess=sess, save_path=save_path)
with self.assertRaises(errors.NotFoundError) as cs:
b_saver.restore(sess=sess, save_path=save_path)
# Make sure we don't have a confusing "During handling of the above
# exception" block in Python 3.
self.assertNotIn("NewCheckpointReader", cs.exception.message)
@test_util.run_v1_only("b/120545219")
def testGraphChangedForRestoreErrorRaised(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1(1., name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
self.evaluate(a.initializer)
save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
with ops_lib.Graph().as_default() as g:
a = variables.VariableV1([1.], name="a")
a_saver = saver_module.Saver([a])
with self.session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"a mismatch between the current graph and the graph"):
a_saver.restore(sess=sess, save_path=save_path)
def testLoadFromObjectBasedGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph) as sess:
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
# An incompatible object-based checkpoint to check error messages
var = resource_variable_ops.ResourceVariable(1., name="a")
self.evaluate(var.initializer)
second_saver = checkpointable_utils.CheckpointableSaver(var)
second_path = second_saver.save(file_prefix=os.path.join(
checkpoint_directory, "second"))
restore_graph = ops_lib.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as sess:
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver()
saver.restore(sess=sess, save_path=save_path)
self._check_sentinels(root)
before_second_restore_ops = restore_graph.get_operations()
# Test that multiple restores do not pollute the graph
saver.restore(sess=sess, save_path=save_path)
self.assertEqual(before_second_restore_ops,
restore_graph.get_operations())
with self.assertRaisesRegexp(errors.NotFoundError,
"Could not find some variables"):
saver.restore(sess=sess, save_path=second_path)
def testLoadFromObjectBasedEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops_lib.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
saver = saver_module.Saver(
root.model.variables + root.optimizer.variables())
saver.restore(sess=None, save_path=save_path)
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "06066974c1d3fe87173b764747f1a115",
"timestamp": "",
"source": "github",
"line_count": 3048,
"max_line_length": 89,
"avg_line_length": 42.42782152230971,
"alnum_prop": 0.6485230436127436,
"repo_name": "apark263/tensorflow",
"id": "dec23c50e8c069d4f2dd18c49ecdabb447f4872b",
"size": "130008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
}
|
from pyface.action.action_manager_item import *
|
{
"content_hash": "2a8ba630e55ecdb36b2e188b8538386c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.8125,
"repo_name": "enthought/etsproxy",
"id": "d412c919f24ddac63127a84d16585d061ef26378",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/action/action_manager_item.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import re
#from . import export
#@export
def fuzzyfinder(text, collection):
"""
Args:
text (str): A partial string which is typically entered by a user.
collection (iterable): A collection of strings which will be filtered
based on the input `text`.
Returns:
suggestions (generator): A generator object that produces a list of
suggestions narrowed down from `collections` using the `text`
input.
"""
suggestions = []
pat = '.*?'.join(map(re.escape, text))
regex = re.compile(pat)
for item in collection:
r = regex.search(item)
if r:
suggestions.append((len(r.group()), r.start(), item))
return (z for _, _, z in sorted(suggestions))
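# Illustrative usage sketch (added for clarity; the sample collection below is
# hypothetical and not part of the original module). Matches are ranked by the
# length of the matched span, then by how early the match starts:
if __name__ == '__main__':
    _sample = ['django_migrations.py', 'django_admin_log.py', 'main_generator.py']
    # Expected order: ['django_migrations.py', 'main_generator.py', 'django_admin_log.py']
    print(list(fuzzyfinder('mig', _sample)))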
|
{
"content_hash": "53c8d957440c75165dd270760278fded",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 31.16,
"alnum_prop": 0.5905006418485238,
"repo_name": "edonyM/toolkitem",
"id": "892f627978d63ee9d329ab8c687f1142371f42f1",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzyfinder/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "919"
},
{
"name": "Python",
"bytes": "301993"
},
{
"name": "Shell",
"bytes": "3676"
}
],
"symlink_target": ""
}
|
from sqlalchemy.orm import joinedload, subqueryload, lazyload
from aquilon.exceptions_ import NotFoundException
from aquilon.aqdb.model import EsxCluster, VirtualMachine, ClusterResource
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.formats.cluster import ClusterList
class CommandShowESXClusterAll(BrokerCommand):
def render(self, session, cluster, **arguments):
q = session.query(EsxCluster)
vm_q = session.query(VirtualMachine)
vm_q = vm_q.join(ClusterResource, EsxCluster)
if cluster:
q = q.filter_by(name=cluster)
vm_q = vm_q.filter_by(name=cluster)
vm_q = vm_q.options(joinedload('machine'),
joinedload('machine.primary_name'),
joinedload('machine.primary_name.fqdn'),
lazyload('machine.host'))
q = q.options(subqueryload('_hosts'),
joinedload('_hosts.host'),
joinedload('_hosts.host.machine'),
subqueryload('_metacluster'),
joinedload('_metacluster.metacluster'),
joinedload('resholder'),
subqueryload('resholder.resources'),
subqueryload('switch'),
joinedload('switch.primary_name'),
joinedload('switch.primary_name.fqdn'),
subqueryload('service_bindings'),
subqueryload('allowed_personalities'))
q = q.order_by(EsxCluster.name)
dbclusters = q.all()
if cluster and not dbclusters:
raise NotFoundException("ESX Cluster %s not found." % cluster)
        # Manual eager-loading of VM resources. All this code does is make sure
        # the data is pinned in the session's cache.
machines = {}
for vm in vm_q:
machines[vm.machine.machine_id] = vm
return ClusterList(dbclusters)
|
{
"content_hash": "12df1fdad756d99bf2b06e8a48ca0481",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 42.808510638297875,
"alnum_prop": 0.5874751491053678,
"repo_name": "stdweird/aquilon",
"id": "6e3dc3f086febd560ac7557868cee744465c8e61",
"size": "2717",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/commands/show_esx_cluster_all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
if os.environ.get('READTHEDOCS', ''):
# RTD doesn't use the repo's Makefile to build docs.
import subprocess
subprocess.run(["sphinx-apidoc", "--force", "-o", "./api", "../../rafem"])
# -- Project information -----------------------------------------------------
project = 'RAFEM'
copyright = '2020, Katherine M. Ratliff'
author = 'Katherine M. Ratliff'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/powered-by-logo-header.png"
html_sidebars = {
"index": [
"sidebarintro.html",
"links.html",
"sourcelink.html",
"searchbox.html",
],
"**": [
"sidebarintro.html",
"links.html",
"sourcelink.html",
"searchbox.html",
]
}
|
{
"content_hash": "8781bc0ea00f24651f2ff711c8aec77c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 28.506666666666668,
"alnum_prop": 0.6202057998129092,
"repo_name": "katmratliff/avulsion-bmi",
"id": "3fd8b59afdd8d16467ab0d8c3018bf0d02705250",
"size": "2691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110517"
}
],
"symlink_target": ""
}
|
import django_filters
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
try:
from django_filters import rest_framework as filters
except ImportError: # Back-ward compatible for django-rest-framework<3.7
from rest_framework import filters
from rest_framework import viewsets
from teryt_tree.models import JednostkaAdministracyjna
from teryt_tree.rest_framework_ext.serializers import JednostkaAdministracyjnaSerializer
def custom_area_filter(queryset, _, value):
if not value:
return queryset
return queryset.area(get_object_or_404(JednostkaAdministracyjna, pk=value))
class JednostkaAdministracyjnaFilter(filters.FilterSet):
area = django_filters.CharFilter(method=custom_area_filter, label=_("Area"))
class Meta:
model = JednostkaAdministracyjna
fields = ["name", "category", "category__level", "area"]
class JednostkaAdministracyjnaViewSet(viewsets.ModelViewSet):
queryset = (
JednostkaAdministracyjna.objects.select_related("category")
.prefetch_related("children")
.all()
)
serializer_class = JednostkaAdministracyjnaSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = JednostkaAdministracyjnaFilter
|
{
"content_hash": "e99aeecc2a8c98e9f368d5598d492d6f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 35.583333333333336,
"alnum_prop": 0.760343481654957,
"repo_name": "ad-m/django-teryt-tree",
"id": "25572a57a80a7638de6579011eb81a2270303655",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teryt_tree/rest_framework_ext/viewsets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Makefile",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "30416"
}
],
"symlink_target": ""
}
|
from ..estimators.estimator_base import H2OEstimator
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2OPCA(H2OEstimator):
"""
Principal Component Analysis
"""
algo = "pca"
def __init__(self, model_id=None, k=None, max_iterations=None, seed=None,
transform="NONE",
use_all_factor_levels=False,
pca_method="GramSVD",
pca_impl="mtj_evd_symmmatrix",
ignore_const_cols=True,
impute_missing=False,
compute_metrics=True):
"""
Principal Components Analysis
:param str model_id: The unique hex key assigned to the resulting model. Automatically generated if
none is provided.
:param int k: The number of principal components to be computed. This must be between ``1`` and
``min(ncol(training_frame), nrow(training_frame))`` inclusive.
:param str transform: A character string that indicates how the training data should be transformed
before running PCA. Possible values are:
- ``"NONE"``: for no transformation,
- ``"DEMEAN"``: for subtracting the mean of each column,
- ``"DESCALE"``: for dividing by the standard deviation of each column,
- ``"STANDARDIZE"``: for demeaning and descaling, and
- ``"NORMALIZE"``: for demeaning and dividing each column by its range (max - min).
:param int seed: Random seed used to initialize the right singular vectors at the beginning of each
power method iteration.
:param int max_iterations: The maximum number of iterations when pca_method is "Power".
:param bool use_all_factor_levels: A logical value indicating whether all factor levels should be included
in each categorical column expansion. If False, the indicator column corresponding to the first factor
level of every categorical variable will be dropped. Default is False.
:param str pca_method: A character string that indicates how PCA should be calculated. Possible values are:
- ``"GramSVD"``: distributed computation of the Gram matrix followed by a local SVD using the JAMA package,
- ``"Power"``: computation of the SVD using the power iteration method,
- ``"GLRM"``: fit a generalized low rank model with an l2 loss function (no regularization) and solve for
the SVD using local matrix algebra.
- ``"Randomized"``: computation of the SVD using the randomized method from thesis of Nathan P. Halko,
Randomized methods for computing low-rank approximation of matrices.
:param str pca_impl: A character string that indicates the implementation to use for
computing PCA (via SVD or EVD).
- ``"mtj_evd_densematrix"``: eigenvalue decompositions for dense matrix using MTJ
- ``"mtj_evd_symmmatrix"``: eigenvalue decompositions for symmetric matrix using MTJ
- ``"mtj_svd_densematrix"``: singular-value decompositions for dense matrix using MTJ
- ``"jama"``: eigenvalue decompositions for dense matrix using JAMA
References:
- JAMA: http://math.nist.gov/javanumerics/jama/
- MTJ: https://github.com/fommil/matrix-toolkits-java/
One of the following implementations are available: ``"mtj_evd_densematrix"``,
``"mtj_evd_symmmatrix"``, ``"mtj_svd_densematrix"``, ``"jama"`` (default: ``"mtj_evd_symmmatrix"``).
:param bool ignore_const_cols: If true, will ignore constant columns. Default is True.
:param bool impute_missing: whether to impute NA/missing values.
:param bool compute_metrics: whether to compute metrics on training data. Default is True.
:returns: A new instance of H2OPCA.
"""
super(H2OPCA, self).__init__()
self._parms = locals()
self._parms = {k: v for k, v in self._parms.items() if k != "self"}
assert_is_type(pca_method, Enum("GramSVD", "Power", "GLRM", "Randomized"))
self._parms["pca_method"] = pca_method
assert_is_type(pca_impl, Enum("MTJ_EVD_DENSEMATRIX", "MTJ_EVD_SYMMMATRIX", "MTJ_SVD_DENSEMATRIX", "JAMA"))
self._parms["pca_impl"] = pca_impl
assert_is_type(transform, Enum("NONE", "DEMEAN", "DESCALE", "STANDARDIZE", "NORMALIZE"))
self._parms["transform"] = transform
def fit(self, X, y=None, **params):
return super(H2OPCA, self).fit(X)
def transform(self, X, y=None, **params):
"""
Transform the given H2OFrame with the fitted PCA model.
:param H2OFrame X: May contain NAs and/or categorical data.
:param H2OFrame y: Ignored for PCA. Should be None.
:param params: Ignored.
:returns: The input H2OFrame transformed by the Principal Components.
"""
return self.predict(X)
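# Minimal usage sketch for H2OPCA (assumes a running H2O cluster and a hypothetical
# CSV path; neither is part of this module):
#
#   import h2o
#   h2o.init()
#   frame = h2o.import_file("some_dataset.csv")   # hypothetical file
#   pca = H2OPCA(k=3, transform="STANDARDIZE")
#   pca.train(training_frame=frame)               # generic H2OEstimator training entry point
#   scores = pca.predict(frame)                   # rows projected onto the 3 principal components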
class H2OSVD(H2OEstimator):
"""Singular Value Decomposition"""
algo = "svd"
def __init__(self, nv=None, max_iterations=None, transform=None, seed=None,
use_all_factor_levels=None, svd_method="GramSVD"):
"""
Singular value decomposition of an H2OFrame.
:param int nv: The number of right singular vectors to be computed. This must be between 1 and
            min(ncol(training_frame), nrow(training_frame)) inclusive.
:param int max_iterations: The maximum number of iterations to run each power iteration loop. Must be
between 1 and 1e6 inclusive.
:param str transform: A character string that indicates how the training data should be transformed
before running SVD. Possible values are:
- ``"NONE"``: for no transformation,
- ``"DEMEAN"``: for subtracting the mean of each column,
- ``"DESCALE"``: for dividing by the standard deviation of each column,
- ``"STANDARDIZE"``: for demeaning and descaling, and
- ``"NORMALIZE"``: for demeaning and dividing each column by its range (max - min).
:param int seed: Random seed used to initialize the right singular vectors at the beginning of each
power method iteration.
:param bool use_all_factor_levels: A logical value indicating whether all factor levels should be included
in each categorical column expansion. If False, the indicator column corresponding to the first factor
level of every categorical variable will be dropped. Defaults to True.
:param str svd_method: A character string that indicates how SVD should be calculated. Possible values are:
- ``"GramSVD"``: distributed computation of the Gram matrix followed by a local SVD
using the JAMA package,
- ``"Power"``: computation of the SVD using the power iteration method,
- ``"Randomized"``: approximate SVD by projecting onto a random subspace.
:returns: a new H2OSVD model
"""
super(H2OSVD, self).__init__()
self._parms = locals()
self._parms = {k: v for k, v in self._parms.items() if k != "self"}
assert_is_type(svd_method, Enum("GramSVD", "Power", "GLRM", "Randomized"))
self._parms["svd_method"] = svd_method
assert_is_type(transform, Enum("NONE", "DEMEAN", "DESCALE", "STANDARDIZE", "NORMALIZE"))
self._parms["transform"]=transform
self._parms['_rest_version'] = 99
def fit(self, X, y=None, **params):
return super(H2OSVD, self).fit(X)
def transform(self, X, y=None, **params):
"""
Transform the given H2OFrame with the fitted SVD model.
:param H2OFrame X: May contain NAs and/or categorical data.
:param H2OFrame y: Ignored for SVD. Should be None.
:param params: Ignored.
:returns: The input H2OFrame transformed by the SVD.
"""
return self.predict(X)
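# Minimal usage sketch for H2OSVD (same hypothetical cluster/frame assumptions as the
# H2OPCA sketch above):
#
#   svd = H2OSVD(nv=4, transform="DEMEAN", svd_method="GramSVD")
#   svd.train(training_frame=frame)
#   projected = svd.predict(frame)   # input projected onto the right singular vectors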
|
{
"content_hash": "6ff7cba80910109eb2ddb7823b1f1f29",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 119,
"avg_line_length": 50.7,
"alnum_prop": 0.6394230769230769,
"repo_name": "ryfeus/lambda-packs",
"id": "0326903b24beddcd1cf2e69355f07d7ca44f0ae9",
"size": "8112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "H2O/ArchiveH2O/h2o/transforms/decomposition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
from mock import patch
from django.template.loader import get_template
from mozillians.groups.tasks import email_membership_change
from nose.tools import eq_, ok_
from django.conf import settings
from mozillians.common.tests import TestCase
from mozillians.groups import tasks
from mozillians.groups.models import Group, GroupMembership, Skill
from mozillians.groups.tests import GroupFactory, SkillFactory
from mozillians.users.tests import UserFactory
class SendPendingMembershipEmailsTests(TestCase):
def test_remove_empty_groups(self):
user = UserFactory.create()
group_1 = GroupFactory.create()
GroupFactory.create()
skill_1 = SkillFactory.create()
SkillFactory.create()
group_1.add_member(user.userprofile)
skill_1.members.add(user.userprofile)
tasks.remove_empty_groups()
eq_(Group.objects.all().count(), 1)
ok_(Group.objects.filter(id=group_1.id).exists())
eq_(Skill.objects.all().count(), 1)
ok_(Skill.objects.filter(id=skill_1.id).exists())
def test_sending_pending_email(self):
# If a curated group has a pending membership, added since the reminder email
# was last sent, send the curator an email. It should contain the count of
# all pending memberships.
curator = UserFactory.create()
group = GroupFactory.create(curator=curator.userprofile)
# Add a couple of pending memberships
group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
tasks.send_pending_membership_emails()
ok_(mock_send_mail.called)
# Should only have been called once
eq_(1, len(mock_send_mail.call_args_list))
# The message body should mention that there are 2 pending memberships
subject, body, from_addr, to_list = mock_send_mail.call_args[0]
eq_('2 outstanding requests to join Mozillians group "%s"' % group.name, subject)
ok_('There are 2 outstanding requests' in body)
# Full path to group page is in the message
ok_(group.get_absolute_url() in body)
ok_(curator.email in to_list)
# Add another pending membership
group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
# Should send email again
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
tasks.send_pending_membership_emails()
ok_(mock_send_mail.called)
def test_sending_pending_email_singular(self):
# If a curated group has exactly one pending membership, added since the reminder email
# was last sent, send the curator an email. It should contain the count of
# all pending memberships, which should be one, and should use the singular text.
curator = UserFactory.create()
group = GroupFactory.create(curator=curator.userprofile)
# Add one pending membership
group.add_member(UserFactory.create().userprofile, GroupMembership.PENDING)
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
tasks.send_pending_membership_emails()
ok_(mock_send_mail.called)
            # The message body should mention that there is 1 pending membership
subject, body, from_addr, to_list = mock_send_mail.call_args[0]
eq_('1 outstanding request to join Mozillians group "%s"' % group.name, subject)
ok_('There is 1 outstanding request' in body)
# Full path to group page is in the message
ok_(group.get_absolute_url() in body)
ok_(curator.email in to_list)
def test_sending_pending_email_already_sent(self):
# If a curated group has a pending membership, but it was added before the
# last time a reminder email was sent, do not send the curator an email.
# curated group:
group = GroupFactory.create(curator=UserFactory.create().userprofile)
# Pending membership
user1 = UserFactory.create()
group.add_member(user1.userprofile, GroupMembership.PENDING)
membership = GroupMembership.objects.get(userprofile=user1.userprofile, group=group)
membership.save()
# Send email. This should update the field remembering the max pending request pk.
tasks.send_pending_membership_emails()
# Non-pending membership
user2 = UserFactory.create()
group.add_member(user2.userprofile, GroupMembership.MEMBER)
# None of this should trigger an email send
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
tasks.send_pending_membership_emails()
ok_(not mock_send_mail.called)
def test_sending_pending_email_non_curated(self):
# If a non-curated group has a pending membership, do not send anyone an email
group = GroupFactory.create()
user = UserFactory.create()
group.add_member(user.userprofile, GroupMembership.PENDING)
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
tasks.send_pending_membership_emails()
ok_(not mock_send_mail.called)
class EmailMembershipChangeTests(TestCase):
def setUp(self):
self.group = GroupFactory.create(curator=UserFactory.create().userprofile)
self.user = UserFactory.create()
def test_member_accepted(self):
template_name = 'groups/email/accepted.txt'
template = get_template(template_name)
with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
mock_get_template.return_value = template
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
email_membership_change(self.group.pk, self.user.pk,
GroupMembership.PENDING, GroupMembership.MEMBER)
ok_(mock_send_mail.called)
ok_(mock_get_template.called)
eq_(template_name, mock_get_template.call_args[0][0])
subject, body, from_addr, to_list = mock_send_mail.call_args[0]
eq_(settings.FROM_NOREPLY, from_addr)
eq_([self.user.email], to_list)
eq_('Accepted to Mozillians group "%s"' % self.group.name, subject)
ok_('You have been accepted' in body)
def test_member_rejected(self):
template_name = 'groups/email/rejected.txt'
template = get_template(template_name)
with patch('mozillians.groups.tasks.get_template', autospec=True) as mock_get_template:
mock_get_template.return_value = template
with patch('mozillians.groups.tasks.send_mail', autospec=True) as mock_send_mail:
email_membership_change(self.group.pk, self.user.pk,
GroupMembership.PENDING, None)
ok_(mock_send_mail.called)
ok_(mock_get_template.called)
eq_(template_name, mock_get_template.call_args[0][0])
subject, body, from_addr, to_list = mock_send_mail.call_args[0]
eq_(settings.FROM_NOREPLY, from_addr)
eq_([self.user.email], to_list)
eq_('Not accepted to Mozillians group "%s"' % self.group.name, subject)
ok_('You have not been accepted' in body)
|
{
"content_hash": "28f9c94805318bba896f4cac2256be0d",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 95,
"avg_line_length": 46.65625,
"alnum_prop": 0.6711319490957803,
"repo_name": "ChristineLaMuse/mozillians",
"id": "5909156490a83d2ca2b8cf81252fa876832a5494",
"size": "7465",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mozillians/groups/tests/test_tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "204258"
},
{
"name": "JavaScript",
"bytes": "89948"
},
{
"name": "Makefile",
"bytes": "5580"
},
{
"name": "Python",
"bytes": "8218039"
},
{
"name": "Shell",
"bytes": "13340"
}
],
"symlink_target": ""
}
|
import glob
import os
import queue
import subprocess
import sys
import threading
import time
script_dir = os.path.dirname(os.path.realpath(__file__))
# Test executable
process_command = os.environ.get(
'CLIENT_SERVER_DYNAMIC_DISCOVERY_BIN')
if not process_command:
process_files = glob.glob(
os.path.join(
script_dir,
'**/DDSParticipantDiscovery*'),
recursive=True)
pf = iter(process_files)
process_command = next(pf, None)
while process_command and \
(not os.path.isfile(process_command)
or not os.access(process_command,
os.X_OK)):
process_command = next(pf, None)
assert(process_command)
# Thread that read process output and push it into a queue
def output_reader(proc, outq):
for line in iter(proc.stdout.readline, b''):
outq.put(line.decode('utf-8'))
def first_step(outq):
first_step_fulfilled = False
server_1_discover_client = False
count = 0
initial_time = time.time()
while not first_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
assert '44.53.00.5f.45.50.52.4f.53.49.4d.41' in line
assert 'discovered participant' in line
count = count + 1
if 'discovered participant 44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 1' in line:
print('CLIENT OVERRIDE discovered SERVER 1')
server_1_discover_client = True
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if server_1_discover_client and count >= 2 and (time.time() - initial_time) > 2:
if count == 2:
first_step_fulfilled = True
else:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def second_step(outq):
second_step_fulfilled = False
server_2_discover_client = False
client_2_warning = False
count = 0
initial_time = time.time()
while not second_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'discovered participant' in line:
assert '44.53.01.5f.45.50.52.4f.53.49.4d.41' in line
count = count + 1
if 'discovered participant 44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
print('CLIENT OVERRIDE discovered SERVER 2')
server_2_discover_client = True
elif 'Warning' in line:
# Client 2 does not discover anyone
assert 'Trying to add Discovery Servers to a participant which is not a SERVER, BACKUP or an' in line
assert 'overriden CLIENT (SIMPLE participant transformed into CLIENT with the environment variable)' in line
client_2_warning = True
else:
assert 'detected changes on participant' in line
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if server_2_discover_client and client_2_warning and count >= 2 and (time.time() - initial_time) > 2:
if count == 2:
second_step_fulfilled = True
else:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def third_step(outq):
third_step_fulfilled = False
server_1_discover_server_2 = False
server_2_discover_server_1 = False
initial_time = time.time()
while not third_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'Participant 44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1 discovered participant' in line and \
'44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
print ('SERVER 2 discovers SERVER 1')
server_2_discover_server_1 = True
elif 'Participant 44.53.00.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1 discovered participant' in line and \
'44.53.01.5f.45.50.52.4f.53.49.4d.41|0.0.1.c1: 2' in line:
print ('SERVER 1 discovers SERVER 2')
server_1_discover_server_2 = True
else:
assert 'detected changes on participant' in line
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if server_1_discover_server_2 and server_2_discover_server_1 and (time.time() - initial_time) > 2:
third_step_fulfilled = True
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def fourth_step(outq):
fourth_step_fulfilled = False
warning_client_1 = False
warning_client_2 = False
count = 0
initial_time = time.time()
while not fourth_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'Trying to add Discovery Servers to a participant which is not a SERVER, BACKUP or an' in line \
and 'overriden CLIENT (SIMPLE participant transformed into CLIENT with the environment variable)' in line:
warning_client_2 = True
elif 'Discovery Servers cannot add/modify their locators' in line:
warning_client_1 = True
elif 'discovered participant' in line:
count = count + 1
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if warning_client_1 and warning_client_2 and count == 0 and (time.time() - initial_time) > 2:
fourth_step_fulfilled = True
elif count > 0:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def fifth_step(outq):
fifth_step_fulfilled = False
warning = False
count = 0
initial_time = time.time()
while not fifth_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'Discovery Servers cannot add/modify their locators' in line:
warning = True
elif 'discovered participant' in line:
count = count + 1
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if warning and count == 0 and (time.time() - initial_time) > 2:
fifth_step_fulfilled = True
elif count > 0:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def sixth_step(outq):
sixth_step_fulfilled = False
warning_client_1 = False
warning_client_2 = False
count = 0
initial_time = time.time()
while not sixth_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'Trying to add Discovery Servers to a participant which is not a SERVER, BACKUP or an' in line \
and 'overriden CLIENT (SIMPLE participant transformed into CLIENT with the environment variable)' in line:
warning_client_2 = True
elif 'Discovery Servers cannot be removed from the list; they can only be added' in line:
warning_client_1 = True
elif 'discovered participant' in line:
count = count + 1
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if warning_client_1 and warning_client_2 and count == 0 and (time.time() - initial_time) > 2:
sixth_step_fulfilled = True
elif count > 0:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def seventh_step(outq):
seventh_step_fulfilled = False
warning = False
count = 0
initial_time = time.time()
while not seventh_step_fulfilled:
global stop_threads
if stop_threads:
break
try:
line = outq.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if 'Discovery Servers cannot be removed from the list; they can only be added' in line:
warning = True
elif 'discovered participant' in line:
count = count + 1
except queue.Empty:
# Ensure that 2 s has passed so the file watch can detect that the file has changed
if warning and count == 0 and (time.time() - initial_time) > 2:
seventh_step_fulfilled = True
elif count > 0:
print('ERROR: More discoveries than expected')
stop_threads = True
sys.exit(1)
sys.stdout.flush()
except AssertionError:
print('ASSERTION ERROR: ' + line)
stop_threads = True
sys.exit(1)
time.sleep(0.1)
def exit(cv):
cv.release()
print('ERROR: timeout without expected discovery happening')
global stop_threads
stop_threads = True
os.remove(server_1_env_file)
os.remove(server_2_env_file)
os.remove(client_env_file)
sys.exit(1)
def communication(proc, outq, outt, cv):
"""A"""
t = threading.Thread(target=output_reader, args=(proc,outq))
t.start()
try:
time.sleep(0.2)
while True:
global stop_threads
if stop_threads:
break
try:
line = outt.get(block=False).rstrip()
print(line)
sys.stdout.flush()
if "FIRST STEP" in line:
first_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "SECOND STEP" in line:
second_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "THIRD STEP" in line:
third_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "FOURTH STEP" in line:
fourth_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "FIFTH STEP" in line:
fifth_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "SIXTH STEP" in line:
sixth_step(outq)
cv.acquire()
cv.notify()
cv.release()
elif "SEVENTH STEP" in line:
seventh_step(outq)
cv.acquire()
cv.notify()
cv.release()
except queue.Empty:
sys.stdout.flush()
time.sleep(0.1)
finally:
proc.terminate()
while outq.empty() != True:
line = outq.get(block=False).rstrip()
print(line)
t.join()
# Random unicast port
random_port_server_1 = os.environ.get(
'W_UNICAST_PORT_RANDOM_NUMBER')
random_port_server_2 = str(int(random_port_server_1) + 1)
# Condition variable
cv = threading.Condition()
# Environment files
server_1_env_file = "server_1_env_file.json"
server_2_env_file = "server_2_env_file.json"
client_env_file = "client_env_file.json"
# Both server environment files are created empty
open(server_1_env_file, 'w+').close()
open(server_2_env_file, 'w+').close()
# Client environment file should include the locator for the first server
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": "localhost:')
f.write(random_port_server_1)
f.write('"}')
f.close()
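# For reference, the client environment file written above has the form
# (the port is the randomly chosen value; 11811 is only illustrative):
#   {"ROS_DISCOVERY_SERVER": "localhost:11811"}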
outq = queue.Queue()
outt = queue.Queue()
outt.put("TEST RUNNING\n")
outt.put("FIRST STEP: Override Client discovers Server 1\n")
server_1_process = subprocess.Popen([process_command,
"--discovery_protocol", "SERVER",
"--guid_prefix", "44.53.00.5F.45.50.52.4F.53.49.4D.41",
"--unicast_metatraffic_locator", random_port_server_1],
env={"FASTDDS_ENVIRONMENT_FILE": server_1_env_file},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
server_2_process = subprocess.Popen([process_command,
"--discovery_protocol", "SERVER",
"--guid_prefix", "44.53.01.5F.45.50.52.4F.53.49.4D.41",
"--unicast_metatraffic_locator", random_port_server_2],
env={"FASTDDS_ENVIRONMENT_FILE": server_2_env_file},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# The client must be DiscoveryProtocol::SIMPLE to use the environment variable
client_override_process = subprocess.Popen(process_command,
env={"FASTDDS_ENVIRONMENT_FILE": client_env_file},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# DiscoveryProtocol::CLIENT, environment variable does not apply either initializing as updating
client_process = subprocess.Popen([process_command,
"--discovery_protocol", "CLIENT"],
env={"FASTDDS_ENVIRONMENT_FILE": client_env_file},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stop_threads = False
t_0 = threading.Thread(target=communication, args=(server_1_process,outq,outt,cv))
t_1 = threading.Thread(target=communication, args=(server_2_process,outq,outt,cv))
t_2 = threading.Thread(target=communication, args=(client_override_process,outq,outt,cv))
t_3 = threading.Thread(target=communication, args=(client_process,outq,outt,cv))
t_0.start()
t_1.start()
t_2.start()
t_3.start()
# Wait 10 seconds for the condition variable to be notified
cv.acquire()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("SECOND STEP: Override Client discovers both Servers. Normal Client does not discover anyone\n")
# Add second server to client
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": "localhost:')
f.write(random_port_server_1)
f.write(';localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("THIRD STEP: Both Servers discover each other\n")
# Add second server to first server
f = open(server_1_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ";localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("FOURTH STEP: Changing a Server locator from the Client list outputs a Log Warning\n")
# Change first server locator from client list
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": "localhost:11811;localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("FIFTH STEP: Changing a Server locator from the Server list outputs a Log Warning\n")
# Change server locator from server list
f = open(server_1_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ";localhost:11811"}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("SIXTH STEP: Removing a Server from the Client list outputs a Log Warning\n")
# Remove first server from client list
f = open(client_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ";localhost:')
f.write(random_port_server_2)
f.write('"}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("SEVENTH STEP: Removing a Server from the Server list outputs a Log Warning\n")
# Remove server from the server 1 list
f = open(server_1_env_file, 'w+')
f.write('{"ROS_DISCOVERY_SERVER": ""}')
f.close()
result = cv.wait(10)
if result == False:
exit(cv)
outt.put("Killing processes\n")
cv.release()
# Kill processes
stop_threads = True
t_0.join()
t_1.join()
t_2.join()
t_3.join()
# Delete files
os.remove(server_1_env_file)
os.remove(server_2_env_file)
os.remove(client_env_file)
|
{
"content_hash": "6242e66e38f57eddf5f6a5718931c1ed",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 124,
"avg_line_length": 34.06896551724138,
"alnum_prop": 0.5820400359874044,
"repo_name": "eProsima/Fast-RTPS",
"id": "6d26963f092124d5fc25d04359bc048e2c855e28",
"size": "18400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/dds/discovery/client_server_dynamic_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7924610"
},
{
"name": "C++",
"bytes": "11331003"
},
{
"name": "CMake",
"bytes": "256031"
},
{
"name": "CSS",
"bytes": "28054"
},
{
"name": "HTML",
"bytes": "2698"
},
{
"name": "M4",
"bytes": "16089"
},
{
"name": "Makefile",
"bytes": "59826"
},
{
"name": "NSIS",
"bytes": "13213"
},
{
"name": "Python",
"bytes": "44640"
},
{
"name": "Shell",
"bytes": "3586"
},
{
"name": "Thrift",
"bytes": "6041"
}
],
"symlink_target": ""
}
|
import os
from pilot.control.payloads import generic
from pilot.util.container import execute
import logging
logger = logging.getLogger(__name__)
class Executor(generic.Executor):
def __init__(self, args, job, out, err, traces):
super(Executor, self).__init__(args, job, out, err, traces)
def untar_file(self, lfn, job):
pfn = os.path.join(job.workdir, lfn)
command = "tar -xf %s -C %s" % (pfn, job.workdir)
logger.info("Untar file: %s" % command)
exit_code, stdout, stderr = execute(command)
logger.info("exit_code: %s, stdout: %s, stderr: %s\n" % (exit_code, stdout, stderr))
def utility_before_payload(self, job):
"""
Functions to run before payload
Note: this function updates job.jobparams (process_writetofile() call)
:param job: job object
"""
logger.info("untar input tar files for eventservicemerge job")
for fspec in job.indata:
if fspec.is_tar:
self.untar_file(fspec.lfn, job)
logger.info("Processing writeToFile for eventservicemerge job")
job.process_writetofile()
super(Executor, self).utility_before_payload(job)
|
{
"content_hash": "abf88dd5caa6f6c07030cb229c1f473b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 92,
"avg_line_length": 31.763157894736842,
"alnum_prop": 0.6296603148301574,
"repo_name": "PalNilsson/pilot2",
"id": "a23c00b2e5d7e8e775487d3760bdc784ba5c744a",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pilot/control/payloads/eventservicemerge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1098187"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
}
|
'''
Author: Bu Kun
E-mail: bukun@osgeo.cn
CopyRight: http://www.yunsuan.org
'''
from torlite.model.core_tab import CabCatalog
class MCatalog():
def __init__(self):
self.tab = CabCatalog
try:
CabCatalog.create_table()
except:
pass
def query_all(self, with_count=False, by_order=False):
if with_count == True:
recs = self.tab.select().order_by(self.tab.post_count.desc())
elif by_order == True:
recs = self.tab.select().order_by(self.tab.order)
else:
recs = self.tab.select().order_by(self.tab.name)
return (recs)
def query_field_count(self, limit_num):
recs = self.tab.select().order_by(self.tab.app_count.desc()).limit(limit_num)
return (recs)
def get_by_slug(self, slug):
return self.tab.get(slug=slug)
def get_by_id(self, idd):
return self.tab.get(id_cat=idd)
def update_app_catalog_num(self, cat_id, num):
entry = self.tab.update(
app_count=num,
).where(self.tab.uid == cat_id)
entry.execute()
def update_post_catalog_num(self, cat_id, num):
entry = self.tab.update(
post_count=num,
).where(self.tab.uid == cat_id)
entry.execute()
def initial_db(self, post_data):
entry = self.tab.create(
name=post_data['name'],
id_cat=post_data['id_cat'],
slug=post_data['slug'],
order=post_data['order'],
)
|
{
"content_hash": "526fe1503dbee173bbb32c684b8ffe06",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 28.196428571428573,
"alnum_prop": 0.538948701709943,
"repo_name": "jiaxiaolei/TorCMS",
"id": "190aba2b5d7b6d67e9be78ee70ea2c6488cfed6e",
"size": "1603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torlite/model/mcatalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "172628"
},
{
"name": "HTML",
"bytes": "100456"
},
{
"name": "JavaScript",
"bytes": "112477"
},
{
"name": "PLpgSQL",
"bytes": "339347"
},
{
"name": "Python",
"bytes": "147076"
},
{
"name": "Shell",
"bytes": "505"
}
],
"symlink_target": ""
}
|
"""
oauthlib.oauth1.rfc5849.endpoints.signature_only
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the signing logic of OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
import logging
from .. import errors
from .base import BaseEndpoint
log = logging.getLogger(__name__)
class SignatureOnlyEndpoint(BaseEndpoint):
"""An endpoint only responsible for verifying an oauth signature."""
def validate_request(self, uri, http_method='GET',
body=None, headers=None):
"""Validate a signed OAuth request.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error as err:
log.info(
'Exception caught while validating request, %s.' % err)
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error as err:
log.info(
'Exception caught while validating request, %s.' % err)
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request):
log.debug('[Failure] verification failed: timestamp/nonce')
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid signature: %s", valid_signature)
return v, request
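# Minimal usage sketch (the validator subclass, URI and header below are hypothetical):
#
#   validator = MyRequestValidator()              # a concrete oauthlib RequestValidator
#   endpoint = SignatureOnlyEndpoint(validator)
#   valid, request = endpoint.validate_request(
#       'https://api.example.com/photos?size=original',
#       http_method='GET',
#       headers={'Authorization': 'OAuth oauth_consumer_key="...", oauth_signature="..."'})
#   # `valid` is True only when both the client key and the signature check out.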
|
{
"content_hash": "8504834591b3d2c39e7c75002f70f0ba",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 40.493975903614455,
"alnum_prop": 0.6349300803332342,
"repo_name": "kylebebak/Requester",
"id": "42977706d0ba8cc7d546a7b10f505bf0279ad697",
"size": "3385",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "deps/oauthlib/oauth1/rfc5849/endpoints/signature_only.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178949"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('phone_numbers', '0001_initial'),
('sims', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='phonenumber',
name='related_sim',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='sims.Sim'),
),
]
|
{
"content_hash": "7779c9b52e7c495a6993e0b454bd49b4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 137,
"avg_line_length": 25.227272727272727,
"alnum_prop": 0.6252252252252253,
"repo_name": "RobSpectre/garfield",
"id": "9e531d028911f49dbacc023fe83a8d0b0a7984a5",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garfield/phone_numbers/migrations/0002_phonenumber_related_sim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7324"
},
{
"name": "Python",
"bytes": "339908"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.fixture(scope='function', autouse=True)
def mock_settings(monkeypatch):
class Settings:
ALIASES = ''
def __getattr__(self, item):
return None
settings = Settings()
monkeypatch.setattr('slackbot.settings', settings)
monkeypatch.setattr('slackbot.dispatcher.settings', settings)
|
{
"content_hash": "843f4fde017a2b25c44c36c6425f608b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 65,
"avg_line_length": 24.785714285714285,
"alnum_prop": 0.6714697406340058,
"repo_name": "pengzhangdev/slackbot",
"id": "a5ac345111248ab9d2b079a99b682d389d5a8828",
"size": "347",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/unit/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "288383"
},
{
"name": "Shell",
"bytes": "3443"
}
],
"symlink_target": ""
}
|
import json
import logging
import random
import urllib
import urllib2
#app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# Our own imports
import bds
import comandos as c
import preGame as p
import game as g
# TEST TOKEN: 105794279:AAEZQkZX-HnXHMBG8NHkc0CWyDjvpOnHM-U
# REAL BOT TOKEN: 130009542:AAHNWctXOV5L_BPf7TTnFTgmQi6O7zD89Rw ***ONLY CHANGE AFTER TESTING ALL THE BUGS***
# Telegram bot TOKEN
TOKEN = '105794279:AAEZQkZX-HnXHMBG8NHkc0CWyDjvpOnHM-U'
# Base URL used by the webhook system
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# Current version
VERSION = '2.a'
# Our IDs
creators = ['112228809', '112255461']
# Supported languages
linguas = ['português(br)', 'english(us)']
# ==================================================
# Methods that set up the connection between Telegram and App Engine
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
# Function that checks whether forca_bot was removed from the chat or whether it still exists in the DB
def verifyBot(left_chat_participant = None):
if left_chat_participant:
first_name = left_chat_participant['first_name'].encode('utf-8')
if first_name == '@hangerbot':
bds.delChat(chat_id)
return
return
# Data we receive from Telegram
update_id = body['update_id']
message = body['message']
message_id = str(message.get('message_id')).encode('utf-8')
left_chat_participant = message.get('left_chat_participant')
new_chat_participant = message.get('new_chat_participant')
group_chat_created = message.get('group_chat_created')
date = message.get('date')
text = message.get('text').encode('utf-8').lower() if message.get('text') else message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = str(chat['id'])
user_id = message['from']
u_id = str(user_id.get('id')).encode('utf-8')
u_name = user_id.get('first_name').encode('utf-8')
# Run the verification
if left_chat_participant:
verifyBot(left_chat_participant = left_chat_participant)
if not text:
logging.info('no text')
return
# Function that sends the dict to Telegram
def reply(dict = None):
if dict:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode(dict)).read()
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
# Read the settings
def getLanguage(chat_id):
    s = bds.getSettings(chat_id)  # Settings class
if s:
if s.language == 'ptBR':
import ptBR as l
return l
elif s.language == 'enUS':
import enUS as l
return l
else:
bds.checkChat(chat_id)
        s = bds.getSettings(chat_id)  # Settings class
if s.language == 'ptBR':
import ptBR as l
return l
elif s.language == 'enUS':
import enUS as l
return l
return
# The main logic starts here
l = getLanguage(chat_id)
s = bds.getSettings(chat_id)
ab = bds.getArriscarBlock(chat_id)
first = bds.getFirstWelcome(chat_id)[0]
rpl = [c.toDict(chat_id, 'comando não reconhecido')]
text = '/start' if text == l.ligar.lower() else text  # Handle the /start case
text = l.ajuda.lower() if text.startswith('/help') else text
text = l.desligar.lower() if text.startswith('/stop') else text
if text.startswith('@ccuem_bot'):
text = text[11:]
if u_id in creators:
if text.startswith('/delchatadmin'):
chat = text[14:]
if len(chat) > 0:
            if bds.delChat(chat):
                rpl = [c.toDict(chat_id, 'Chat '+chat+' deletado')]
            else:
                rpl = [c.toDict(chat_id, 'Chat '+chat+' não existe')]
if (not s.waiting) or first:
    # commands that don't depend on the current game state
if '/start' in text:
rpl = c.start(chat_id, u_id, message_id, first)
elif bds.getEnabled(chat_id):
if '/kb' in text:
rpl = c.kb(chat_id, u_id, message_id)
elif l.desligar.lower() in text:
rpl = c.stop(chat_id)
elif l.ajuda.lower() in text:
rpl = c.ajuda(chat_id)
elif l.rank.lower() in text:
rpl = c.rank(chat_id)
elif l.config.lower() in text:
rpl = c.config(chat_id, message_id)
elif l.voltar.lower() in text:
rpl = c.voltar(chat_id, l.voltar_msg, message_id, u_id)
elif l.comandos.lower() in text:
rpl = c.comandos(chat_id, message_id, u_id)
        # in-game commands
elif bds.getInGame(chat_id):
check = bds.checkUid(chat_id, u_id)
if l.cancelar_jogo.lower() in text:
rpl = g.cancelarJogo(chat_id, u_id)
elif check == True:
if bds.getArriscarBlock(chat_id):
rpl = g.arriscarPalavra2(chat_id, u_id, u_name, message_id, text)
elif l.arriscar.lower() in text:
rpl = g.arriscarPalavra1(chat_id, u_id, message_id)
elif (len(text) == 1) or (text.startswith('@ccuemBot')):
if text.startswith('@ccuemBot'):
text = text[10:]
rpl = g.chutarLetra(chat_id, u_id, message_id, text)
elif check == 'rnd':
rpl = [c.toDict(chat_id, l.round_errado_msg)]
elif check == 'out':
rpl = [c.toDict(chat_id, l.fora_msg)]
        # pre-game commands
elif bds.getPreGame(chat_id):
if l.entrar.lower() in text:
rpl = p.entrar(chat_id, u_id, u_name, message_id)
elif l.sair.lower() in text:
rpl = p.sair(chat_id, u_id, u_name, message_id)
elif l.fechar_jogo.lower() in text:
rpl = p.fecharJogo(chat_id, u_id, message_id)
elif l.cancelar_jogo.lower() in text:
rpl = p.cancelarJogo(chat_id, u_id)
        # if preGame and inGame are both False (see flowchart)
elif (not bds.getPreGame(chat_id)) and (not bds.getInGame(chat_id)):
if l.novojogo.lower() in text:
rpl = c.novojogo(chat_id, u_id, u_name, message_id)
else:
if l.ajuda.lower() in text:
rpl = c.ajuda(chat_id)
else:
rpl = c.changeLanguage(chat_id, text, message_id, u_id)
try:
for i in range(len(rpl)):
reply(rpl[i])
except Exception, e:
print e
reply(c.toDict(chat_id, 'erro'))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
|
{
"content_hash": "2e30e0eb314fecced9b9057edc947cce",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 128,
"avg_line_length": 40.39252336448598,
"alnum_prop": 0.5321610365571494,
"repo_name": "0Cristofer/PlayHangmanBot",
"id": "5926537fb64b76e1803c4fac078f8551e203e3b6",
"size": "8726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "61054"
}
],
"symlink_target": ""
}
|
"""
This is a Python client for the Lastline Analyst API.
The :py:class:`AnalysisClient` class implements the client side of the Lastline Analyst API
methods. It can be imported into Python client code that uses the API.
The client is available at https://analysis.lastline.com/docs/llapi_client/analysis_apiclient.py .
Requirements
+++++++++++++++++++
The Analysis API client requires:
- Python 3 (this copy of the client has been converted from the original Python 2.7 client).
- The python requests module (tested with version 2.2.1).
- The python simplejson module (tested with version 3.6.5).
- To use the client as a python shell, the ipython module (tested with version 2.4.1).
Required python modules can be installed using tools such as apt, pip, or easy_install, e.g.::
apt-get install python-pycurl=7.19.0-4ubuntu3
pip install ipython==2.4.1
easy_install requests==2.2.1
.. note::
You may want to consider installing the API client and its dependencies inside an isolated
environment, such as a container, schroot, or VirtualEnv. This allows experimenting with the
Lastline APIs without affecting system libraries/modules.
Changelog
+++++++++++++++++++++++
The changelog only reflects backwards-incompatible changes; new functionality
may not be reflected in all cases
- 2016-10-05: Stop download of full report details during submission
Submission functions, such as ``submit_file()``, ``submit_file_hash()``,
or ``submit_url()``, now default to
``full_report_score=ANALYSIS_API_NO_REPORT_DETAILS`` (constant for -1),
which disables automatic download of the full, detailed analysis report
if a cached result is immediately available.
To access the full analysis report, use ``get_result()`` with the task_uuid
returned as part of the submission result.
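    For example, the following flow (a minimal sketch; the ``AnalysisClient``
    constructor arguments and the exact layout of the submission result are
    assumptions) submits a URL and then explicitly fetches the full report::
        client = AnalysisClient(base_url, api_key, api_token)
        submission = client.submit_url("http://example.com/sample")
        task_uuid = submission["data"]["task_uuid"]
        report = client.get_result(task_uuid, full_report_score=0)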
- 2016-10-28: Move API client shell to dedicated script.
    The API client shell is now available via analysis_apiclient_shell.py, which provides
easier access to helper modules provided by the API client module.
Analysis Client Shell
+++++++++++++++++++++++
In addition to the client, an API shell allows running the client from the command line. This
provides an interactive shell for manually sending requests to the Lastline Analyst
API, and it can be used to experiment with the API for analyzing files or URLs. For details,
refer to the :ref:`API Client Shell documentation <analysis_client_shell>`.
"""
import configparser
import io
import cgi
import collections
import datetime
import hashlib
import http.client
import logging
import os
import requests
import simplejson
import ssl
import sys
import time
try:
from llapi_client import get_proxies_from_config
from llapi_client import llpcap_apiclient
except ImportError:
# Non-Lastline environment. Reading from config not support/needed.
get_proxies_from_config = None
llpcap_apiclient = None
# printing warnings if this module is used in the context of something else is not meaningful. Only
# if this script is invoked directly should we be printing warnings
if __name__ == "__main__":
try:
requests_version = requests.__version__
if not requests_version.startswith('2.2'):
raise Exception()
except Exception:
requests_version = '?'
print((
"Warning: Your version of requests ({}) might not be compatible with this "
"module.".format(requests_version)
), file=sys.stderr)
print("Officially supported are versions 2.2.x", file=sys.stderr)
# copied these values from Lastline utility code (llutils.api.error)
# to make them available to users of client code. please keep in sync!
ANALYSIS_API_FILE_NOT_AVAILABLE = 101
ANALYSIS_API_UNKNOWN_RESOURCE_TYPE = 102 # undocumented
ANALYSIS_API_UNKNOWN_ANALYSIS_TYPE = 103 # undocumented
ANALYSIS_API_INVALID_CREDENTIALS = 104
ANALYSIS_API_INVALID_UUID = 105
ANALYSIS_API_NO_RESULT_FOUND = 106
ANALYSIS_API_TEMPORARILY_UNAVAILABLE = 107
ANALYSIS_API_PERMISSION_DENIED = 108
ANALYSIS_API_FILE_TOO_LARGE = 109
ANALYSIS_API_INVALID_DOMAIN = 110 # undocumented
ANALYSIS_API_INVALID_BACKEND = 111 # undocumented
ANALYSIS_API_INVALID_D_METADATA = 112
ANALYSIS_API_INVALID_FILE_TYPE = 113
ANALYSIS_API_INVALID_ARTIFACT_UUID = 114
ANALYSIS_API_SUBMISSION_LIMIT_EXCEEDED = 115
ANALYSIS_API_INVALID_HASH_ALGORITHM = 116
ANALYSIS_API_INVALID_URL = 117
ANALYSIS_API_INVALID_REPORT_VERSION = 118
ANALYSIS_API_FILE_EXTRACTION_FAILED = 119
ANALYSIS_API_NO_IOC_EXTRACTABLE = 120
ANALYSIS_API_CHILD_TASK_CHAIN_TOO_DEEP = 121
ANALYSIS_API_AUTHENTICATION_REQUIRED = 122
ANALYSIS_API_DATA_NO_LONGER_AVAILABLE = 123
ANALYSIS_API_INVALID_PRIORITY = 124
# other consts
ANALYSIS_API_NO_REPORT_DETAILS = -1
ANALYSIS_API_EXPORT_REPORT_TYPE_OVERVIEW = 'OVERVIEW'
ANALYSIS_API_EXPORT_REPORT_TYPE_ALL = 'ALL'
ANALYSIS_API_EXPORT_REPORT_TYPE_FULL = 'FULL'
ANALYSIS_API_EXPORT_REPORT_TYPES = (
ANALYSIS_API_EXPORT_REPORT_TYPE_OVERVIEW,
ANALYSIS_API_EXPORT_REPORT_TYPE_ALL,
ANALYSIS_API_EXPORT_REPORT_TYPE_FULL
)
ANALYSIS_API_EXPORT_FORMAT_PDF = 'PDF'
ANALYSIS_API_EXPORT_REPORT_FORMATS = (ANALYSIS_API_EXPORT_FORMAT_PDF,)
class Error(Exception):
"""
Base exception class for this module.
"""
class WaitResultTimeout(Error):
"""
Waiting for results timed out.
"""
def __init__(self, msg="Waiting for results timed out"):
Error.__init__(self, msg)
class InvalidSubApiType(Error):
"""
Exception for invalid sub API operations.
The analysis API consists of a number of views (sub APIs):
(only analysis for now)
Operations involving parts other than these will
raise this exceptions.
"""
def __init__(self, sub_api_type):
Error.__init__(self)
self.sub_api_type = sub_api_type
def __str__(self):
return "Invalid sub API '%s', expecting one of (%s)" % (
self.sub_api_type,
','.join(AnalysisClientBase.SUB_APIS))
class InvalidFormat(Error):
"""
Invalid format requested.
"""
def __init__(self, requested_format):
Error.__init__(self)
self.format = requested_format
def __str__(self):
return "Requested Invalid Format '%s', expecting one of (%s)" % (
self.format,
','.join(AnalysisClientBase.FORMATS))
class CommunicationError(Error):
"""
Contacting Malscape failed.
"""
def __init__(self, msg=None, error=None):
Error.__init__(self, msg or error or '')
self.__error = error
def internal_error(self):
return self.__error
class FailedRequestError(CommunicationError):
"""
Exception class to group communication errors returned
on failed HTTP requests.
"""
def __init__(self, msg=None, error=None, status_code=None):
CommunicationError.__init__(self, msg, error)
self.__status_code = status_code
def status_code(self):
return self.__status_code
class InvalidAnalysisAPIResponse(Error):
"""
An AnalysisAPI response was not in the expected format
"""
class AnalysisAPIError(Error):
"""
Analysis API returned an error.
The `error_code` member of this exception
is the :ref:`error code returned by the API<error_codes>`.
"""
def __init__(self, msg, error_code):
Error.__init__(self)
self.msg = msg
self.error_code = error_code
def __str__(self):
if self.error_code:
return "Analysis API error (%s): %s" % (self.error_code, self.msg)
return "Analysis API error: %s" % self.msg
class RequestError(AnalysisAPIError):
"""
Exception class to group errors that are permanent request errors when
following the Lastline Analyst API protocol. These errors indicate a problem
with the request sent to the server - if you repeat the same request, you
cannot expect a different error.
This group excludes temporary errors, such as authentication problems.
"""
class SubmissionInvalidError(RequestError):
"""
Exception class to group errors that are permanent submission errors. See
`RequestError` for details.
"""
class FileNotAvailableError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_FILE_NOT_AVAILABLE):
AnalysisAPIError.__init__(self, msg, error_code)
class InvalidCredentialsError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_CREDENTIALS):
AnalysisAPIError.__init__(self, msg, error_code)
class InvalidUUIDError(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_UUID):
RequestError.__init__(self, msg, error_code)
class NoResultFoundError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_NO_RESULT_FOUND):
AnalysisAPIError.__init__(self, msg, error_code)
class TemporarilyUnavailableError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_TEMPORARILY_UNAVAILABLE):
AnalysisAPIError.__init__(self, msg, error_code)
class PermissionDeniedError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_PERMISSION_DENIED):
AnalysisAPIError.__init__(self, msg, error_code)
class FileTooLargeError(SubmissionInvalidError):
def __init__(self, msg, error_code=ANALYSIS_API_FILE_TOO_LARGE):
SubmissionInvalidError.__init__(self, msg, error_code)
class InvalidFileTypeError(SubmissionInvalidError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_FILE_TYPE):
SubmissionInvalidError.__init__(self, msg, error_code)
class InvalidMetadataError(SubmissionInvalidError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_D_METADATA):
SubmissionInvalidError.__init__(self, msg, error_code)
class InvalidArtifactError(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_ARTIFACT_UUID):
RequestError.__init__(self, msg, error_code)
class SubmissionLimitExceededError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_SUBMISSION_LIMIT_EXCEEDED):
AnalysisAPIError.__init__(self, msg, error_code)
class InvalidHashAlgorithmError(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_HASH_ALGORITHM):
RequestError.__init__(self, msg, error_code)
class InvalidURLError(SubmissionInvalidError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_URL):
SubmissionInvalidError.__init__(self, msg, error_code)
class InvalidReportVersionError(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_REPORT_VERSION):
RequestError.__init__(self, msg, error_code)
class FileExtractionFailedError(SubmissionInvalidError):
def __init__(self, msg, error_code=ANALYSIS_API_FILE_EXTRACTION_FAILED):
SubmissionInvalidError.__init__(self, msg, error_code)
class NoIOCExtractableError(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_NO_IOC_EXTRACTABLE):
RequestError.__init__(self, msg, error_code)
class DataNoLongerAvailable(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_DATA_NO_LONGER_AVAILABLE):
RequestError.__init__(self, msg, error_code)
class InvalidPriority(RequestError):
def __init__(self, msg, error_code=ANALYSIS_API_INVALID_PRIORITY):
RequestError.__init__(self, msg, error_code)
class AuthenticationError(AnalysisAPIError):
def __init__(self, msg, error_code=ANALYSIS_API_AUTHENTICATION_REQUIRED):
AnalysisAPIError.__init__(self, msg, error_code)
class NamedStringIO(io.StringIO):
"""
A wrapper around StringIO to make it look more like a real file-stream.
"""
def __init__(self, buf='', name=None):
# Sanitize buf:
# None value is transformed into 'None'
if not buf:
buf = ''
io.StringIO.__init__(self, buf)
self._name = name
@property
def name(self):
"""
        Get the name of the StringIO, might be None
"""
return self._name
#################
# client
#################
__COMPLETED_TASK_FIELDS = [
"task_uuid",
"score",
"insufficient_task_input_errors",
]
CompletedTask = collections.namedtuple("CompletedTask", __COMPLETED_TASK_FIELDS)
CompletedTask.__new__.__defaults__ = (None, None)
def get_time():
"""
trivial wrapper around time.time to make testing easier
"""
return time.time()
def purge_none(d):
"""
Purge None entries from a dictionary
"""
for k in list(d.keys()):
if d[k] is None:
del d[k]
return d
def hash_stream(stream, algorithm):
"""
Compute the hash of a file-like object
:param stream: stream to hash
:param algorithm: should be one of hashlib.algorithms
"""
if hasattr(hashlib, "algorithms"):
if algorithm not in hashlib.algorithms:
raise NotImplementedError("Hash function '%s' is not available" %
algorithm)
try:
m = hashlib.new(algorithm)
except ValueError:
#unsupported hash type
raise NotImplementedError("Hash function '%s' is not available" %
algorithm)
while True:
s = stream.read(4096)
if not s:
break
m.update(s)
return m.hexdigest()
def parse_datetime(d):
"""
Parse a datetime as formatted in one of the following formats:
date: %Y-%m-%d'
datetime: '%Y-%m-%d %H:%M:%S'
datetime with microseconds: '%Y-%m-%d %H:%M:%S.%f'
Can also handle a datetime.date or datetime.datetime object,
(or anything that has year, month and day attributes)
and converts it to datetime.datetime
"""
if hasattr(d, "year") and hasattr(d, "month") and hasattr(d, "day"):
return datetime.datetime(d.year, d.month, d.day)
try:
return datetime.datetime.strptime(
d, AnalysisClientBase.DATETIME_MSEC_FMT)
except ValueError: pass
try:
return datetime.datetime.strptime(d, AnalysisClientBase.DATETIME_FMT)
except ValueError: pass
try:
return datetime.datetime.strptime(d, AnalysisClientBase.DATE_FMT)
except ValueError:
raise ValueError("Date '%s' does not match format '%s'" % (
d, "%Y-%m-%d[ %H:%M:%S[.%f]]'"))
def get_direction(is_download):
"""
Returns the transfer direction for a file captured during an SMB or FTP request.
:param is_download: True if request was an SMB or FTP download.
:return: "FROM_SERVER" or "TO_SERVER"
"""
if is_download or is_download is None:
return "FROM_SERVER"
return "TO_SERVER"
class TaskCompletion(object):
"""
Helper class to get score for all completed tasks
    Sample usage:
        tc = TaskCompletion(my_analysis_client)
        for completed_task in tc.get_completed(start, end):
            print(completed_task.task_uuid, completed_task.score)
"""
def __init__(self, analysis_client):
"""
:param analysis_apiclient.AnalysisClientBase analysis_client: Client to use for
issuing get-completed requests
"""
self.__analysis_client = analysis_client
def get_completed(self, after, before):
"""
Return scores and additional metadata of tasks completed in the specified time range.
This takes care of using the analysis API's pagination
to make sure it gets all tasks.
:param after: datetime.datetime
:param before: datetime.datetime
:yield: sequence of `CompletedTask`
:raise: InvalidAnalysisAPIResponse if response
does not have the format we expect
"""
try:
while True:
result = self.__analysis_client.get_completed_with_metadata(
after=after,
before=before)
data = result["data"]
tasks = data["tasks"]
if tasks:
for task in tasks:
yield CompletedTask(
task_uuid=task['task_uuid'],
score=task['score'],
insufficient_task_input_errors=task.get(
'insufficient_task_input_errors'
),
)
# NOTE: Even if no tasks have been returned, the API may still have returned us
# the flag to query again (e.g., on a sliding window of time).
try:
more = int(data["more_results_available"])
except (KeyError, TypeError, ValueError):
# this flag was not in the initial API specs, so be a bit loose about parsing it
more = 0
if not more:
break
last_ts = parse_datetime(data["before"])
if last_ts >= before:
break
after = last_ts
except (KeyError, ValueError, TypeError, AttributeError):
# attributeError needed in case iteritems is missing (not a dict)
# let's give it the trace of the original exception, so we know
# what the specific problem is!
trace = sys.exc_info()[2]
raise InvalidAnalysisAPIResponse(
"Unable to parse response to get_completed()").with_traceback(trace)
class SubmissionTracker(object):
"""
Helper class to track the state of submissions until they're completed
:param analysis_client: analysis_apiclient.AnalysisClientBase
:param task_completion: analysis_apiclient.TaskCompletion or None
If not provided, will create one from the analysis_client.
Providing this parameter explicitly is mainly for testing.
- `track_submission()` is used to add the submission to the list of tasks
that we are keeping track of.
- `get_completed()` is used to get the results of tracked submissions
that have completed so far
Invocations of the two methods can be interleaved to add new tasks to keep
track of while others are still waiting to be completed.
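    Sample usage (a minimal sketch; the layout of the submission result is an
    assumption, and ``analysis_client`` is an already-constructed client)::
        tracker = SubmissionTracker(analysis_client)
        submission_ts = analysis_client.get_api_utc_timestamp()
        submission = analysis_client.submit_url("http://example.com/sample")
        tracker.track_submission(submission["data"]["task_uuid"], submission_ts)
        for completed_task in tracker.get_completed():
            print(completed_task.task_uuid, completed_task.score)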
"""
def __init__(self, analysis_client, task_completion=None):
self.__analysis_client = analysis_client
if not task_completion:
task_completion = TaskCompletion(analysis_client)
self.__task_completion = task_completion
# tasks we are currently tracking
self.__tracked_uuids = set()
# how far back in time we have to go for completion call
self.__min_timestamp = None
@property
def min_timestamp(self):
"""
Minimum timestamp from which next calls to get_completed call will start.
It may be useful to access this property
to serialize the state of the SubmissionTracker.
:rtype: datetime.datetime
"""
return self.__min_timestamp
@property
def num_tracked_uuids(self):
return len(self.__tracked_uuids)
def get_tracked_uuids(self):
"""
Return the current tracked uuids
It may be useful to access this property
to serialize the state of the SubmissionTracker.
:return: Sequence of task_uuids
"""
return set(self.__tracked_uuids)
def track_submission(self, task_uuid, submission_utc_timestamp):
"""
Start keeping track of the specified submission
:param task_uuid: UUID of submission to track
:type task_uuid: str
:param submission_utc_timestamp: Timestamp of the submission according to
the API server. A correct API timestamp can be obtained by
            invoking `AnalysisClientBase.get_api_utc_timestamp()`.
Providing a timestamp before the actual submission timestamp
will also work but may lead to less efficient use
of the get_completed API.
:type submission_utc_timestamp: datetime.datetime
"""
self.__tracked_uuids.add(task_uuid)
if self.__min_timestamp:
self.__min_timestamp = min(
self.__min_timestamp, submission_utc_timestamp)
else:
self.__min_timestamp = submission_utc_timestamp
def get_completed(self):
"""
Get results for tracked tasks that have completed so far
Once a completed task is returned by this method,
it will be removed from the set of tasks that are being tracked,
so it will not be returned again by later calls to this method.
:yield: sequence of `CompletedTask`
:raise: InvalidAnalysisAPIResponse if response
does not have the format we expect
"""
if not self.__tracked_uuids:
return
# cannot be None as otherwise we'd have no tracked uuids
assert self.__min_timestamp is not None, "SubmissionTracker has no min_timestamp!"
after = self.__min_timestamp
before = self.__analysis_client.get_api_utc_timestamp()
for completed_task in self.__task_completion.get_completed(after, before):
try:
self.__tracked_uuids.remove(completed_task.task_uuid)
yield completed_task
except KeyError:
# not a task we are tracking, so ignore it
pass
# we've examined all tasks up to this point, so move the starting time
self.__min_timestamp = before
class MockSession(object):
"""
This class acts as a drop-in replacement for the python-requests Session object in cases where
the client should not use a real session. This is useful in case where
- the API server does not support sessions. This feature was added a while back, but we want to
make sure that the latest client works with older versions of the API server, or
- a session introduces overhead rather than reduce it.
NOTE: This session implementation will embed the provided API credentials (if any) in each API
request, including GET requests. When using this class, ensure that the client does not invoke
GET requests to avoid leaking credentials into server logs.
NOTE: This is not a drop-in replacement for `requests.Session`. It only implements those parts
of the Session object's interface that we actually use in the `AnalysisAPIClient` class.
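    Sample usage (a minimal sketch; the credential field names and the URL are
    assumptions)::
        session = MockSession(credentials={"key": "...", "api_token": "..."})
        response = session.request(
            "GET", "https://analysis.example.com/analysis/get.json",
            params={"uuid": "some-task-uuid"})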
"""
def __init__(self, credentials=None, logger=None):
"""
:param dict|None credentials: Optional credentials to embed in each API request
:param logging.Logger|None logger: if provided, should be a python logging.Logger object
or object with similar interface.
"""
self.__credentials = credentials
self.__requests_session = None
self.__logger = logger
def request(self, method, url, **kwargs):
"""
Perform a request on this session - for more details, refer to `requests.Session.request()`
"""
if self.__requests_session is None:
self.__requests_session = requests.session()
data = {}
try:
data = kwargs.pop('data')
except KeyError:
pass
else:
# just to be on the safe side if someone explicitly passed in None
if data is None:
data = {}
# rewrite GET to POST: see class doc-string
if method.upper() == 'GET':
method = 'POST'
try:
params = kwargs.pop('params')
except KeyError:
pass # no GET args to deal with
else:
if params:
data.update(params)
if self.__logger:
self.__logger.debug(
"Rewrote %s %s to POST, moved %d GET args", method, url,
len(params) if params else 0)
# now embed the credentials if needed
if self.__credentials is not None:
for key, value in self.__credentials.items():
if key not in data:
data[key] = value
# now do the actual request
return self.__requests_session.request(method, url, data=data, **kwargs)
def close(self):
"""
Tear down this session object - for more details, refer to `requests.Session.close()`
"""
self.__requests_session.close()
class AnalysisClientBase(object):
"""
A client for the Lastline analysis API.
This is an abstract base class: concrete
subclasses just need to implement the _api_request
method to actually send the API request to the server.
:param base_url: URL where the lastline analysis API is located. (required)
:param logger: if provided, should be a python logging.Logger object
or object with similar interface.
"""
SUB_APIS = ('analysis', 'management', 'authentication')
DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
DATETIME_MSEC_FMT = DATETIME_FMT + '.%f'
DATE_FMT = '%Y-%m-%d'
FORMATS = ["json", "xml", "pdf", "rtf"]
REQUEST_PERFDATA = False
ERRORS = {
ANALYSIS_API_FILE_NOT_AVAILABLE: FileNotAvailableError,
ANALYSIS_API_INVALID_CREDENTIALS: InvalidCredentialsError,
ANALYSIS_API_INVALID_UUID: InvalidUUIDError,
ANALYSIS_API_NO_RESULT_FOUND: NoResultFoundError,
ANALYSIS_API_TEMPORARILY_UNAVAILABLE: TemporarilyUnavailableError,
ANALYSIS_API_PERMISSION_DENIED: PermissionDeniedError,
ANALYSIS_API_FILE_TOO_LARGE: FileTooLargeError,
ANALYSIS_API_INVALID_FILE_TYPE: InvalidFileTypeError,
ANALYSIS_API_INVALID_DOMAIN: InvalidMetadataError,
ANALYSIS_API_INVALID_D_METADATA: InvalidMetadataError,
ANALYSIS_API_INVALID_ARTIFACT_UUID: InvalidArtifactError,
ANALYSIS_API_SUBMISSION_LIMIT_EXCEEDED: SubmissionLimitExceededError,
ANALYSIS_API_INVALID_HASH_ALGORITHM: InvalidHashAlgorithmError,
ANALYSIS_API_INVALID_URL: InvalidURLError,
ANALYSIS_API_INVALID_REPORT_VERSION: InvalidReportVersionError,
ANALYSIS_API_FILE_EXTRACTION_FAILED: FileExtractionFailedError,
ANALYSIS_API_NO_IOC_EXTRACTABLE: NoIOCExtractableError,
ANALYSIS_API_DATA_NO_LONGER_AVAILABLE: DataNoLongerAvailable,
ANALYSIS_API_INVALID_PRIORITY: InvalidPriority,
ANALYSIS_API_AUTHENTICATION_REQUIRED: AuthenticationError,
}
HTTP_ERRORS = {
http.client.UNAUTHORIZED: AuthenticationError,
http.client.FORBIDDEN: PermissionDeniedError,
}
SUPPORTED_IOC_PCAP_VERSION = 2
def __init__(self, base_url, use_cdn=None, logger=None, config=None):
self.__logger = logger
self.__base_url = base_url
self.__use_cdn = use_cdn
self.__config = config
def _logger(self):
return self.__logger
def _build_url(self, sub_api, parts, requested_format="json"):
if sub_api not in self.SUB_APIS:
raise InvalidSubApiType(sub_api)
if requested_format not in self.FORMATS:
raise InvalidFormat(requested_format)
num_parts = 2 + len(parts)
pattern = "/".join(["%s"] * num_parts) + ".%s"
params = [self.__base_url, sub_api] + parts + [requested_format]
return pattern % tuple(params)
def _build_file_download_url(self, sub_api, parts):
"""
Generate a URL to a direct file download
"""
if sub_api not in self.SUB_APIS:
raise InvalidSubApiType(sub_api)
num_parts = 2 + len(parts)
pattern = "/".join(["%s"] * num_parts)
params = [self.__base_url, sub_api] + parts
return pattern % tuple(params)
def _check_file_like(self, f, param_name):
if not hasattr(f, 'read'):
raise AttributeError("The %s parameter is not a file-like object" %
param_name)
def submit_exe_hash(self,
md5=None,
sha1=None,
download_ip=None,
download_port=None,
download_url=None,
download_host=None,
download_path=None,
download_agent=None,
download_referer=None,
download_request=None,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=None,
raw=False,
verify=True):
"""
Submit a file by hash.
*Deprecated*. Use `submit_file_hash()`
"""
return self.submit_file_hash(md5, sha1,
download_ip=download_ip,
download_port=download_port,
download_url=download_url,
download_host=download_host,
download_path=download_path,
download_agent=download_agent,
download_referer=download_referer,
download_request=download_request,
full_report_score=full_report_score,
bypass_cache=bypass_cache,
raw=raw,
verify=verify)
def submit_file_hash(self,
md5=None,
sha1=None,
sha256=None,
download_ip=None,
download_port=None,
download_url=None,
download_host=None,
download_path=None,
download_agent=None,
download_referer=None,
download_request=None,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=None,
password=None,
password_candidates=None,
backend=None,
require_file_analysis=True,
mime_type=None,
analysis_timeout=None,
analysis_env=None,
allow_network_traffic=None,
filename=None,
keep_file_dumps=None,
keep_memory_dumps=None,
keep_behavior_log=None,
push_to_portal_account=None,
raw=False,
verify=True,
server_ip=None,
server_port=None,
server_host=None,
client_ip=None,
client_port=None,
is_download=True,
protocol="http",
apk_package_name=None,
report_version=None,
analysis_task_uuid=None,
analysis_engine=None,
task_metadata=None,
priority=None,
bypass_prefilter=None,
fast_analysis=None):
"""
Submit a file by hash.
One of the md5, sha1, or sha256 parameters must be provided.
        If more than one is provided, they must be consistent.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.submit_file`.
:param md5: md5 hash of file.
:param sha1: sha1 hash of file.
:param sha256: sha256 hash of file.
:param download_ip: DEPRECATED! Use server_ip instead.
:param download_port: DEPRECATED! Use server_port instead.
:param download_url: DEPRECATED! replaced by the download_host
and download_path parameters
:param download_host: hostname of the server-side endpoint of
the connection, as a string of bytes (not unicode).
:param download_path: host path from which the submitted file
was originally downloaded, as a string of bytes (not unicode)
:param download_agent: HTTP user-agent header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_referer: HTTP referer header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_request: full HTTP request with
which the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param full_report_score: if set, this value (between -1 and 101)
determines starting at which scores a full report is returned.
-1 and 101 indicate "never return full report";
0 indicates "return full report at all times"
:param bypass_cache: if True, the API will not serve a cached
result. NOTE: This requires special privileges.
:param password: password used to analyze password-protected or
encrypted content (such as archives or documents)
:param password_candidates: List of passwords used to analyze password-protected or
encrypted content (such as archives or documents)
:param require_file_analysis: if True, the submission requires an
analysis run to be started. If False, the API will attempt to
base a decision solely on static information such as
download source reputation and hash lookups. Requires special
permissions; Lastline-internal/do not use
:param mime_type: the mime-type of the file; This value should be
set when require_file_analysis is True to enforce getting the
most information available
:param analysis_timeout: timeout in seconds after which to terminate
analysis. The analysis engine might decide to extend this timeout
if necessary. If all analysis subjects terminate before this timeout
analysis might be shorter
:param analysis_env: environment in which to run analysis. This includes
the operating system as well as version of tools such as Microsoft
Office. Example usage:
- windows7:office2003, or
- windowsxp
By default, analysis will run on all available operating systems
using the most applicable tools.
:param allow_network_traffic: if False, all network connections will be
redirected to a honeypot. Requires special permissions.
:param filename: filename to use during analysis. If none is passed,
the analysis engine will pick an appropriate name automatically.
An easy way to pass this value is to use 'file_stream.name' for most
file-like objects
:param keep_file_dumps: if True, all files generated during
analysis will be kept for post-processing. NOTE: This can generate
large volumes of data and is not recommended. Requires special
permissions
:param keep_memory_dumps: if True, all buffers allocated during
analysis will be kept for post-processing. NOTE: This can generate
*very* large volumes of data and is not recommended. Requires
special permissions
:param keep_behavior_log: if True, the raw behavior log extracted during
analysis will be kept for post-processing. NOTE: This can generate
*very very* large volumes of data and is not recommended. Requires
special permissions
:param push_to_portal_account: if set, a successful submission will be
pushed to the web-portal using the specified account
:param backend: DEPRECATED! Don't use
:param verify: if False, disable SSL-certificate verification
:param raw: if True, return the raw json results of the API query
:param server_ip: ASCII dotted-quad representation of the IP address of
the server-side endpoint.
:param server_port: integer representation of the port number
of the server-side endpoint of the flow tuple.
:param server_host: DEPRECATED! Don't use
:param client_ip: ASCII dotted-quad representation of the IP address of
the client-side endpoint.
:param client_port: integer representation of the port number
of the client-side endpoint of the flow tuple.
:param is_download: Boolean; True if the transfer happened in the
server -> client direction, False otherwise (client -> server).
:param protocol: app-layer protocol in which the file got
transferred. Short ASCII string.
:param apk_package_name: package name for APK files. Don't specify
manually.
:param report_version: Version name of the Report that will be returned
(optional);
:param analysis_task_uuid: if the call is used to create a child task,
it specifies the current analysis task UUID; None otherwise.
Lastline-internal/do not use.
:param analysis_engine: if analysis_task_uuid is provided, it specifies
the sandbox it refers to; None otherwise. Lastline-internal/do not
use.
:param task_metadata: optional task-metadata to upload. Requires special
permissions; Lastline-internal/do not use
:param priority: Priority level to set for this analysis. Priority should
be between 1 and 10 (1 is the lowest priority, 10 is the highest).
Setting priority to any value other than 1 requires special permissions.
:param bypass_prefilter: Boolean; If True, file is submitted to all supported
analysis components without prior static analysis. Requires special permissions.
:param fast_analysis: Boolean; If True, file is submitted only to fast analyzers (static)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
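        Example (a minimal sketch; ``client`` is assumed to be a concrete
        ``AnalysisClient`` instance and the result layout is an assumption)::
            with open("/tmp/sample.exe", "rb") as f:
                sha256 = hash_stream(f, "sha256")
            result = client.submit_file_hash(sha256=sha256, filename="sample.exe")
            task_uuid = result["data"].get("task_uuid")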
"""
# this parameter was introduced into the LLAPI-client at some point, but
# it's actually not supported by the API!
_unused = server_host
if self.__logger and backend:
self.__logger.warning("Ignoring deprecated parameter 'backend'")
url = self._build_url("analysis", ["submit", "file"])
# These options require special permissions, so we should not set them
# if not specified
if allow_network_traffic is not None:
allow_network_traffic = allow_network_traffic and 1 or 0
if keep_file_dumps is not None:
keep_file_dumps = keep_file_dumps and 1 or 0
if keep_memory_dumps is not None:
keep_memory_dumps = keep_memory_dumps and 1 or 0
if keep_behavior_log is not None:
keep_behavior_log = keep_behavior_log and 1 or 0
if bypass_prefilter is not None:
bypass_prefilter = bypass_prefilter and 1 or 0
if fast_analysis is not None:
fast_analysis = fast_analysis and 1 or 0
params = purge_none({
"md5": md5,
"sha1": sha1,
"sha256": sha256,
"full_report_score": full_report_score,
"bypass_cache": bypass_cache and 1 or None,
"password": password,
"require_file_analysis": require_file_analysis and 1 or 0,
"mime_type": mime_type,
"download_ip": download_ip,
"download_port": download_port,
# analysis-specific options:
"analysis_timeout": analysis_timeout or None,
"analysis_env": analysis_env,
"allow_network_traffic": allow_network_traffic,
"filename": filename,
"keep_file_dumps": keep_file_dumps,
"keep_memory_dumps": keep_memory_dumps,
"keep_behavior_log": keep_behavior_log,
"push_to_portal_account": push_to_portal_account or None,
"server_ip": server_ip,
"server_port": server_port,
"client_ip": client_ip,
"client_port": client_port,
"direction": get_direction(is_download),
"protocol": protocol,
"apk_package_name": apk_package_name,
"report_version": report_version,
"analysis_task_uuid": analysis_task_uuid,
"analysis_engine": analysis_engine,
"priority": priority,
"bypass_prefilter": bypass_prefilter,
"fast_analysis": fast_analysis,
})
# using and-or-trick to convert to a StringIO if it is not None
# this just wraps it into a file-like object
files = purge_none({
"download_url": download_url is not None and \
io.StringIO(download_url) or None,
"download_host": download_host is not None and \
io.StringIO(download_host) or None,
"download_path": download_path is not None and \
io.StringIO(download_path) or None,
"download_agent": download_agent is not None and \
io.StringIO(download_agent) or None,
"download_referer": download_referer is not None and \
io.StringIO(download_referer) or None,
"download_request": download_request is not None and \
io.StringIO(download_request) or None,
"task_metadata": io.StringIO(simplejson.dumps(task_metadata))
if task_metadata is not None else None,
# NOTE: We enforce that the given collection is a unique list (set cannot be
# serialized). Further, if we are given an empty collection, we don't bother sending
# the json
"password_candidates": io.StringIO(simplejson.dumps(
list(set(password_candidates)))) if password_candidates else None,
})
return self._api_request(url, params, files=files, post=True,
raw=raw, verify=verify)
def submit_exe_file(self,
file_stream,
download_ip=None,
download_port=None,
download_url=None,
download_host=None,
download_path=None,
download_agent=None,
download_referer=None,
download_request=None,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=None,
delete_after_analysis=False,
raw=False,
verify=True):
"""
Submit a file by uploading it.
*Deprecated*. Use `submit_file()`
"""
return self.submit_file(file_stream,
download_ip=download_ip,
download_port=download_port,
download_url=download_url,
download_host=download_host,
download_path=download_path,
download_agent=download_agent,
download_referer=download_referer,
download_request=download_request,
full_report_score=full_report_score,
bypass_cache=bypass_cache,
delete_after_analysis=delete_after_analysis,
raw=raw,
verify=verify)
def submit_file(self, file_stream,
download_ip=None,
download_port=None,
download_url=None,
download_host=None,
download_path=None,
download_agent=None,
download_referer=None,
download_request=None,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=None,
delete_after_analysis=None,
backend=None,
analysis_timeout=None,
analysis_env=None,
allow_network_traffic=None,
filename=None,
keep_file_dumps=None,
keep_memory_dumps=None,
keep_behavior_log=None,
push_to_portal_account=None,
raw=False,
verify=True,
server_ip=None,
server_port=None,
server_host=None,
client_ip=None,
client_port=None,
is_download=True,
protocol="http",
apk_package_name=None,
password=None,
password_candidates=None,
report_version=None,
analysis_task_uuid=None,
analysis_engine=None,
task_metadata=None,
priority=None,
bypass_prefilter=None,
fast_analysis=None):
"""
Submit a file by uploading it.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.submit_file`.
:param file_stream: file-like object containing
the file to upload.
:param download_ip: DEPRECATED! Use server_ip instead.
:param download_port: DEPRECATED! Use server_port instead.
:param download_url: DEPRECATED! replaced by the download_host
and download_path parameters
:param download_host: hostname of the server-side endpoint of
the connection, as a string of bytes (not unicode).
:param download_path: host path from which the submitted file
was originally downloaded, as a string of bytes (not unicode)
:param download_agent: HTTP user-agent header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_referer: HTTP referer header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_request: full HTTP request with
which the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param full_report_score: if set, this value (between -1 and 101)
determines starting at which scores a full report is returned.
-1 and 101 indicate "never return full report";
0 indicates "return full report at all times"
:param bypass_cache: if True, the API will not serve a cached
result. NOTE: This requires special privileges.
:param delete_after_analysis: if True, the backend will delete the
            file after analysis is done (and no one previously submitted
this file with this flag set)
:param analysis_timeout: timeout in seconds after which to terminate
analysis. The analysis engine might decide to extend this timeout
if necessary. If all analysis subjects terminate before this timeout
analysis might be shorter
:param analysis_env: environment in which to run analysis. This includes
the operating system as well as version of tools such as Microsoft
Office. Example usage:
- windows7:office2003, or
- windowsxp
By default, analysis will run on all available operating systems
using the most applicable tools.
:param allow_network_traffic: if False, all network connections will be
redirected to a honeypot. Requires special permissions.
:param filename: filename to use during analysis. If none is passed,
the analysis engine will pick an appropriate name automatically.
An easy way to pass this value is to use 'file_stream.name' for most
file-like objects
:param keep_file_dumps: if True, all files generated during
analysis will be kept for post-processing. NOTE: This can generate
large volumes of data and is not recommended. Requires special
permissions
:param keep_memory_dumps: if True, all buffers allocated during
analysis will be kept for post-processing. NOTE: This can generate
large volumes of data and is not recommended. Requires special
permissions
:param keep_behavior_log: if True, the raw behavior log extracted during
analysis will be kept for post-processing. NOTE: This can generate
*very very* large volumes of data and is not recommended. Requires
special permissions
:param push_to_portal_account: if set, a successful submission will be
pushed to the web-portal using the specified username
:param backend: DEPRECATED! Don't use
:param verify: if False, disable SSL-certificate verification
:param raw: if True, return the raw JSON results of the API query
:param server_ip: ASCII dotted-quad representation of the IP address of
the server-side endpoint.
:param server_port: integer representation of the port number
of the server-side endpoint of the flow tuple.
:param server_host: DEPRECATED! Don't use
:param client_ip: ASCII dotted-quad representation of the IP address of
the client-side endpoint.
:param client_port: integer representation of the port number
of the client-side endpoint of the flow tuple.
:param is_download: Boolean; True if the transfer happened in the
server -> client direction, False otherwise (client -> server).
:param protocol: app-layer protocol in which the file got
transferred. Short ASCII string.
:param report_version: Version name of the Report that will be returned
(optional);
:param apk_package_name: package name for APK files. Don't specify
manually.
:param password: password used to analyze password-protected or
encrypted content (such as archives or documents)
:param password_candidates: List of passwords used to analyze password-protected or
encrypted content (such as archives or documents)
:param analysis_task_uuid: if the call is used to create a child task,
it specifies the current analysis task UUID; None otherwise.
Lastline-internal/do not use.
:param analysis_engine: if analysis_task_uuid is provided, it specifies
the sandbox it refers to; None otherwise. Lastline-internal/do not
use.
:param task_metadata: optional task-metadata to upload. Requires special
permissions; Lastline-internal/do not use
:param priority: Priority level to set for this analysis. Priority should
be between 1 and 10 (1 is the lowest priority, 10 is the highest)
Setting priority to any value other than 1 requires special permissions.
:param bypass_prefilter: Boolean; If True, file is submitted to all supported
analysis components without prior static analysis. Requires special permissions.
:param fast_analysis: Boolean; If True, file is submitted only to fast analyzers (static)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
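        Example (a minimal sketch; ``client`` is assumed to be a concrete
        ``AnalysisClient`` instance; the file path and passwords are placeholders)::
            with open("/tmp/invoice.docx", "rb") as f:
                result = client.submit_file(
                    f,
                    filename="invoice.docx",
                    password_candidates=["infected", "1234"],
                    analysis_env="windows7:office2003")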
"""
# this parameter was introduced into the LLAPI-client at some point, but
# it's actually not supported by the API!
_unused = server_host
if self.__logger and backend:
self.__logger.warning("Ignoring deprecated parameter 'backend'")
if filename is None and hasattr(file_stream, 'name'):
filename = os.path.basename(file_stream.name)
self._check_file_like(file_stream, "file_stream")
url = self._build_url("analysis", ["submit", "file"])
# These options require special permissions, so we should not set them
# if not specified
if allow_network_traffic is not None:
allow_network_traffic = allow_network_traffic and 1 or 0
if keep_file_dumps is not None:
keep_file_dumps = keep_file_dumps and 1 or 0
if keep_memory_dumps is not None:
keep_memory_dumps = keep_memory_dumps and 1 or 0
if keep_behavior_log is not None:
keep_behavior_log = keep_behavior_log and 1 or 0
if bypass_prefilter is not None:
bypass_prefilter = bypass_prefilter and 1 or 0
if fast_analysis is not None:
fast_analysis = fast_analysis and 1 or 0
params = purge_none({
"bypass_cache": bypass_cache and 1 or None,
"full_report_score": full_report_score,
"delete_after_analysis": delete_after_analysis and 1 or 0,
"download_ip": download_ip,
"download_port": download_port,
# analysis-specific options:
"analysis_timeout": analysis_timeout or None,
"analysis_env": analysis_env,
"allow_network_traffic": allow_network_traffic,
"filename": filename,
"keep_file_dumps": keep_file_dumps,
"keep_memory_dumps": keep_memory_dumps,
"keep_behavior_log": keep_behavior_log,
"push_to_portal_account": push_to_portal_account or None,
"server_ip": server_ip,
"server_port": server_port,
"client_ip": client_ip,
"client_port": client_port,
"direction": get_direction(is_download),
"protocol": protocol,
"apk_package_name": apk_package_name,
"password": password,
"report_version": report_version,
"analysis_task_uuid": analysis_task_uuid,
"analysis_engine": analysis_engine,
"priority": priority,
"bypass_prefilter": bypass_prefilter,
"fast_analysis": fast_analysis,
})
# using and-or-trick to convert to a StringIO if it is not None
# this just wraps it into a file-like object
files = purge_none({
# If an explicit filename was provided, we can pass it down to
# python-requests to use it in the multipart/form-data. This avoids
            # having python-requests try to guess the filename based on stream
# attributes.
#
# The problem with this is that, if the filename is not ASCII, then
# this triggers a bug in flask/werkzeug which means the file is
# thrown away. Thus, we just force an ASCII name
"file": ('dummy-ascii-name-for-file-param', file_stream),
"download_url": download_url is not None and \
io.StringIO(download_url) or None,
"download_host": download_host is not None and \
io.StringIO(download_host) or None,
"download_path": download_path is not None and \
io.StringIO(download_path) or None,
"download_agent": download_agent is not None and \
io.StringIO(download_agent) or None,
"download_referer": download_referer is not None and \
io.StringIO(download_referer) or None,
"download_request": download_request is not None and \
io.StringIO(download_request) or None,
"task_metadata": io.StringIO(simplejson.dumps(task_metadata))
if task_metadata is not None else None,
# NOTE: We enforce that the given collection is a unique list (set cannot be
# serialized). Further, if we are given an empty collection, we don't bother sending
# the json
"password_candidates": io.StringIO(simplejson.dumps(
list(set(password_candidates)))) if password_candidates else None,
})
return self._api_request(url, params, files=files, post=True,
raw=raw, verify=verify)
def submit_file_metadata(self, md5, sha1,
download_ip,
download_port,
download_host=None,
download_path=None,
download_agent=None,
download_referer=None,
download_request=None,
raw=False,
verify=True):
"""
Submit metadata regarding a file download.
*Deprecated*. Do not use.
        Both the md5 and the sha1 parameters must be provided.
:param md5: md5 hash of the downloaded file.
:param sha1: sha1 hash of the downloaded file.
:param download_ip: ASCII dotted-quad representation of the IP address
from which the file has been downloaded
:param download_port: integer representation of the port number
from which the file has been downloaded
:param download_host: host from which the submitted file
was originally downloaded, as a string of bytes (not unicode)
:param download_path: host path from which the submitted file
was originally downloaded, as a string of bytes (not unicode)
:param download_agent: HTTP user-agent header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_referer: HTTP referer header that was used
when the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param download_request: full HTTP request with
which the submitted file was originally downloaded,
as a string of bytes (not unicode)
:param verify: if False, disable SSL-certificate verification
:param raw: if True, return the raw json results of the API query
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["submit", "download"])
params = {
"md5": md5,
"sha1": sha1,
"download_ip": download_ip,
"download_port": download_port
}
#using and-or-trick to convert to a StringIO if it is not None
#this just wraps it into a file-like object
files = {
"download_host": download_host is not None and \
io.StringIO(download_host) or None,
"download_path": download_path is not None and \
io.StringIO(download_path) or None,
"download_agent": download_agent is not None and \
io.StringIO(download_agent) or None,
"download_referer": download_referer is not None and \
io.StringIO(download_referer) or None,
"download_request": download_request is not None and \
io.StringIO(download_request) or None
}
purge_none(files)
purge_none(params)
return self._api_request(url, params, files=files, post=True,
raw=raw, verify=verify)
def submit_url(self,
url,
referer=None,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=None,
backend=None,
analysis_timeout=None,
push_to_portal_account=None,
raw=False,
verify=True,
user_agent=None,
report_version=None,
analysis_task_uuid=None,
analysis_engine=None,
priority=None,
task_metadata=None,
fast_analysis=None,
password_candidates=None):
"""
Submit a url.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.submit_url`.
:param url: url to analyze
:param referer: referer header to use for analysis
:param full_report_score: if set, this value (between -1 and 101)
determines starting at which scores a full report is returned.
-1 and 101 indicate "never return full report";
0 indicates "return full report at all times"
:param bypass_cache: if True, the API will not serve a cached
result. NOTE: This requires special privileges.
:param analysis_timeout: timeout in seconds after which to terminate
analysis. The analysis engine might decide to extend this timeout
if necessary. If all analysis subjects terminate before this timeout
analysis might be shorter
:param push_to_portal_account: if set, a successful submission will be
pushed to the web-portal using the specified account
:param backend: DEPRECATED! Don't use
:param verify: if False, disable SSL-certificate verification
:param raw: if True, return the raw JSON results of the API query
:param report_version: Version name of the Report that will be returned
(optional);
:param user_agent: user agent header to use for analysis
:param analysis_task_uuid: if the call is used to create a child task,
it specifies the current analysis task UUID; None otherwise.
Lastline-internal/do not use.
:param analysis_engine: if analysis_task_uuid is provided, it specifies
the sandbox it refers to; None otherwise. Lastline-internal/do not
use.
:param priority: Priority level to set for this analysis. Priority should
be between 1 and 10 (1 is the lowest priority, 10 is the highest).
Setting priority to any value other than 1 requires special permissions.
:param task_metadata: optional task-metadata to upload. Requires special
permissions; Lastline-internal/do not use
:param fast_analysis: Boolean; If True, url is submitted only to fast analyzers (static)
:param password_candidates: List of passwords used to analyze password-protected or
encrypted content from the URL.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
if self.__logger and backend:
self.__logger.warning("Ignoring deprecated parameter 'backend'")
api_url = self._build_url("analysis", ["submit", "url"])
if fast_analysis is not None:
fast_analysis = fast_analysis and 1 or 0
params = purge_none({
"url":url,
"referer":referer,
"full_report_score":full_report_score,
"bypass_cache":bypass_cache and 1 or None,
"analysis_timeout": analysis_timeout or None,
"push_to_portal_account": push_to_portal_account or None,
"user_agent": user_agent or None,
"report_version": report_version,
"analysis_task_uuid": analysis_task_uuid or None,
"analysis_engine": analysis_engine,
"priority": priority,
"fast_analysis": fast_analysis,
})
files = purge_none({
"task_metadata": io.StringIO(simplejson.dumps(task_metadata))
if task_metadata is not None else None,
# NOTE: We enforce that the given collection is a unique list (set cannot be
# serialized). Further, if we are given an empty collection, we don't bother sending
# the json
"password_candidates": io.StringIO(simplejson.dumps(
list(set(password_candidates)))) if password_candidates else None,
})
return self._api_request(api_url, params, files=files, post=True,
raw=raw, verify=verify)
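    # Minimal usage sketch (comment only, not part of the client): submitting a
    # URL and reading back the task UUID. A concrete client subclass that
    # implements `_api_request()` is assumed here (named `AnalysisClient` for
    # illustration); the constructor arguments and the
    # `result["data"]["task_uuid"]` layout are assumptions based on the
    # docstrings above, not definitions from this excerpt.
    #
    #   client = AnalysisClient("https://analysis.example.com", key="KEY", api_token="TOKEN")
    #   result = client.submit_url("http://example.com/suspicious", referer="http://example.com/")
    #   task_uuid = result["data"]["task_uuid"]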
def get_result(self,
uuid,
report_uuid=None,
full_report_score=None,
include_scoring_components=None,
raw=False,
requested_format="json",
verify=True,
report_version=None,
allow_datacenter_redirect=None):
"""
Get results for a previously submitted analysis task.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_results`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param report_uuid: if set, include this report in the result.
:param full_report_score: if set, this value (between -1 and 101)
determines starting at which scores a full report is returned.
-1 and 101 indicate "never return full report";
0 indicates "return full report at all times"
:param include_scoring_components: if True, the result will contain
details of all components contributing to the overall score.
Requires special permissions
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON, XML, PDF, or RTF.
If format is not JSON, this implies `raw`.
:param report_version: Version of the report to be returned
If *report_uuid* is not specified, this parameter is ignored.
(optional)
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
# better: use 'get_results()' but that would break
# backwards-compatibility
url = self._build_url('analysis', ['get'],
requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'report_uuid': report_uuid,
'full_report_score': full_report_score,
'include_scoring_components': include_scoring_components and 1 or 0,
'report_version': report_version,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != 'json':
raw = True
# NOTE: This API request may return real HTTP status-codes (and errors)
# directly when fetching IOC reports.
try:
result = self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
except FailedRequestError as exc:
status_code = str(exc.status_code())
if status_code == '404':
raise InvalidUUIDError(str(exc))
if status_code == '412':
raise NoResultFoundError(str(exc))
# we have nothing more specific to say -- raise the
# original FailedRequestError
raise
# Legacy support:
# results are always returned as strings no matter
# the content disposition of the server response.
if isinstance(result, NamedStringIO):
return result.read()
return result
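    # Usage sketch for fetching results of a submitted task; `client` and
    # `task_uuid` are assumed from the submit_url sketch above, and the
    # 'data'/'score' layout is an assumption based on the API docstrings.
    #
    #   result = client.get_result(task_uuid)
    #   score = result["data"].get("score")
    #
    # Non-JSON formats imply `raw`, so e.g. a PDF request returns the raw
    # document instead of a parsed dictionary:
    #
    #   pdf_report = client.get_result(task_uuid, requested_format="pdf")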
def get_result_summary(self, uuid, raw=False,
requested_format="json",
score_only=False,
verify=True,
allow_datacenter_redirect=None):
"""
Get result summary for a previously submitted analysis task.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_result`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON or XML. If format is not JSON, this
implies `raw`.
:param score_only: if True, return even less data (only score and
threat/threat-class classification).
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_result"],
requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'score_only': score_only and 1 or 0,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != "json":
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
def get_result_activities(self, uuid, raw=False,
requested_format="json",
verify=True,
allow_datacenter_redirect=None):
"""
Get the behavior/activity information for a previously submitted
analysis task.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_result_activities`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON or XML. If format is not JSON, this
implies `raw`.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_result_activities"],
requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != "json":
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
def get_report_activities(self, uuid, report_uuid, raw=False,
requested_format="json",
verify=True,
allow_datacenter_redirect=None):
"""
Get the behavior/activity information for a specific analysis report.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_report_activities`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param report_uuid: the unique report identifier returned as part of
the dictionary returned by get_result().
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON or XML. If format is not JSON, this
implies `raw`.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_report_activities"],
requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'report_uuid': report_uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != "json":
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
def get_result_artifact(self, uuid, report_uuid, artifact_name, password_protected=None,
raw=False, verify=True, allow_datacenter_redirect=None):
"""
Get artifact generated by an analysis result for a previously
submitted analysis task.
NOTE: Consider using `get_report_artifact()` if the artifact is bound to a specific
        analysis report (which it is in practically all cases).
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param report_uuid: the unique report identifier returned as part of
the dictionary returned by get_result().
:param artifact_name: the name of the artifact as mentioned in the
given report in the dictionary returned by get_result().
:param str password_protected: If provided, use this password to create a zip which will
contain the artifact being fetched. The password provided should be using only
ASCII characters and have max length of 128 characters
:param raw: if True, return the raw JSON/XML results of the API query.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
# NOTE: we cannot simply use "get_report_artifact" in this function, because that
# function does not allow returning JSON/XML formatted data
url = self._build_file_download_url("analysis",
["get_result_artifact"])
params = purge_none({
'uuid': uuid,
'artifact_uuid': "%s:%s" % (report_uuid, artifact_name),
'password_protected': password_protected,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
# NOTE: This API request is completely different because it
# returns real HTTP status-codes (and errors) directly
try:
result = self._api_request(url, params, requested_format='raw',
raw=raw, verify=verify)
if not result.len:
raise InvalidArtifactError("The artifact is empty")
except FailedRequestError as exc:
status_code = str(exc.status_code())
if status_code == '401':
raise PermissionDeniedError(
"Permission denied to access artifacts")
if status_code == '404':
raise InvalidArtifactError(str(exc))
if status_code == '410':
raise InvalidArtifactError(
"The artifact is no longer available")
if status_code == '412':
raise InvalidUUIDError(str(exc))
# we have nothing more specific to say -- raise the
# original FailedRequestError
raise
if not result.len:
raise InvalidArtifactError("The artifact is empty")
return result
def get_report_artifact(
self, uuid, report_uuid, artifact_name, password_protected=None, verify=True,
allow_datacenter_redirect=None
):
"""
Get artifact generated by an analysis result for a previously
submitted analysis task.
:param str uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param str report_uuid: the unique report identifier returned as part of
the dictionary returned by get_result().
:param str artifact_name: the name of the artifact as mentioned in the
given report in the dictionary returned by get_result().
:param str password_protected: If provided, use this password to create a zip which will
contain the artifact being fetched. The password provided should be using only
ASCII characters and have max length of 128 characters
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:returns: A stream containing the artifact content
:rtype: stream
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_file_download_url("analysis", ["get_report_artifact"])
params = purge_none({
'uuid': uuid,
'report_uuid': report_uuid,
'artifact_name': artifact_name,
'password_protected': password_protected,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
# NOTE: This API request is completely different because it
# returns real HTTP status-codes (and errors) directly
try:
result = self._api_request(
url,
params,
requested_format='raw',
raw=True,
verify=verify)
except FailedRequestError as exc:
status_code = str(exc.status_code())
if status_code == '401':
raise PermissionDeniedError("Permission denied to access artifacts")
if status_code == '404':
raise InvalidArtifactError(str(exc))
if status_code == '410':
raise InvalidArtifactError("The artifact is no longer available")
if status_code == '412':
raise InvalidUUIDError(str(exc))
# we have nothing more specific to say -- raise the
# original FailedRequestError
raise
return result
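    # Usage sketch: downloading a report artifact to disk. `client`, `task_uuid`
    # and `report_uuid` are assumed from earlier sketches; the artifact name is
    # purely illustrative. Whether the returned stream yields bytes or text
    # depends on the concrete `_api_request()` implementation, which is not
    # part of this excerpt.
    #
    #   artifact_stream = client.get_report_artifact(
    #       task_uuid, report_uuid, artifact_name="process_dump")
    #   with open("process_dump.bin", "wb") as out_file:
    #       out_file.write(artifact_stream.read())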
def query_task_artifact(self, uuid, artifact_name, raw=False, verify=True,
allow_datacenter_redirect=None):
"""
Query if a specific task artifact is available for download.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param artifact_name: the name of the artifact
:param raw: if True, return the raw JSON/XML results of the API query.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["query_task_artifact"])
params = purge_none({
'uuid': uuid,
'artifact_name': artifact_name,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
return self._api_request(url, params, raw=raw, verify=verify)
def get_ioc_metadata(self, ioc_uuid,
raw=False,
requested_format="json",
verify=True,
allow_datacenter_redirect=None):
"""
Get metadata about a previously generated IOC report by its UUID.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_ioc_metadata`.
:param ioc_uuid: the unique identifier of the IOC, as returned by
`get_results()`.
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON or XML.
If format is not JSON, this implies `raw`.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['ioc', 'get_ioc_metadata'],
requested_format=requested_format)
params = purge_none({
'ioc_uuid': ioc_uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != 'json':
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
def get_ioc_report(self, ioc_uuid,
raw=False,
requested_format="json",
verify=True,
allow_datacenter_redirect=None):
"""
Get an IOC report by its UUID.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_ioc_report`.
:param ioc_uuid: the unique identifier of the IOC, as returned by
`get_results()`.
:param raw: if True, return the raw JSON/XML results of the API query.
:param requested_format: JSON or XML.
If format is not JSON, this implies `raw`.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['ioc', 'get_ioc_report'],
requested_format=requested_format)
params = purge_none({
'ioc_uuid': ioc_uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != 'json':
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
verify=verify)
def create_ioc_from_result(self,
uuid,
report_uuid=None,
raw=False,
requested_format="json",
verify=True,
report_version=None,
allow_datacenter_redirect=None):
"""
        Create an IOC report from an analysis result.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.create_ioc_from_result`.
:param uuid: the unique identifier of the task,
as returned in the task_uuid field of submit methods.
        :param report_uuid: report from which to generate an IOC.
:param requested_format: JSON, XML, or RAW.
If format is not JSON, this implies `raw`.
:param report_version: IOC format.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['ioc', 'create_ioc_from_result'],
requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'report_uuid': report_uuid,
'report_version': report_version,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != 'json':
raw = True
return self._api_request(url,
params,
raw=raw,
requested_format=requested_format,
post=True,
verify=verify)
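    # Sketch of the IOC workflow formed by the three methods above: generate an
    # IOC from an analysis report, then retrieve its metadata and content by
    # UUID. `client`, `task_uuid` and `report_uuid` are assumed from earlier
    # sketches; the `ioc_uuid` response field is an assumption based on the
    # get_ioc_metadata()/get_ioc_report() docstrings.
    #
    #   created = client.create_ioc_from_result(task_uuid, report_uuid=report_uuid)
    #   ioc_uuid = created["data"]["ioc_uuid"]
    #   ioc_metadata = client.get_ioc_metadata(ioc_uuid)
    #   ioc_report = client.get_ioc_report(ioc_uuid)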
def get_network_iocs(self, uuid, raw=False, verify=True, allow_datacenter_redirect=None):
"""
Get the network IOCs for an analysis task.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:type uuid: `str`
:param raw: if True, return the raw JSON results of the API query.
:type raw: `bool`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:return: PCAP data containing IOC information.
:rtype: list(PcapInfoV2)
:raises InvalidAnalysisAPIResponse: If malscape response could not be parsed.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ['ioc', 'get_network_iocs'])
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
response = self._api_request(url, params, raw=raw, verify=verify)
if raw:
return response
network_ioc_response = []
try:
for network_ioc in response['data']['network_iocs']:
if network_ioc['pcap_info_version'] != self.SUPPORTED_IOC_PCAP_VERSION:
raise InvalidAnalysisAPIResponse('malscape returns invalid pcap_info version')
# version and url fields are required for pcap json decoding
network_ioc['pcap_info']['version'] = self.SUPPORTED_IOC_PCAP_VERSION
if 'urls' not in network_ioc['pcap_info']:
network_ioc['pcap_info']['urls'] = []
try:
network_ioc_response.append(
llpcap_apiclient.PcapInfoV2.from_json({'data': network_ioc['pcap_info']})
)
except llpcap_apiclient.Error as err:
raise InvalidAnalysisAPIResponse(
'malscape returns invalid network_ioc response: {}'.format(err))
except KeyError as err:
raise InvalidAnalysisAPIResponse(
'malscape returns invalid network_ioc response: missing field {}'.format(err)
)
return network_ioc_response
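    # Usage sketch: the parsed (non-raw) return value is a list of
    # llpcap_apiclient.PcapInfoV2 objects built in the loop above; only the
    # objects themselves are used here because their attributes are not defined
    # in this excerpt. `client` and `task_uuid` are assumed from earlier
    # sketches.
    #
    #   for pcap_info in client.get_network_iocs(task_uuid):
    #       print(pcap_info)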
def completed(self, after, before=None, raw=False, verify=True):
"""
*Deprecated*. Use 'get_completed()'
"""
return self.get_completed(after, before=before,
verify=verify, raw=raw)
def get_completed(self, after, before=None, raw=False, verify=True,
include_score=False):
"""
Get the list of uuids of tasks that were completed
within a given time frame.
The main use-case for this method is to periodically
request a list of uuids completed since the last
time this method was invoked, and then fetch
each result with `get_result()`.
Date parameters to this method can be:
        - date string: '%Y-%m-%d'
- datetime string: '%Y-%m-%d %H:%M:%S'
- datetime.datetime object
All times are in UTC.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_completed`.
:param after: Request tasks completed after this time.
:param before: Request tasks completed before this time.
:param include_score: If True, the response contains scores together
with the task-UUIDs that have completed
:param raw: if True, return the raw JSON results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
# better: use 'get_completed()' but that would break
# backwards-compatibility
url = self._build_url("analysis", ["completed"])
if hasattr(before, "strftime"):
before = before.strftime(AnalysisClientBase.DATETIME_FMT)
if hasattr(after, "strftime"):
after = after.strftime(AnalysisClientBase.DATETIME_FMT)
params = purge_none({
'before': before,
'after': after,
'include_score': include_score and 1 or 0,
})
return self._api_request(url, params, raw=raw, verify=verify)
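    # Polling sketch following the docstring above: ask for tasks completed
    # since the last poll, then fetch each result. `client` is assumed from
    # earlier sketches; the 'data'/'tasks' layout of the response is an
    # assumption used only for illustration.
    #
    #   import datetime
    #   last_poll = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
    #   completed = client.get_completed(after=last_poll, include_score=True)
    #   for finished_uuid in completed["data"]["tasks"]:
    #       client.get_result(finished_uuid)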
def get_completed_with_metadata(self, after, before=None, raw=False, verify=True):
"""
Get the list of dictionaries, each containing a uuid for a task that was completed
within a given time frame, the resulting score, and additional task_metadata
The main use-case for this method is to periodically
        request a list of dictionaries containing information about each task,
such as the score and task_metadata. Then, additional information can be retrieved
for a task with `get_result()`
Date parameters to this method can be:
        - date string: '%Y-%m-%d'
- datetime string: '%Y-%m-%d %H:%M:%S'
- datetime.datetime object
All times are in UTC.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_completed_with_metadata`.
:param after: Request tasks completed after this time.
:param before: Request tasks completed before this time.
:param raw: if True, return the raw JSON results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_completed_with_metadata"])
if hasattr(before, "strftime"):
before = before.strftime(AnalysisClientBase.DATETIME_FMT)
if hasattr(after, "strftime"):
after = after.strftime(AnalysisClientBase.DATETIME_FMT)
params = purge_none({
'before': before,
'after': after,
})
return self._api_request(url, params, raw=raw, verify=verify)
def get_pending(self, after=None, before=None, raw=False, verify=True):
"""
        Get the list of uuids of tasks that are pending (i.e., not completed) within a given
time frame.
The main use-case for this method is for stateless clients to perform actions on
pending tasks.
Date parameters to this method can be:
        - date string: '%Y-%m-%d'
- datetime string: '%Y-%m-%d %H:%M:%S'
- datetime.datetime object
All times are in UTC.
For return values and error codes please see
:py:meth:`malscape_service.api.views.analysis.get_pending`.
:param after: Request tasks completed after this time.
:type after: `str` or `datetime.datetime`
:param before: Request tasks completed before this time.
:type before: `str` or `datetime.datetime`
:param raw: if True, return the raw JSON results of the API query.
:type raw: `bool`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_pending"])
if hasattr(before, "strftime"):
before = before.strftime(AnalysisClientBase.DATETIME_FMT)
if hasattr(after, "strftime"):
after = after.strftime(AnalysisClientBase.DATETIME_FMT)
params = purge_none({'before': before, 'after': after})
return self._api_request(url, params, raw=raw, verify=verify)
def get_progress(self, uuid, raw=False, allow_datacenter_redirect=None):
"""
Get a progress estimate for a previously submitted analysis task.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_results`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param raw: if True, return the raw JSON/XML results of the API query.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['get_progress'])
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
return self._api_request(url, params, raw=raw, post=True)
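    # Sketch: checking progress before requesting the full result. `client` and
    # `task_uuid` are assumed from earlier sketches; the 'completed' field in
    # the progress data is an assumption used for illustration only.
    #
    #   progress = client.get_progress(task_uuid)
    #   if progress["data"].get("completed"):
    #       result = client.get_result(task_uuid)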
def get_task_metadata(self, uuid, raw=False, allow_datacenter_redirect=None):
"""
Get information about a task by its UUID.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_task_metadata`.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param raw: if True, return the raw JSON/XML results of the API query.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['get_task_metadata'])
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
return self._api_request(url, params, raw=raw)
def export_report(self, uuid, report_type, report_format='PDF', raw=False):
"""
Export a report or a combination of reports for a task.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.export_report`.
:param str uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:param str report_type: The kind of report to generate. See
`ANALYSIS_API_EXPORT_REPORT_TYPES` for supported types.
:param str report_format: The output format. See
`ANALYSIS_API_EXPORT_REPORT_FORMATS` for supported formats.
:param bool raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
if report_type not in ANALYSIS_API_EXPORT_REPORT_TYPES:
raise Error("Invalid report type")
if report_format not in ANALYSIS_API_EXPORT_REPORT_FORMATS:
raise Error("Invalid report format")
url = self._build_url('analysis', ['export_report'])
params = purge_none({
'uuid': uuid,
'report_type': report_type,
'report_format': report_format,
})
return self._api_request(url, params, raw=raw, post=True)
def get_completed_exported_reports(self, resume_after_report_uuid=None, raw=False):
"""
Get the available exported reports.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_completed_exported_reports`.
:param str resume_after_report_uuid: The UUID of the last received report,
we will provide reports generated *after* this one. If not provided, will start from
the earliest stored ID.
:param bool raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['get_completed_exported_reports'])
params = purge_none({
'resume_after_report_uuid': resume_after_report_uuid,
})
return self._api_request(url, params, raw=raw)
def get_exported_report(self, exported_report_uuid):
"""
Get an exported report.
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.get_exported_report`.
:param str exported_report_uuid: The uuid of the exported report that we wish to return
:returns: A stream containing the report content
:rtype: stream
:raises AnalysisAPIError: Analysis API returns HTTP error or error code.
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_file_download_url('analysis', ['get_exported_report'])
params = purge_none({
'exported_report_uuid': exported_report_uuid,
'cdn': self.__use_cdn
})
# NOTE: This API request returns real HTTP status-codes (and errors) directly
try:
response = self._api_request(url, params, raw=True, requested_format='raw')
if isinstance(response, str):
response = io.StringIO(response)
return response
except FailedRequestError as exc:
status_code = str(exc.status_code())
if status_code == '401':
raise PermissionDeniedError("Permission denied to access artifacts")
if status_code == '404':
raise InvalidArtifactError(str(exc))
if status_code == '410':
raise InvalidArtifactError("The artifact is no longer available")
if status_code == '412':
raise InvalidUUIDError(str(exc))
# we have nothing more specific to say -- raise the
# original FailedRequestError
raise
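    # End-to-end sketch for the export workflow defined by export_report(),
    # get_completed_exported_reports() and get_exported_report(). `client` and
    # `task_uuid` are assumed from earlier sketches; the response field names
    # used below are assumptions, not definitions from this excerpt.
    #
    #   # report_type must be one of ANALYSIS_API_EXPORT_REPORT_TYPES
    #   client.export_report(task_uuid, report_type=report_type)
    #   finished = client.get_completed_exported_reports()
    #   for entry in finished["data"]["exported_reports"]:
    #       report_stream = client.get_exported_report(entry["exported_report_uuid"])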
def query_file_hash(self, hash_value=None, algorithm=None, block_size=None,
md5=None, sha1=None, sha256=None, mmh3=None, raw=False):
"""
Search for existing analysis results with the given file-hash.
:param hash_value: The (partial) file-hash.
:param algorithm: One of MD5/SHA1/SHA256
:param block_size: Size of the block (at file start) used for generating
the hash-value. By default (or if 0), the entire file is assumed.
:param md5: Helper to quickly set `hash_value` and `algorithm`
:param sha1: Helper to quickly set `hash_value` and `algorithm`
:param sha256: Helper to quickly set `hash_value` and `algorithm`
:param mmh3: DEPRECATED! Don't use, mmh3 file hash is no longer supported
:param raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
if (mmh3 or (algorithm and algorithm.lower() == 'mmh3')) and self.__logger:
self.__logger.warning(
"No results will be returned for deprecated mmh3 file-hash query"
)
if md5 or sha1 or sha256 or mmh3:
if hash_value or algorithm:
raise TypeError("Conflicting values passed for hash/algorithm")
if md5 and not sha1 and not sha256 and not mmh3:
hash_value = md5
algorithm = 'md5'
elif sha1 and not md5 and not sha256 and not mmh3:
hash_value = sha1
algorithm = 'sha1'
elif sha256 and not md5 and not sha1 and not mmh3:
hash_value = sha256
algorithm = 'sha256'
elif mmh3 and not md5 and not sha1 and not sha256:
hash_value = mmh3
algorithm = 'mmh3'
else:
raise TypeError("Conflicting values passed for hash/algorithm")
elif not hash_value or not algorithm:
raise TypeError("Missing values for hash_value/algorithm")
url = self._build_url('analysis', ['query/file_hash'])
params = purge_none({
'hash_value': hash_value,
'hash_algorithm': algorithm,
'hash_block_size': block_size,
})
return self._api_request(url, params, raw=raw)
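    # Usage sketch for the hash helper parameters: passing md5= (or sha1=/sha256=)
    # sets both `hash_value` and `algorithm`, as implemented above. `client` is
    # assumed from earlier sketches and the hash below is only a placeholder.
    #
    #   hits = client.query_file_hash(md5="d41d8cd98f00b204e9800998ecf8427e")
    #
    # which is equivalent to:
    #
    #   hits = client.query_file_hash(
    #       hash_value="d41d8cd98f00b204e9800998ecf8427e", algorithm="md5")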
def is_blocked_file_hash(self, hash_value=None, algorithm=None,
block_size=None, md5=None, sha1=None, sha256=None,
mmh3=None, raw=False):
"""
Check if the given file-hash belongs to a malicious file and we have
gathered enough information to block based on this (partial) hash.
:param hash_value: The (partial) file-hash.
:param algorithm: One of MD5/SHA1/SHA256
:param block_size: Size of the block (at file start) used for generating
the hash-value. By default (or if 0), the entire file is assumed.
:param md5: Helper to quickly set `hash_value` and `algorithm`
:param sha1: Helper to quickly set `hash_value` and `algorithm`
:param sha256: Helper to quickly set `hash_value` and `algorithm`
:param mmh3: DEPRECATED! Don't use, mmh3 file hash is no longer supported
:param raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
if (mmh3 or (algorithm and algorithm.lower() == 'mmh3')) and self.__logger:
self.__logger.warning(
"No results will be returned for deprecated mmh3 file-hash query"
)
if md5 or sha1 or sha256 or mmh3:
if hash_value or algorithm:
raise TypeError("Conflicting values passed for hash/algorithm")
if md5 and not sha1 and not sha256 and not mmh3:
hash_value = md5
algorithm = 'md5'
elif sha1 and not md5 and not sha256 and not mmh3:
hash_value = sha1
algorithm = 'sha1'
elif sha256 and not md5 and not sha1 and not mmh3:
hash_value = sha256
algorithm = 'sha256'
elif mmh3 and not md5 and not sha1 and not sha256:
hash_value = mmh3
algorithm = 'mmh3'
else:
raise TypeError("Conflicting values passed for hash/algorithm")
elif not hash_value or not algorithm:
raise TypeError("Missing values for hash_value/algorithm")
url = self._build_url('analysis', ['query/is_blocked_file_hash'])
params = purge_none({
'hash_value': hash_value,
'hash_algorithm': algorithm,
'hash_block_size': block_size,
})
return self._api_request(url, params, raw=raw)
def query_analysis_engine_tasks(self, analysis_engine_task_uuids,
analysis_engine='analyst', raw=False):
"""
Provide a set of task UUIDs from an analysis engine (such as Analyst
Scheduler or Anubis) and find completed tasks that contain this analysis
engine task.
THIS FUNCTION IS DEPRECATED - DO NOT USE!
"""
url = self._build_url('analysis', ['query/analysis_engine_tasks'])
params = purge_none({
'analysis_engine_task_uuids': ','.join(analysis_engine_task_uuids),
'analysis_engine': analysis_engine,
})
return self._api_request(url, params, raw=raw)
def analyze_sandbox_result(self, analysis_task_uuid,
analysis_engine='anubis',
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
bypass_cache=False,
raw=False,
allow_datacenter_redirect=None):
"""
Provide a task UUID from an analysis engine (such as Analyst Scheduler
or Anubis) and trigger scoring of the activity captured by the analysis
report.
Similar to submitting by exe hash (md5/sha1/sha256) but we can enforce
the precise analysis result (if there are multiple) that we want
to score
For return values and error codes please
see :py:meth:`malscape_service.api.views.analysis.analyze_sandbox_result`.
Requires specific permissions.
:param analysis_task_uuid: The sandbox task UUID to analyze/import.
Lastline-internal/do not use.
:param analysis_engine: The sandbox the task refers to.
Lastline-internal/do not use.
:param full_report_score: if set, this value (between -1 and 101)
determines starting at which scores a full report is returned.
-1 and 101 indicate "never return full report";
0 indicates "return full report at all times"
:param bypass_cache: if True, the API will not serve a cached
result. NOTE: This requires special privileges.
:param raw: if True, return the raw JSON results of the API query.
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('analysis', ['analyze_sandbox_result'])
params = purge_none({
            'analysis_task_uuid': analysis_task_uuid,
'analysis_engine': analysis_engine,
'full_report_score': full_report_score,
'bypass_cache': bypass_cache and 1 or 0,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
return self._api_request(url, params, raw=raw)
def register_completion(self, uuid, force_register=True, raw=False):
"""
Register submission of a given task_uuid to the user that is authenticated
:param uuid: the unique identifier of the submitted task, as returned in the task_uuid
field of submit methods.
:type uuid: `str`
        :param force_register: If set to True, create a submission even if we already have
            one in place for the same license/task-uuid. If False, don't create a
            new one unless needed.
:type force_register: `bool`
:returns: Dictionary with information regarding if registered task is already completed
or not
:rtype: `dict`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["register_completion"])
params = purge_none({
'uuid': uuid,
'force_register': force_register and 1 or 0,
})
return self._api_request(url, params, post=True, raw=raw)
def get_analysis_tags(self, uuid, raw=False, verify=True, allow_datacenter_redirect=None,
requested_format="json"):
"""
Get the analysis tags for an analysis task.
:param uuid: the unique identifier of the submitted task,
as returned in the task_uuid field of submit methods.
:type uuid: `str`
:param raw: if True, return the raw JSON results of the API query.
:type raw: `bool`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:type allow_datacenter_redirect: `bool`
:param requested_format: JSON or XML. If format is not JSON, this implies `raw`.
:type requested_format: `str`
:return: Dictionary of analysis tag data
        :rtype: `dict`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_analysis_tags"], requested_format=requested_format)
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
if requested_format.lower() != 'json':
raw = True
return self._api_request(url, params, raw=raw, verify=verify,
requested_format=requested_format)
def get_child_tasks_recursively(self, uuid, raw=False, verify=True,
allow_datacenter_redirect=None):
"""
Get all the child tasks recursively for the given task UUID.
:param uuid: The task UUID
:type uuid: str
:param raw: if True, return the raw JSON results of the API query.
:type raw: bool
:param verify: if False, disable SSL-certificate verification
:type verify: bool
:param allow_datacenter_redirect: If False, redirection to other datacenters prevented.
:return: The child tasks UUID and their information. The result will be returned
in dict type with the child task UUID as the key and the depth data as
the value
Example:
{ 'task_uuid': 'ffffffffffffffff',
'child_tasks':
{
'aaaaaaaa': {'depth': 1},
'bbbbbbbb': {'depth': 2},
}
}
:rtype: dict(dict)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url("analysis", ["get_child_tasks_recursively"])
params = purge_none({
'uuid': uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
return self._api_request(url, params, raw=raw, verify=verify)
def update_global_whitelist_info(
self, uploader_name, create_uploader=False, md5=None, sha1=None, sha256=None,
confidence=None, is_revoked=False, is_public=False, raw=False
):
"""
Update global whitelist database with file information
:param uploader_name: The name of the global whitelist uploader
:type uploader_name: str
:param create_uploader: True if the uploader name associated with this whitelist update
(usually user email) should be created in whitelist DB if not already present.
:type create_uploader: bool
:param md5: The MD5 hash of the data to query
:type md5: str
:param sha1: The SHA1 hash of the data to query
:type sha1: str
:param sha256: The SHA256 hash of the data to query
:type sha256: str
:param confidence: confidence rating of this whitelist entry (1-100). If None, will
use default confidence of the uploader.
:type confidence: int
:param is_revoked: True if this file is no longer whitelisted.
:type is_revoked: bool
:param is_public: True if this whitelist entry may be made publicly available to Lastline
customers.
:type is_public: bool
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
assert md5 or sha1 or sha256, "Need to provide one of md5/sha1/sha256"
url = self._build_url('management', ['update_global_whitelist_info'])
params = purge_none({
'uploader_name': uploader_name,
'create_uploader': create_uploader,
'md5': md5,
'sha1': sha1,
'sha256': sha256,
'confidence': confidence,
'is_revoked': is_revoked,
'is_public': is_public
})
return self._api_request(url, params, post=True, raw=raw)
def add_untrusted_signer(
self, signer_name, reputation, regexp_common_name=None, regexp_company_name=None, raw=False
):
"""
Add untrusted signer information with negative reputation to database
        :param str signer_name: A name identification for the untrusted signer
        :param int reputation: The (negative) reputation to be assigned to the untrusted signer
        :param str | None regexp_common_name: The regexp to be applied to identify the
            untrusted signer
:param str | None regexp_company_name: If present, contains a regular expression
to match the company name
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['add_untrusted_signer'])
if not (-100 <= reputation < 0):
raise Error("Invalid reputation value")
params = purge_none({
'signer_name': signer_name,
'regexp_common_name': regexp_common_name,
'reputation': reputation,
'regexp_company_name': regexp_company_name
})
return self._api_request(url, params, post=True, raw=raw)
def add_trusted_signer(
self, signer_name, reputation, file_stream, hash_type='sha1',
regexp_common_name=None, regexp_company_name=None, raw=False
):
"""
        Add a trusted signer fingerprint to the database.
:param str signer_name: A name identification for the trusted_signer
:param int reputation: The reputation to be assigned to the trusted signer
:param stream file_stream: Stream to submit
:param str regexp_common_name: The regexp to be applied to identify the
trusted signer
:param str | None regexp_company_name: If present, contains a regular expression
to match the company name
:param str hash_type: Hash algorithm used to generate the certificate fingerprint.
            Currently accepts 'md5' and 'sha1'.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['add_trusted_signer'])
if not (0 <= reputation <= 100):
raise Error("Invalid reputation value")
if hash_type not in frozenset(['md5', 'sha1']):
raise Error("Invalid hash type: {}".format(hash_type))
params = purge_none({
'signer_name': signer_name,
'reputation': reputation,
'regexp_common_name': regexp_common_name,
'regexp_company_name': regexp_company_name,
'hash_type': hash_type
})
files = {"file": ('file', file_stream)}
return self._api_request(url, params, files=files, post=True, raw=raw)
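    # Usage sketch: registering a trusted-signer fingerprint read from a local
    # file. `client` is assumed from earlier sketches and the file path is
    # illustrative; per the checks above, `reputation` must be in 0-100 and
    # `hash_type` one of 'md5'/'sha1'.
    #
    #   with open("signer_fingerprint.pem", "rb") as cert_stream:
    #       client.add_trusted_signer(
    #           signer_name="Example Corp", reputation=90,
    #           file_stream=cert_stream, hash_type="sha1")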
def remove_signer_fingerprint(self, md5_fingerprint=None, sha1_fingerprint=None, raw=False):
"""
        Remove a signer fingerprint from the database. Callers must provide exactly one of
        the md5_fingerprint or sha1_fingerprint parameters.
:param str | None md5_fingerprint: The md5 hash value of the fingerprint
:param str | None sha1_fingerprint: The sha1 hash value of the fingerprint
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['remove_signer_fingerprint'])
if not md5_fingerprint and not sha1_fingerprint:
raise Error("Please provide either md5 or sha1 fingerprint")
if md5_fingerprint and sha1_fingerprint:
raise Error("Please don't provide both md5 and sha1 fingerprint")
params = purge_none({
'md5_fingerprint': md5_fingerprint,
'sha1_fingerprint': sha1_fingerprint
})
return self._api_request(url, params, post=True, raw=raw)
def remove_signer(self, signer_name, raw=False):
"""
        Remove a signer from the database. Note: A signer cannot be removed if there remains a
signer fingerprint associated with it.
:param str signer_name: The name of the signer
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['remove_signer'])
assert signer_name, "Signer name cannot be empty"
params = {'signer_name': signer_name}
return self._api_request(url, params, post=True, raw=raw)
def get_mitre_attack_info(
self, mitre_technique_ids=None, mitre_tactic_ids=None, raw=False, verify=True
):
"""
From a list of mitre technique or tactic ids, get the information for each ID passed in
:param list(str) mitre_technique_ids: A list of Mitre technique IDs
:param list(str) mitre_tactic_ids: A list of Mitre tactic IDs
:return: The dictionary with a mapping of each ID passed in (if we know info about it)
and the info for that ID
Example:
            {
                'mitre_techniques': {
                    'ID1': {
                        'name': 'foo',
                        'id': 'ID1',
                        'description': 'id1 description',
                        'url': 'https://attack.mitre.org/techniques/ID1',
                        'tactics': [
                            {
                                'id': 'tactic1',
                                'name': 'bar',
                                'description': 'tactic1 description',
                                'url': 'https://attack.mitre.org/tactics/tactic1',
                            },
                            {
                                'id': 'tactic2',
                                'name': 'bar2',
                                'description': 'tactic2 description',
                                'url': 'https://attack.mitre.org/tactics/tactic2',
                            },
                        ]
                    }
                },
                'mitre_tactics': {
                    'ID2': {
                        'id': 'id2',
                        'name': 'bar3',
                        'description': 'id2 description',
                        'url': 'https://attack.mitre.org/tactics/id2',
                    }
                }
            }
:rtype: dict(dict)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
assert mitre_technique_ids or mitre_tactic_ids, 'Missing mitre_technique and tactics ids'
url = self._build_url("analysis", ["get_mitre_attack_info"])
params = {}
if mitre_technique_ids:
params['mitre_technique_ids'] = ','.join(mitre_technique_ids)
if mitre_tactic_ids:
params['mitre_tactic_ids'] = ','.join(mitre_tactic_ids)
return self._api_request(url, params, raw=raw, verify=verify)
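    # Usage sketch: resolving MITRE ATT&CK technique and tactic IDs to their
    # descriptions; the return value follows the Example in the docstring
    # above. `client` is assumed from earlier sketches; the IDs below are real
    # ATT&CK identifiers used purely as sample input.
    #
    #   info = client.get_mitre_attack_info(
    #       mitre_technique_ids=["T1055"], mitre_tactic_ids=["TA0002"])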
def add_av_detection_score(
self, av_product_name, llfile_class_name, av_class_name=None, av_family_name=None,
score=None,
):
"""
Add an av detection score suppressor entry in the DB
:param str av_product_name: The name of the av product we want to apply the suppression to
:param str llfile_class_name: The llfile class name we want to apply the suppression to,
for example 'File' or 'DosExeFile'. This should be the exact same name of the
llfile class that will be suppressed.
:param str av_class_name: If provided, only add suppression for the passed av class,
for example 'trojan'. This should be the exact class name that we are using when doing
the suppression of the sample. If None is provided, we will look on the other filters
for doing the suppression (llfile_class/av_family_name)
:param str av_family_name: If provided, only add suppression for the passed av family,
for example 'manilla'. This should be the exact family name that we are using when
doing the suppression of the sample. If None is provided, we will look on the other
filters for doing the suppression (llfile_class/av_class_name)
:param int score: If provided, when suppressing the detection score, set this score
instead. If not provided, we will always set the score 0
:return: A dictionary with information about the addition of the suppression in the
database, for example:
            If updated successfully
{
'updated': True,
'message': 'Av detection score has been added or updated'
}
If update failed
{
'updated': False,
'message': 'Score is not in a valid range'
}
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['add_av_detection_score'])
if score is not None and not (0 <= score <= 100):
raise Error("Invalid score value")
params = purge_none({
'av_product_name': av_product_name,
'llfile_class_name': llfile_class_name,
'av_class_name': av_class_name,
'av_family_name': av_family_name,
'score': score
})
return self._api_request(url, params, post=True)
def delete_av_detection_score(
self, av_product_name, llfile_class_name, av_class_name=None, av_family_name=None
):
"""
Remove an av detection score suppressor entry in the DB
:param str av_product_name: The name of the av product in the entry we want to delete
:param str llfile_class_name: The llfile class name in the entry we want to delete,
for example 'File' or 'DosExeFile'. This should be the exact same name of the
llfile class that was stored before, when doing the suppression
:param str av_class_name: If provided, only delete the entry with this av_class,
for example 'trojan'. This should be the exact class name that was used when creating
the suppression earlier.
:param str av_family_name: If provided, only delete the entry with this av_family,
for example 'manilla'. This should be the exact family name that was used when creating
the suppression earlier.
:return: A dictionary with information about the deletion of the suppression in the
database, for example:
            If removed successfully
{
'removed': True,
'message': 'Av detection has been removed'
}
            If removal failed
{
'removed': False,
'message': 'Removing av detection failed, nothing changed'
}
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
url = self._build_url('management', ['delete_av_detection_score'])
params = purge_none({
'av_product_name': av_product_name,
'llfile_class_name': llfile_class_name,
'av_class_name': av_class_name,
'av_family_name': av_family_name,
})
return self._api_request(url, params, post=True)
def _api_request(self,
url,
params=None,
files=None,
timeout=None,
post=False,
raw=False,
requested_format="json",
verify=True):
"""
Send an API request and return the results.
:param url: API URL to fetch.
:param params: GET or POST parameters.
:param files: files to upload with request.
:param timeout: request timeout in seconds.
:param post: use HTTP POST instead of GET
:param raw: return the raw json results of API query
:param requested_format: JSON or XML. If format is not JSON, this implies `raw`.
:param verify: if True, verify ssl, otherwise False
"""
raise NotImplementedError("%s does not implement api_request()" % self.__class__.__name__)
def _process_response_page(self, page, raw, requested_format, disposition=None):
"""
Helper for formatting/processing api response before returning it.
"""
if raw or requested_format.lower() != "json":
# Handle special dispositions
if disposition:
disp_type = disposition.get('type')
disp_params = disposition.get('params')
if disp_type == 'attachment':
return NamedStringIO(
page, name=disp_params.get('filename'))
return page
        # why does pylint think result is a bool?
#pylint: disable=E1103
result = simplejson.loads(page)
success = result['success']
if success:
return result
error_code = result.get('error_code', None)
# raise the most specific error we can
exception_class = AnalysisClientBase.ERRORS.get(error_code, AnalysisAPIError)
raise exception_class(result['error'], error_code)
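    # Error-handling sketch: when `raw` is not set, API error codes are mapped
    # to the most specific exception class registered in
    # AnalysisClientBase.ERRORS (falling back to AnalysisAPIError, as above), so
    # callers can catch specific failures separately. `client` and `task_uuid`
    # are assumed from earlier sketches; catching InvalidUUIDError before
    # AnalysisAPIError assumes the specific classes derive from it, as the
    # fallback in the mapping suggests.
    #
    #   try:
    #       result = client.get_result(task_uuid)
    #   except InvalidUUIDError:
    #       result = None  # unknown or expired task UUID
    #   except AnalysisAPIError as err:
    #       result = None  # other API-level failure; err carries the error code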
def rescore_task(self, md5, sha1,
min_score=0, max_score=100,
threat=None, threat_class=None,
uploader_name='malscape-rescoring',
create_uploader=False,
force_local=False, raw=False):
"""
Enforce re-scoring of a specific file based on the
submitted file's md5/sha1 hash. Requires specific permissions.
md5 and sha1 must be provided. sha1 must
match with the md5 that was provided. Existing manual-score threat/
threat-class information will not be overwritten unless an empty-
string ('') is passed to this function.
This API-call returns the task-UUIDs that were triggered for rescoring.
:param md5: the md5 hash of the submitted file.
:param sha1: the sha1 hash of the submitted file.
:param uploader_name: The name of the uploader (usually user email) to put into the global
whitelist database.
:type uploader_name: str
:param create_uploader: True if the uploader name associated with this whitelist update
should be created in whitelist DB if not already present.
:type create_uploader: bool
:param force_local: if True, enforce that the manual score is applied
only locally. This is the default for on-premise instances and
cannot be enforced there. Requires special permissions.
:param raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
assert md5 and sha1, "Please provide md5 and sha1"
url = self._build_url('management', ['rescore'])
params = purge_none({
'md5': md5,
'sha1': sha1,
'min_score': min_score,
'max_score': max_score,
'threat': threat,
'threat_class': threat_class,
'uploader_name': uploader_name,
'create_uploader': create_uploader,
# use the default if no force is set
'force_local': force_local and 1 or None,
})
return self._api_request(url, params, raw=raw, post=True)
def rescore_backend_task(self, report_uuid, score=None, reputation=None, raw=False):
"""
Enforce re-scoring of a specific backend task using the new score/reputation.
        At most one of score/reputation may be provided, as appropriate to the backend type.
        If neither is provided, the task associated with the backend task is rescored
        without changing the backend task score.
:param str report_uuid: the unique identifier of the backend task, as returned in the
report_uuid field of get_task_status method for each backend engaged in the task.
:param int score: the new score of the backend task, for backend tasks with a score column.
:param int reputation: the new reputation of the backend task, for backend tasks with a
reputation column.
:param bool raw: if True, return the raw JSON/XML results of the API query.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
assert report_uuid is not None, "Please provide report_uuid"
assert score is None or reputation is None, "Please provide only ONE of score/reputation"
url = self._build_url('management', ['rescore_backend'])
params = purge_none({
'report_uuid': report_uuid,
'score': score,
'reputation': reputation,
})
return self._api_request(url, params, raw=raw, post=True)
def rescore_scanner(self, scanner, after, before,
min_score=0, max_score=100,
min_scanner_score=0, max_scanner_score=100,
min_version=0, max_version=None,
test_flag=None, force=False,
raw=False):
"""
Find tasks that triggered a certain scanner and mark them for
reprocessing.
This API-call returns the task-UUIDs that were triggered for rescoring.
:param scanner: Name of the scanner.
:param after: Reprocess tasks completed after this time.
:param before: Reprocess tasks completed before this time.
:param min_score: Minimum score of tasks to reprocess.
:param max_score: Maximum score of tasks to reprocess.
:param min_scanner_score: Minimum score of scanner detection (on backend
task) to reprocess.
:param max_scanner_score: Maximum score of scanner detection (on backend
task) to reprocess.
:param min_version: Minimum version of scanner to reprocess.
:param max_version: Maximum version of scanner to reprocess.
:param test_flag: If True, only affect backend-tasks where the scanner
was in *test* mode; if False, only affect backend-tasks where the
scanner was in *real* mode; otherwise affect all backend-tasks
regardless of the *test* flag.
:param force: By default, the API will refuse rescoring any scanners that
affect more than 100 tasks. To rescore large amounts, distribute the
work over multiple time-windows. This safety can be disabled by
setting the *force* parameter to True.
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
if hasattr(before, "strftime"):
before = before.strftime(AnalysisClientBase.DATETIME_FMT)
if hasattr(after, "strftime"):
after = after.strftime(AnalysisClientBase.DATETIME_FMT)
url = self._build_url('management', ['rescore_scanner'])
params = purge_none({
'scanner': scanner,
'after': after,
'before': before,
'min_score': min_score,
'max_score': max_score,
'min_scanner_score': min_scanner_score,
'max_scanner_score': max_scanner_score,
'min_version': min_version,
'max_version': max_version,
})
if test_flag is not None:
params['test_flag'] = test_flag and 1 or 0
if force:
params['force'] = 1
return self._api_request(url, params, raw=raw, post=True)
def suppress_scanner(self, scanner, max_version, raw=False):
"""
Mark a scanner as suppressed.
:param scanner: Name of the scanner.
:param max_version: Version of scanner up to which it is supposed to be
suppressed. So, if the first scanner-version that should be used
for scoring is X, provide (X-1).
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
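Example (illustrative sketch; the scanner name and version are placeholders and
`analysis` is assumed to be an already-constructed AnalysisClient):
    # suppress all versions up to (and including) 41, so scoring starts at version 42
    analysis.suppress_scanner(scanner="<scanner-name>", max_version=41)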
"""
url = self._build_url('management', ['suppress_scanner'])
params = purge_none({
'scanner': scanner,
'max_version': max_version,
})
return self._api_request(url, params, raw=raw, post=True)
def create_ticket(self, uuid=None, md5=None, sha1=None,
min_score=0, max_score=100, summary=None, labels=None,
is_false_negative=False, is_false_positive=False,
is_from_customer=False, is_from_partner=False,
is_falses_ml=False, force=True, raw=False):
"""
Create an ANREV ticket for a specific task or multiple tasks based on
the submitted file. Requires specific permissions.
At least one of uuid/md5/sha1 must be provided. If both file-hashes are
provided, they must match the same file.
:param str uuid: the unique identifier of the submitted task, as returned in the task_uuid
field of submit methods.
:param str md5: the md5 hash of the submitted file.
:param str sha1: the sha1 hash of the submitted file.
:param bool force: if True, enforce the generation of a ticket, even if none of the
task-analysis rules would have generated a ticket
:param int min_score: Limit generation of tickets to tasks above the given threshold
:param int max_score: Limit generation of tickets to tasks below the given threshold
:param str summary: Optional summary (title) to use for the ticket.
:param iterable[str] labels: Optional set of labels to assign to a task
:param bool is_false_negative: Helper parameter to add the standard FN label
:param bool is_false_positive: Helper parameter to add the standard FP label
:param bool is_from_customer: Helper parameter to add the standard from-customer label
:param bool is_from_partner: Helper parameter to add the standard from-partner label
:param bool is_falses_ml: Helper parameter to add the standard falses-ml label
:param bool raw: if True, return the raw JSON/XML results of the API query.
:returns: a dictionary detailing the result.
- If successful a dictionary with at least the following keys:
- result: the result of the action
- task_uuid: the task uuid of the analysis
- score: the score given to that analysis
- If the outcome includes the creation of a ticket, then the following keys are added:
- ticket_id: the (internal) Jira id of the ticket
- ticket_key: the (public) Jira key of the ticket
- ticket_url: the URL of the ticket
Examples:
- Creating a ticket about a false positive from a customer:
analysis.create_ticket(
md5="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
is_false_positive=True,
is_from_customer=True
)
- Content of a successful response:
{
"result": "Ticket generated",
"task_uuid": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"score": 66,
"ticket_id": "123",
"ticket_key": "ANREV-333",
"ticket_url": "https://lastline.atlassian.net/browse/ANREV-333"
}
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
assert uuid or md5 or sha1, "Please provide task-uuid/md5/sha1"
url = self._build_url('management', ['create_ticket'])
if labels:
labels = set(labels)
else:
labels = set()
if is_false_negative:
labels.add('false_negatives')
if is_false_positive:
labels.add('false_positives')
if is_from_customer:
labels.add('from-customer')
if is_from_partner:
labels.add('from-partner')
if is_falses_ml:
labels.add('falses-ml')
if labels:
labels_list = ','.join(labels)
else:
labels_list = None
params = purge_none({
'uuid': uuid,
'md5': md5,
'sha1': sha1,
'min_score': min_score,
'max_score': max_score,
'force': force and 1 or 0,
'summary': summary,
'labels': labels_list,
})
return self._api_request(url, params, raw=raw, post=True)
def get_license_activity(self, query_start=None, query_end=None,
raw=False):
"""
Fetch license activity information.
DEPRECATED. DO NOT USE
"""
unused = query_start, query_end, raw
assert False, "Call to deprecated API function"
def get_api_utc_timestamp(self):
"""
Query the API to get its UTC timestamp: do this *before* submitting
to avoid racing or clock-skew with the local clock
:returns: Current UTC timestamp according to API
:rtype: `datetime.datetime`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
start_info = self.get_completed(
after='2039-12-31 23:59:59'
)
return parse_datetime(start_info['data']['before'])
def get_status(self):
"""
Get the status of malscape, indicating if all is ok or not
:returns: A dict with the load results:
{
'all_ok': An int which can be 0 or 1 indicating that everything is ok (1) or if
something is not correct (0) in malscape
}
"""
url = self._build_url('management', ['get_status'])
return self._api_request(url)
def ping(self, raw=False, verify=True):
"""
Check if base API responds.
"""
url = self._build_url('authentication', ['ping'])
return self._api_request(url, raw=raw, verify=verify)
def is_risky_analysis_artifact(self,
report_uuid,
artifact_name,
task_uuid=None,
raw=False,
verify=True,
allow_datacenter_redirect=None):
"""
Check if the artifact can potentially be malicious using the artifact information.
:param str report_uuid: Identifier of the requested report to which the artifact is assigned
:param str artifact_name: Identifier of task artifact
:param str|None task_uuid: Unique identifier for the task that analyzed the artifact. If not
present, will only look for artifact in local datacenter.
:param bool raw: if True, return the raw JSON results of the API query.
:param bool verify: if True, verify SSL certificates; if False, skip verification
:param bool|None allow_datacenter_redirect: If False, redirection to other datacenters
is prevented.
:return: True if the artifact is risky, False otherwise
:rtype: bool
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
:raises InvalidArtifactError: Invalid artifact uuid.
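Example (illustrative sketch; the identifiers are placeholders and `analysis` is
assumed to be an already-constructed AnalysisClient):
    is_risky = analysis.is_risky_analysis_artifact(
        report_uuid="<report-uuid>",
        artifact_name="analysis_subject",
        task_uuid="<task-uuid>",
    )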
"""
if report_uuid and artifact_name:
params = purge_none({
'artifact_uuid': "{}:{}".format(report_uuid, artifact_name),
'uuid': task_uuid,
'allow_datacenter_redirect': allow_datacenter_redirect,
})
url = self._build_url('analysis', ['is_risky_analysis_artifact'])
return self._api_request(url, params, raw=raw, verify=verify)
raise InvalidArtifactError("The report uuid and artifact name must both be provided")
class AnalysisClient(AnalysisClientBase):
"""
Client for the Analysis API.
A client for the Analysis API that accesses the API through the web,
using key and api token for authentication, and the python
requests module for sending requests.
NOTE: This class is not thread safe
"""
# maximum unsuccessful login attempts in a row
MAX_LOGIN_ATTEMPTS = 2
DEFAULT_TIMEOUT = 60
_AUTH_METHOD__LICENSE = 'license'
_AUTH_METHOD__UNAUTHENTICATED = 'unauthenticated'
_AUTH_METHODS = (_AUTH_METHOD__LICENSE, _AUTH_METHOD__UNAUTHENTICATED)
@classmethod
def from_config(cls, config, config_section='analysis', logger=None):
"""
Factory method for instantiating an API client from config
:param ConfigParser.ConfigParser config: Config object to read from
:param str config_section: Section to read from
:param logging.Logger logger: Logger to use for API client instance
:return AnalysisClient: Instantiated client
:raise ConfigParser.Error: The provided configuration is invalid or incomplete
:raise ValueError: The provided configuration contains incorrect types for values
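Example (illustrative sketch; the config-file path is a placeholder and the section
is assumed to contain at least the 'url' and 'key' options described above):
    import configparser
    config = configparser.ConfigParser()
    config.read('/etc/lastline/analysis_client.conf')
    client = AnalysisClient.from_config(config, config_section='analysis')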
"""
auth_method = cls._AUTH_METHOD__LICENSE
try:
auth_method = config.get(config_section, 'auth_method')
except configparser.NoOptionError:
pass # keep default
else:
if auth_method not in cls._AUTH_METHODS:
raise ValueError("Unsupported auth_method '{}'".format(auth_method))
try:
verify_ssl = config.getboolean(config_section, 'verify_ssl')
except configparser.NoOptionError:
verify_ssl = True
try:
timeout = config.getfloat(config_section, 'timeout')
except configparser.NoOptionError:
timeout = AnalysisClient.DEFAULT_TIMEOUT
key = None
api_token = None
if auth_method == cls._AUTH_METHOD__LICENSE:
key = config.get(config_section, 'key')
try:
api_token = config.get(config_section, 'api_token')
except configparser.NoOptionError:
pass # for sensor-licenses, the API-token is optional
try:
use_cdn = config.getboolean(config_section, 'use_cdn')
except configparser.NoOptionError:
use_cdn = None
return cls(
base_url=config.get(config_section, 'url'),
key=key,
api_token=api_token,
verify_ssl=verify_ssl,
timeout=timeout,
use_cdn=use_cdn,
logger=logger,
config=config,
)
def __init__(
self,
base_url,
key,
api_token,
logger=None,
ca_bundle=None,
verify_ssl=True,
use_curl=False,
timeout=DEFAULT_TIMEOUT,
use_cdn=None,
proxies=None,
config=None
):
"""
:param str base_url: URL where the lastline analysis API is located. (required)
:param str|None key: API key for the Lastline Analyst API. If None is provided, the
client will not embed any type of authentication, assuming an upstream proxy will
embed the required credentials
:param str|None api_token: Optional API token for the Lastline Analyst API to embed.
The API token is optional for certain types of authentication schemas and may be
omitted
:param logging.Logger|None logger: if provided, should be a python logging.Logger object
or object with similar interface.
:param object|None ca_bundle: if provided, location of Certification Authority bundle
to use for authentication. This should not be required
if certificates are properly set up on the system.
:param bool verify_ssl: if True, verify SSL certificates. This overrides the
per-call parameter
:param dict proxies: dictionary with per-protocol proxy to use
(e.g. { 'http': 'localhost:3128', 'https': 'localhost:3128' })
:param float timeout: default timeout (in seconds) to use for network requests.
Set to None to disable timeouts
:param bool use_cdn: If False, will return CDN eligible content directly. Otherwise, will
use the CDN to deliver the content.
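Example (illustrative sketch; the URL matches the public Analyst API default used
elsewhere in this module, and the credentials are placeholders):
    client = AnalysisClient(
        base_url='https://analysis.lastline.com/analysis',
        key='<license-key>',
        api_token='<api-token>',
        timeout=30,
    )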
"""
AnalysisClientBase.__init__(self, base_url, use_cdn, logger, config)
self.__key = key
self.__api_token = api_token
self.__ca_bundle = ca_bundle
self.__verify_ssl = verify_ssl
self.__logger = logger
self.__timeout = timeout
if use_curl and logger:
logger.warning("Ignoring deprecated use_curl option")
if proxies is None and config:
self.__proxies = get_proxies_from_config(config)
else:
self.__proxies = proxies
self.__session = None
def set_key(self, key):
self.__key = key
self._logout()
def set_api_token(self, api_token):
self.__api_token = api_token
self._logout()
def set_ssl_verification(self, value=True):
"""
Allow enabling/disabling SSL verification on the fly
"""
self.__verify_ssl = value
def _login(self):
"""
Creates auth session for malscape-service.
"""
# if the client runs in a mode without authentication, building a session
# and doing an explicit login is not necessary.
if self.__key is None:
self.__session = MockSession(logger=self._logger())
return
self.__session = requests.session()
url = self._build_url('authentication', ['login'])
params = {'key': self.__key}
if self.__api_token:
params['api_token'] = self.__api_token
try:
self._api_request(url=url, params=params, post=True, verify=self.__verify_ssl)
except FailedRequestError as exc:
if exc.status_code() != 404:
raise
if self._logger():
self._logger().debug("Login raised %s: switching to legacy authentication", exc)
# the API does not support the login call, and thus not session-based authentication.
# Switch to embedding credentials in each request
self.__session = MockSession(credentials=params, logger=self._logger())
def _logout(self):
"""
Destroys auth session for malscape-service.
"""
if not self.__session:
return
self.__session.close()
self.__session = None
def _save_stream_positions(self, files):
"""
Stores stream_positions for files
:param files: dictionary with filestreams, according to requests.request 'files' parameter
:type files: `dict`
:return: dictionary with filenames and according stream positions
:rtype: `dict`
"""
result = {}
if not files:
return result
for file_name, file_object in files.items():
# 'files' value can be tuple or file-like object, according to python-requests docs
if isinstance(file_object, tuple):
file_stream = file_object[1]
else:
file_stream = file_object
result[file_name] = file_stream.tell()
return result
def _restore_stream_positions(self, stream_positions, files):
"""
Restores stream positions, saved earlier
:param stream_positions: dictionary 'filename: position'
:type stream_positions: `dict`
:param files: dictionary with filestreams, according to requests.request 'files' parameter
:type files: `dict`
"""
for file_name, stream_position in stream_positions.items():
file_object = files[file_name]
if isinstance(file_object, tuple):
file_stream = file_object[1]
else:
file_stream = file_object
file_stream.seek(stream_position)
def _api_request(self,
url,
params=None,
files=None,
timeout=None,
post=False,
raw=False,
requested_format="json",
verify=True):
# first, perform authentication, if we have no session
if not self.__session:
self._login()
if self._logger():
self._logger().info("Requesting %s", url)
if not params:
params = {}
# we allow anyone setting this flag, but only admins will get any data back
if self.REQUEST_PERFDATA:
params['perfdata'] = 1
method = "GET"
data = None
if post or files:
method = "POST"
data = params
params = None
if not self.__verify_ssl or not verify:
verify_ca_bundle = False
elif self.__ca_bundle:
verify_ca_bundle = self.__ca_bundle
else:
verify_ca_bundle = True
# save stream positions in case of reauthentication
stream_positions = self._save_stream_positions(files)
# start authentication / reauthentication loop
login_attempt = 1
while True:
try:
response = self.__session.request(
method, url,
params=params, data=data, files=files,
timeout=timeout or self.__timeout,
verify=verify_ca_bundle,
proxies=self.__proxies)
# raise if anything went wrong
response.raise_for_status()
except requests.HTTPError as exc:
if self.__logger:
self.__logger.warning("HTTP Error contacting Lastline Analyst API: %s", exc)
if exc.response is not None:
status_code = exc.response.status_code
msg = exc.response.text
else:
status_code = None
msg = None
# raise a wrapped exception - if the HTTP status code maps into a specific
# exception class, use that class
try:
exception_class = AnalysisClientBase.HTTP_ERRORS[status_code]
except KeyError:
raise FailedRequestError(msg=msg, error=exc, status_code=status_code)
else:
raise exception_class(msg=msg)
except requests.RequestException as exc:
if self.__logger:
self.__logger.warning("Error contacting Lastline Analyst API: %s", exc)
# raise a wrapped exception
raise CommunicationError(error=exc)
# Get the response content, as a unicode string if the response is
# textual, as a regular string otherwise.
content_type = response.headers.get("content-type")
if content_type and (
content_type.startswith("application/json") or
content_type.startswith("text/")):
response_data = response.text
else:
response_data = response.content
# Get the response disposition if defined
disposition = None
content_disposition = response.headers.get("content-disposition")
if content_disposition:
# Always returns a (type, params) tuple, even if
# no parameters are provided or the string is empty
disp_type, disp_params = cgi.parse_header(content_disposition)
if disp_type:
disposition = {'type': disp_type.lower(),
'params': disp_params}
try:
response_result = self._process_response_page(
response_data, raw, requested_format, disposition
)
except AuthenticationError:
self._logout()
# if this is not a real session, we have embedded the credentials in the request
# and retrying won't change anything
if isinstance(self.__session, MockSession):
raise
# don't try more than N times - we essentially need to only retry establishing a
# session, so N>2 doesn't make too much sense
if login_attempt >= self.MAX_LOGIN_ATTEMPTS:
raise AuthenticationError(
'login failed {} times'.format(self.MAX_LOGIN_ATTEMPTS))
if self.__logger:
self.__logger.warning('attempting to restore connection (attempt %d)',
login_attempt)
self._login()
self._restore_stream_positions(stream_positions, files)
login_attempt += 1
else:
# if all goes well, just return result
return response_result
class SubmittedTask(object):
"""
Representation of a task that was submitted
"""
def __init__(self, task_uuid, score=None, error=None, error_exception=None,
submission_timestamp=None, insufficient_task_input_errors=None, expires=None):
"""
:param task_uuid: The returned task-UUID, if one was returned
:type task_uuid: `str` | None
:param score: The returned score, if one is available
:type score: `int` | None
:param error: The returned error, if submission failed
:type error: `str` | None
:param error_exception: Detailed exception data, if submission failed
:type error_exception: `AnalysisAPIError` | None
:param submission_timestamp: time stamp of when this task was submitted
:type submission_timestamp: datetime.datetime | None
:param insufficient_task_input_errors: error codes that describe where
the input was not sufficient to properly analyze the task.
:type insufficient_task_input_errors: list(int) | None
:param expires: The earliest time that the results of this task may expire.
:type expires: datetime.datetime | None
"""
self.__task_uuid = task_uuid
self.__submission_timestamp = submission_timestamp
self.__error = error
self.__error_exception = error_exception
self.__score = score
self.__insufficient_task_input_errors = insufficient_task_input_errors
self.__expires = expires
@property
def task_uuid(self):
return self.__task_uuid
@property
def error(self):
return self.__error
@property
def error_exception(self):
return self.__error_exception
@property
def score(self):
if self.__score is not None:
return self.__score
if self.error:
return 0
raise NoResultFoundError("Task not complete")
@property
def submission_timestamp(self):
return self.__submission_timestamp
@property
def insufficient_task_input_errors(self):
return self.__insufficient_task_input_errors
@property
def expires(self):
return self.__expires
def set_score(self, score):
"""
Update the score of this task. May only be done if not set yet (see
`self.is_complete()`).
:param score: Score to set
:type score: `int`
"""
if self.__score is not None:
raise Error("Double-setting score")
if 0 <= score <= 100:
self.__score = int(score)
else:
raise Error("Invalid score")
def is_complete(self):
"""
Check if this task represents a complete task
:returns: True if this task is marked completed, False otherwise.
:rtype: `bool`
"""
return self.__score is not None or self.__error is not None
def __eq__(self, other):
return isinstance(other, SubmittedTask) and other.task_uuid == self.task_uuid
def __str__(self):
s = "AnalysisTask"
if self.task_uuid:
s += " {}".format(self.task_uuid)
if self.submission_timestamp:
s += " {}".format(self.submission_timestamp)
if self.error_exception:
s += "(error: {})".format(self.error_exception)
elif self.error:
s += "(error: {})".format(self.error)
elif self.__score is not None:
s += "(score: {})".format(self.__score)
return s
class SubmittedFileTask(SubmittedTask):
"""
Representation of a file task that was submitted
"""
def __init__(self, file_md5, file_sha1, file_sha256, task_uuid,
filename=None, score=None,
error=None, error_exception=None, submission_timestamp=None,
insufficient_task_input_errors=None, expires=None):
"""
:param file_md5: The MD5 of the submitted file
:type file_md5: `str`
:param file_sha1: The SHA1 of the submitted file
:type file_sha1: `str`
:param file_sha256: The SHA256 of the submitted file
:type file_sha256: `str`
:param task_uuid: The returned task-UUID, if one was returned
:type task_uuid: `str` | None
:param filename: The name of the file that was submitted
:type filename: `str` | None
:param score: The returned score, if one is available
:type score: `int` | None
:param error: The returned error, if submission failed
:type error: `str` | None
:param error_exception: Detailed exception data, if submission failed
:type error_exception: `AnalysisAPIError` | None
:param submission_timestamp: time stamp of when this task was submitted
:type submission_timestamp: datetime.datetime | None
:param insufficient_task_input_errors: error codes that describe where invalid
input was provided
:type insufficient_task_input_errors: list(int) | None
:param expires: The earliest time that the results of this task may expire.
:type expires: datetime.datetime | None
"""
if not file_md5 or len(file_md5) != 32:
raise ValueError("Invalid file MD5")
if not file_sha1 or len(file_sha1) != 40:
raise ValueError("Invalid file SHA1")
if not file_sha256 or len(file_sha256) != 64:
raise ValueError("Invalid file SHA256")
SubmittedTask.__init__(
self,
task_uuid=task_uuid,
score=score,
error=error,
error_exception=error_exception,
submission_timestamp=submission_timestamp,
insufficient_task_input_errors=insufficient_task_input_errors,
expires=expires,
)
self.__file_md5 = file_md5
self.__file_sha1 = file_sha1
self.__file_sha256 = file_sha256
self.__filename = filename
@property
def file_md5(self):
return self.__file_md5
@property
def file_sha1(self):
return self.__file_sha1
@property
def file_sha256(self):
return self.__file_sha256
@property
def filename(self):
return self.__filename
def __str__(self):
s = "%s: MD5=%s, SHA1=%s" % (
SubmittedTask.__str__(self),
self.file_md5,
self.file_sha1,
)
if self.file_sha256:
s += ", SHA256=%s" % self.file_sha256
if self.filename:
s += ", name=%s" % self.filename
return s
class SubmittedURLTask(SubmittedTask):
"""
Representation of a URL task that was submitted
"""
def __init__(self, url, task_uuid, referer=None, score=None, error=None, error_exception=None,
submission_timestamp=None, expires=None):
"""
:param url: The URL that was submitted
:type url: `str`
:param task_uuid: The returned task-UUID, if one was returned
:type task_uuid: `str` | None
:param referer: The refer(r)er which was submitted for the URL
:type referer: `str`
:param score: The returned score, if one is available
:type score: `int` | None
:param error: The returned error, if submission failed
:type error: `str` | None
:param error_exception: Detailed exception data, if submission failed
:type error_exception: `AnalysisAPIError` | None
:param submission_timestamp: time stamp of when this task was submitted
:type submission_timestamp: datetime.datetime | None
:param expires: The earliest time that the results of this task may expire.
:type expires: datetime.datetime | None
"""
SubmittedTask.__init__(
self,
task_uuid=task_uuid,
score=score,
error=error,
error_exception=error_exception,
submission_timestamp=submission_timestamp,
expires=expires,
)
self.__url = url
self.__referer = referer
@property
def url(self):
return self.__url
@property
def referer(self):
return self.__referer
def __str__(self):
s = "%s: URL=%s" % (
SubmittedTask.__str__(self),
self.url,
)
if self.referer:
s += ", refer(r)er=%s" % self.referer
return s
class ExportedReport(object):
"""
Representation of a report that was exported
"""
def __init__(
self,
task_uuid,
report_uuid=None,
report_stream=None,
export_timestamp=None,
export_error=None):
"""
:param str task_uuid: Unique identifier for the task that was exported
:param str|None report_uuid: Unique identifier for the exported report
:param stream|None report_stream: Stream containing the downloaded report
:param datetime.datetime export_timestamp: Time report was exported
:param str|None export_error: Error that occurred while requesting or performing the export
"""
self.__task_uuid = task_uuid
self.__report_uuid = report_uuid
self.__report_stream = report_stream
self.__export_timestamp = export_timestamp
self.__export_error = export_error
@property
def task_uuid(self):
return self.__task_uuid
@property
def report_uuid(self):
return self.__report_uuid
@property
def report_stream(self):
return self.__report_stream
@property
def export_timestamp(self):
return self.__export_timestamp
@property
def export_error(self):
return self.__export_error
def __eq__(self, other):
return (isinstance(other, ExportedReport)
and other.task_uuid == self.task_uuid
and other.report_uuid == self.report_uuid
and other.export_error == self.export_error)
class SubmissionHelper(object):
"""
Helper class for handling submission and task retrieval
"""
# The max number of task-uuids to print in logging when telling how many tasks
# are still pending to be completed
MAX_WAITING_TASK_UUIDS_NUM = 10
def __init__(self, analysis_client, logger=None, num_retries=10):
"""
:param analysis_apiclient.AnalysisClientBase analysis_client: The client to use
:param logging.Logger|None logger: Optional logger to use. If None is provided, log to
stdout
:param int num_retries: Number of times to retry network requests on error.
Use 0 to disable retries or None for endless retries
"""
self.__analysis_client = analysis_client
self.__num_retries = num_retries
if logger:
self.__logger = logger
else:
self.__logger = logging.getLogger('lastline.analysis.api_client')
self.__logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
self.__logger.addHandler(ch)
def get_api_utc_timestamp(self):
"""
Query the API to get its UTC timestamp: do this *before* submitting
to avoid racing or clock-skew with the local clock
:returns: Current UTC timestamp according to API
:rtype: `datetime.datetime`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
return self.__analysis_client.get_api_utc_timestamp()
def submit_file_stream(self, file_stream, **kwargs):
"""
Submit a file for analysis and retrieve results if they are immediately
available. Additional parameters passed to this function are forwarded
to the client (see `submit_file_hash` or `submit_file`).
NOTE: To avoid a race-condition between submission and polling for
results, use the following approach::
helper = SubmissionHelper(<client>)
ts = helper.get_api_utc_timestamp()
submission = helper.submit_file_stream(<stream>)
helper.wait_for_completion_of_submission(submission, ts)
or use the `submit_file_streams_and_wait_for_completion()` helper
function.
NOTE: You may provide any of the parameters
- file_md5,
- file_sha1, or
- file_sha256
to avoid repeated file-hash calculations. Any hash not provided will be
generated from the given file-stream.
:param file_stream: Stream to submit
:type file_stream: `stream`
:returns: Submission results
:rtype: `SubmittedFileTask`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
# get the current seek position to put the stream back to exactly
# this point after reading the file for computing hashes
file_pos = file_stream.tell()
try:
file_md5 = kwargs.pop('file_md5')
if not file_md5: raise KeyError()
except KeyError:
file_md5 = hash_stream(file_stream, 'md5')
file_stream.seek(file_pos)
try:
file_sha1 = kwargs.pop('file_sha1')
if not file_sha1: raise KeyError()
except KeyError:
file_sha1 = hash_stream(file_stream, 'sha1')
file_stream.seek(file_pos)
try:
file_sha256 = kwargs.pop('file_sha256')
if not file_sha256: raise KeyError()
except KeyError:
file_sha256 = hash_stream(file_stream, 'sha256')
file_stream.seek(file_pos)
try:
filename = kwargs.pop('filename')
except KeyError:
if hasattr(file_stream, 'name'):
filename = os.path.basename(file_stream.name)
else:
# auto-select in the API
filename = None
# submit_file_hash does not take the "delete_after_analysis" parameter
try:
delete_after_analysis = kwargs.pop('delete_after_analysis')
except KeyError:
delete_after_analysis = False
# same for "mime_type" (only for submit_file_hash)
try:
mime_type = kwargs.pop('mime_type')
except KeyError:
mime_type = None
self.__logger.info("Submitting file %s (md5=%s, sha1=%s, sha256=%s)",
filename or '<unnamed>', file_md5, file_sha1,
file_sha256)
result_data = None
task_uuid = None
submission_timestamp = None
score = None
error = None
error_exception = None
submit_by_file = True
insufficient_task_input_errors = None
expires = None
# Only submit file hash if bypass_cache is not enabled
if not kwargs.get('bypass_cache'):
try:
result_data = self.__analysis_client.submit_file_hash(
md5=file_md5, sha1=file_sha1, sha256=file_sha256,
filename=filename,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
mime_type=mime_type,
**kwargs
)['data']
except AnalysisAPIError as err:
# NOTE: In theory we should only submit again if the file is not
# known, but submitting again either way does not hurt
self.__logger.debug("Submitting file by hash failed: %s", err)
else:
# NOTE: If bypass_cache is not enabled and we submitted file hash successfully
# we will not submit the file again.
submit_by_file = False
if submit_by_file:
try:
result_data = self.__analysis_client.submit_file(
file_stream=file_stream,
filename=filename,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
delete_after_analysis=delete_after_analysis,
**kwargs
)['data']
except AnalysisAPIError as err2:
# we are handling this error, and it's not a bug in the code, so
# it is logged just as a warning
self.__logger.warning(
"Submitting file %s (md5=%s, sha1=%s, sha256=%s) failed: %s",
filename or '<unnamed>', file_md5, file_sha1, file_sha256, err2)
error = str(err2)
error_exception = err2
if result_data is not None:
try:
task_uuid = result_data['task_uuid']
except KeyError:
# this path is not possible according to the API documentation,
# but just to be on the safe side...
error = "no task returned"
submission_timestamp = result_data.get('submission_timestamp')
score = result_data.get('score')
insufficient_task_input_errors = result_data.get('insufficient_task_input_errors')
try:
expires = parse_datetime(result_data['expires'])
except KeyError:
expires = None
# NOTE: We insert the data we have already now right away. This way the
# caller can skip waiting for completion if possible
return SubmittedFileTask(
file_md5=file_md5,
file_sha1=file_sha1,
file_sha256=file_sha256,
filename=filename,
task_uuid=task_uuid,
score=score,
error=error,
error_exception=error_exception,
submission_timestamp=submission_timestamp,
insufficient_task_input_errors=insufficient_task_input_errors,
expires=expires,
)
def submit_filename(self, filename, **kwargs):
"""
Submit a file for analysis and retrieve results if they are immediately
available. Additional parameters passed to this function are forwarded
to the client (see `submit_file_hash` or `submit_file`).
NOTE: To avoid a race-condition between submission and polling for
results, use the following approach::
helper = SubmissionHelper(<client>)
ts = helper.get_api_utc_timestamp()
submission = helper.submit_filename(<filename>)
helper.wait_for_completion_of_submission(submission, ts)
or use the `submit_filenames_and_wait_for_completion()` helper function.
:param filename: File on the local filesystem to submit
:type filename: `str`
:returns: Submission results
:rtype: `SubmittedFileTask`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
# NOTE: We release this file to customers who may run this code on Windows. Make
# sure to open the file in binary-mode. Python will otherwise truncate the file
# at the location where it detects non-text, and does so without any warning
with open(filename, 'rb') as file_stream:
return self.submit_file_stream(file_stream, **kwargs)
def submit_url(self, url, **kwargs):
"""
Submit a URL for analysis and retrieve results if they are immediately
available. Additional parameters passed to this function are forwarded
to the client (see `submit_url`).
NOTE: To avoid a race-condition between submission and polling for
results, use the following approach::
helper = SubmissionHelper(<client>)
ts = helper.get_api_utc_timestamp()
submission = helper.submit_url(<url>, referer=<referer>)
helper.wait_for_completion_of_submission(submission, ts)
or use the `submit_urls_and_wait_for_completion()` helper function.
:param url: URL to submit
:type url: `str`
:returns: Submission results
:rtype: `SubmittedURLTask`
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
self.__logger.info("Submitting URL %s", url)
result_data = None
task_uuid = None
submission_timestamp = None
score = None
error = None
error_exception = None
expires = None
try:
result_data = self.__analysis_client.submit_url(
url=url,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS,
**kwargs
)['data']
except AnalysisAPIError as err:
# we are handling this error, and it's not a bug in the code, so
# it is logged just as a warning
self.__logger.warning("Submitting URL %s failed: %s", url, err)
error = str(err)
error_exception = err
if result_data is not None:
try:
task_uuid = result_data['task_uuid']
except KeyError:
# this path is not possible according to the API documentation,
# but just to be on the safe side...
error = "no task returned"
else:
submission_timestamp = result_data.get('submission_timestamp')
score = result_data.get('score')
try:
expires = parse_datetime(result_data['expires'])
except KeyError:
expires = None
# NOTE: We insert the data we have already now right away. This way the
# caller can skip waiting for completion if possible
return SubmittedURLTask(
url=url,
referer=kwargs.get('referer'),
task_uuid=task_uuid,
submission_timestamp=submission_timestamp,
score=score,
error=error,
error_exception=error_exception,
expires=expires,
)
def submit_file_streams_and_wait_for_completion(
self, file_streams,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
**kwargs):
"""
Submit a list of files and wait for completion: For each file, submit
the file for analysis, wait for completion, and retrieve results.
Additional parameters passed to this function are forwarded to the
client (see `submit_file_hash` or `submit_file`).
:param file_streams: List of streams to submit
:type file_streams: `list`(`stream`)
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever.
NOTE: If waiting times out, the result will contain elements whose
score is set to `None`. This method does *not* raise
`WaitResultTimeout` to allow retrieving the result even when waiting
for completion timed out.
:type wait_completion_max_seconds: `float`
:returns: Dictionary of results
:rtype: `dict`(`SubmittedFileTask`)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
start_ts = self.get_api_utc_timestamp()
self.__logger.info("Submitting %d files", len(file_streams))
results = {}
for file_stream in file_streams:
# the caller may want to submit all files using the same
# filename, so we really forward *all* arguments
results[file_stream] = self.submit_file_stream(
file_stream=file_stream, **kwargs
)
try:
self.wait_for_completion(
results,
start_timestamp=start_ts,
wait_completion_interval_seconds=
wait_completion_interval_seconds,
wait_completion_max_seconds=wait_completion_max_seconds,
verify=kwargs.get('verify', True)
)
except WaitResultTimeout as err:
self.__logger.warning("Waiting for file submissions completion "
"failed: %s", err)
return results
def submit_filenames_and_wait_for_completion(
self, filenames,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
**kwargs):
"""
Submit a list of files and wait for completion: For each file, submit
the file for analysis, wait for completion, and retrieve results.
Additional parameters passed to this function are forwarded to the
client (see `submit_file_hash` or `submit_file`).
:param filenames: List of files on the local filesystem to submit
:type filenames: `list`(`str`)
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever.
NOTE: If waiting times out, the result will contain elements whose
score is set to `None`. This method does *not* raise
`WaitResultTimeout` to allow retrieving the result even when waiting
for completion timed out.
:type wait_completion_max_seconds: `float`
:returns: Dictionary of results
:rtype: `dict`(`SubmittedFileTask`)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
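Example (illustrative sketch; `client` is an already-constructed AnalysisClient and
the filenames are placeholders):
    helper = SubmissionHelper(client)
    results = helper.submit_filenames_and_wait_for_completion(
        ['/path/to/sample1.exe', '/path/to/sample2.pdf'],
        wait_completion_max_seconds=600,
    )
    for filename, task in results.items():
        if task.is_complete():
            print(filename, task.task_uuid, task.score)
        else:
            print(filename, task.task_uuid, "timed out")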
"""
file_streams = {}
try:
# NOTE: use set() to make sure the list is unique
for filename in set(filenames):
# NOTE: We release this file to customers who may run this code on Windows. Make
# sure to open the file in binary-mode. Python will otherwise truncate the file
# at the location where it detects non-text, and does so without any warning
file_streams[open(filename, 'rb')] = filename
results_streams = self.submit_file_streams_and_wait_for_completion(
file_streams=list(file_streams.keys()),
wait_completion_interval_seconds=
wait_completion_interval_seconds,
wait_completion_max_seconds=wait_completion_max_seconds,
**kwargs
)
# map by-stream results into by-name results
results = {}
for file_stream, result in results_streams.items():
filename = file_streams[file_stream]
results[filename] = result
return results
finally:
for file_stream in file_streams:
file_stream.close()
def submit_urls_and_wait_for_completion(
self, urls,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
**kwargs):
"""
Submit a list of URLs and wait for completion: For each URL, submit
the URL for analysis, wait for completion, and retrieve results.
Additional parameters passed to this function are forwarded to the
client (see `submit_url`).
:param urls: List of URLs to submit
:type urls: `list`(`str`)
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever
:type wait_completion_max_seconds: `float`
:returns: Dictionary of results
:rtype: `dict`(`SubmittedURLTask`)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
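Example (illustrative sketch; `client` is an already-constructed AnalysisClient and
the URLs are placeholders):
    helper = SubmissionHelper(client)
    results = helper.submit_urls_and_wait_for_completion(
        ['http://example.com/', 'http://example.org/download'],
        wait_completion_max_seconds=300,
    )
    for url, task in results.items():
        print(url, task)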
"""
start_ts = self.get_api_utc_timestamp()
self.__logger.info("Submitting %d URLs", len(urls))
results = {}
for url in urls:
self.__logger.info("Submitting URL %s", url)
results[url] = self.submit_url(url, **kwargs)
try:
self.wait_for_completion(
results,
start_timestamp=start_ts,
wait_completion_interval_seconds=
wait_completion_interval_seconds,
wait_completion_max_seconds=wait_completion_max_seconds,
verify=kwargs.get('verify', True)
)
except WaitResultTimeout as err:
self.__logger.warning("Waiting for URL submissions completion "
"failed: %s", err)
return results
def wait_for_completion_of_submission(
self, submission, start_timestamp,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
verify=True):
"""
Wait for completion of a given tasks.
:param submission: A submitted task. This object is updated in place
with result data
:type submission: `SubmittedTask`
:param start_timestamp: UTC timestamp before the first submission has
happened. Use `self.get_api_utc_timestamp()` to retrieve or use the
submission_timestamp returned from the submission.
:type start_timestamp: `datetime.datetime`
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever
:type wait_completion_max_seconds: `float`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:raises WaitResultTimeout: Waiting for results timed out
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
self.wait_for_completion(
submissions={1:submission},
start_timestamp=start_timestamp,
wait_completion_interval_seconds=wait_completion_interval_seconds,
wait_completion_max_seconds=wait_completion_max_seconds,
verify=verify,
)
def wait_for_completion(
self, submissions, start_timestamp,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
verify=True):
"""
Wait for completion of a given dictionary of tasks.
NOTE: Results are filled into the provided `submissions` dictionary.
:param submissions: Dictionary of submissions: submission identifier to
`SubmittedTask` mapping. NOTE: The submission identifier can be an
arbitrary value unique to the dictionary
:type submissions: `dict`(id:`SubmittedTask`)
:param start_timestamp: UTC timestamp before the first submission has
happened. Use `self.get_api_utc_timestamp()` to retrieve or use the
submission_timestamp returned from the submission.
:type start_timestamp: `datetime.datetime`
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever
:type wait_completion_max_seconds: `float`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:raises WaitResultTimeout: Waiting for results timed out
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
"""
g = self.yield_completed_tasks(
submissions, start_timestamp,
wait_completion_interval_seconds=wait_completion_interval_seconds,
wait_completion_max_seconds=wait_completion_max_seconds,
verify=verify)
# wait for completion all the tasks by invoking generator
for _ in g:
pass
def __handle_communication_error(self, e, num_retries):
"""
Handle CommunicationError exception with retries
:param CommunicationError e: exception to handle
:param int|None num_retries: current number of retries left
:return: Number of remaining retries
:rtype: int
:raises CommunicationError: If no retries are left.
"""
if num_retries is None:
self.__logger.warning(
"Communication Error - retry sending request. UNLIMITED times left."
)
elif num_retries > 0:
num_retries -= 1
self.__logger.warning(
"Communication Error - retry sending request. %d times left.", num_retries
)
else:
self.__logger.warning("Communication error: %s", e)
raise e
return num_retries
def yield_completed_tasks(
self, submissions, start_timestamp,
wait_completion_interval_seconds=15,
wait_completion_max_seconds=None,
verify=True):
"""
Returns a generator, which gives completed tasks as soon as they are
ready.
NOTE: Results are filled into the provided `submissions` dictionary.
NOTE: Any `SubmittedTask` instances that are part of the `submissions`
parameter and that are marked as completed already upon function
invocation will not be yielded.
:param submissions: Dictionary of submissions: submission identifier to
`SubmittedTask` mapping. NOTE: The submission identifier can be an
arbitrary value unique to the dictionary
:type submissions: `dict`(id:`SubmittedTask`)
:param start_timestamp: UTC timestamp before the first submission has
happened. Use `self.get_api_utc_timestamp()` to retrieve or use the
submission_timestamp returned from the submission.
:type start_timestamp: `datetime.datetime`
:param wait_completion_interval_seconds: How long to wait between polls
for completion
:type wait_completion_interval_seconds: `float`
:param wait_completion_max_seconds: Don't wait for longer than this many
seconds for completion. If None is specified, wait forever
:type wait_completion_max_seconds: `float`
:param verify: if False, disable SSL-certificate verification
:type verify: `bool`
:returns: generator that yields completed SubmittedTask objects
:rtype: `Iterator`(`SubmittedTask`)
:raises WaitResultTimeout: Waiting for results timed out
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
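Example (illustrative sketch; `helper` is an already-constructed SubmissionHelper
and the URLs are placeholders):
    urls = ['http://example.com/', 'http://example.org/']
    ts = helper.get_api_utc_timestamp()
    submissions = {url: helper.submit_url(url) for url in urls}
    for task in helper.yield_completed_tasks(submissions, ts):
        print("completed:", task.task_uuid, task.score)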
"""
# find which submissions we're still waiting for and build an index for
# looking up existing data quickly
missing_results = {
result.task_uuid: submission_id
for submission_id, result in submissions.items()
if result.task_uuid is not None and not result.is_complete()
}
if not missing_results:
self.__logger.info("No need to wait for completion for any of %d "
"submissions", len(submissions))
return
self.__logger.info(
"Waiting for completion of %d/%d submissions",
len(missing_results),
len(submissions),
)
start_completion_time = time.time()
end_completion_time = (
start_completion_time + wait_completion_max_seconds
if wait_completion_max_seconds is not None else None
)
# Number of times to re-send the request
num_retries = self.__num_retries
while missing_results:
waiting_task_uuids = list(missing_results.keys())[
:SubmissionHelper.MAX_WAITING_TASK_UUIDS_NUM
]
if len(missing_results) > SubmissionHelper.MAX_WAITING_TASK_UUIDS_NUM:
waiting_tasks = '{},...'.format(','.join(waiting_task_uuids))
else:
waiting_tasks = ','.join(waiting_task_uuids)
self.__logger.debug(
"Waiting for completion of %d submissions: %s",
len(missing_results),
waiting_tasks
)
try:
completed_data = self.__analysis_client.get_completed(
after=start_timestamp,
verify=verify,
include_score=True
)['data']
# only ignore communication errors and re-send the request
except CommunicationError as e:
num_retries = self.__handle_communication_error(e, num_retries)
else:
# reset the retry count to the default value if it is not None
if self.__num_retries is not None:
num_retries = self.__num_retries
# resume from here next iteration:
start_timestamp = completed_data['before']
if completed_data['tasks']:
for task_uuid, score in completed_data['tasks'].items():
try:
submission_id = missing_results[task_uuid]
except KeyError:
# someone else is submitting with the same license or
# we already had the result
continue
self.__logger.debug("Got result for task %s", task_uuid)
# fill in the details
#
# NOTE: We're currently NOT checking if the analysis failed.
# this will be merged with "score=0" - it's up to the caller
# to check (or a future extension)
result = submissions[submission_id]
result.set_score(score) # result.is_complete() becomes True
del missing_results[task_uuid]
self.__logger.debug("Got result for task %s: %s",
task_uuid, result)
yield result
if not missing_results:
break
if completed_data['more_results_available']:
# If more results are available to be fetched, we don't need to sleep
continue
sleep_timeout = wait_completion_interval_seconds
if end_completion_time is not None:
now = time.time()
if now >= end_completion_time:
self.__logger.warning("Waiting for completion of %d "
"submissions timed out",
len(missing_results))
raise WaitResultTimeout()
# make sure we only sleep as long as we have time left before
# the timeout
if now + sleep_timeout > end_completion_time:
sleep_timeout = end_completion_time - now
time.sleep(sleep_timeout)
self.__logger.info("Done waiting for completion of %d submissions",
len(submissions))
def export_and_yield_reports(self, task_uuids, sleep_interval_seconds=15, **kwargs):
"""
For each task UUID, export an analysis report for the task and yield that report when
it's ready. Additional parameters passed to this function are forwarded to the
client (see `export_report`).
:param list(str) task_uuids: List of task UUIDs to submit
:param int sleep_interval_seconds: Interval to wait between queries to malscape
:returns: generator that yields generated reports
:rtype: Iterator(ExportedReport)
:raises AnalysisAPIError: Analysis API returns HTTP error or error code (and 'raw' not set)
:raises CommunicationError: Error contacting Lastline Analyst API.
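Example (illustrative sketch; `helper` is an already-constructed SubmissionHelper,
the task UUIDs are placeholders, and the report stream is assumed to support read()):
    for report in helper.export_and_yield_reports(['<task-uuid-1>', '<task-uuid-2>']):
        if report.export_error:
            print("export failed:", report.task_uuid, report.export_error)
        else:
            with open(report.task_uuid + '.report', 'wb') as f:
                f.write(report.report_stream.read())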
"""
self.__logger.info("Exporting %d reports", len(task_uuids))
report_uuids = []
resume_after_report_uuid = None
resume_after_report_uuid_initialized = False
for task_uuid in task_uuids:
self.__logger.info("Exporting report for task %s", task_uuid)
error = None
exported_report_uuid = None
try:
result_data = self.__analysis_client.export_report(
uuid=task_uuid,
**kwargs
)['data']
except AnalysisAPIError as err:
# we are handling this error, and it's not a bug in the code, so
# it is logged just as a warning
self.__logger.warning("Exporting report for task %s failed: %s", task_uuid, err)
error = str(err)
else:
try:
exported_report_uuid = result_data['exported_report_uuid']
except KeyError:
# this path is not possible according to the API documentation,
# but just to be on the safe side...
error = "no report identifier returned"
else:
if not resume_after_report_uuid_initialized:
resume_after_report_uuid = result_data.get('resume_after_report_uuid')
resume_after_report_uuid_initialized = True
if exported_report_uuid:
report_uuids.append(exported_report_uuid)
else:
# Task could not be exported, create an error report now
yield ExportedReport(task_uuid, export_error=error)
# Now that the exports are initiated, yield them as they complete
for exported_report in self.yield_exported_reports(
report_uuids,
resume_after_report_uuid,
sleep_interval_seconds=sleep_interval_seconds
):
yield exported_report
def yield_exported_reports(
self,
submitted_report_uuids,
resume_after_report_uuid,
sleep_interval_seconds=15
):
"""
Yield exported reports as they become available for download.
:param list(str) submitted_report_uuids: List of exported report UUIDs we wish to get.
:param str|None resume_after_report_uuid: The last consumed report UUID. To enumerate
reports ready for download, specify the last UUID that the client processed; will
only consider UUIDs of new reports that were made available after the given one. If
not provided, will query for *all* reports stored to find the submitted_report_uuids.
:param int sleep_interval_seconds: Interval to wait between queries to malscape
:returns: generator that yields generated reports
:rtype: Iterator(ExportedReport)
"""
if not submitted_report_uuids:
self.__logger.debug('no need to wait for completion, no reports requested')
return
num_retries = self.__num_retries
self.__logger.info(
"Waiting for completion of %d exported reports", len(submitted_report_uuids),
)
while submitted_report_uuids:
try:
reports = self.__analysis_client.get_completed_exported_reports(
resume_after_report_uuid
)['data']['available_reports']
except CommunicationError as e:
num_retries = self.__handle_communication_error(e, num_retries)
else:
for report in reports:
report_uuid = report['exported_report_uuid']
if report_uuid in submitted_report_uuids:
exported_report = None
task_uuid = report['task_uuid']
export_timestamp = parse_datetime(report['export_timestamp'])
export_error = report.get('export_error')
if export_error is not None:
# The report could not be exported, so don't even try to fetch it
self.__logger.debug(
'report %s not available, export failed: %s', report_uuid,
export_error
)
exported_report = ExportedReport(
task_uuid,
report_uuid=report_uuid,
export_timestamp=export_timestamp,
export_error=export_error,
)
else:
try:
stream = self.__analysis_client.get_exported_report(report_uuid)
except CommunicationError as e:
num_retries = self.__handle_communication_error(e, num_retries)
else:
self.__logger.debug('got report %s.', report_uuid)
exported_report = ExportedReport(
task_uuid,
report_uuid=report_uuid,
report_stream=stream,
export_timestamp=export_timestamp,
)
if exported_report:
yield exported_report
submitted_report_uuids.remove(report_uuid)
num_retries = self.__num_retries
resume_after_report_uuid = report_uuid
else:
self.__logger.debug(
'not yielding report %s, not in submission list', report_uuid
)
if submitted_report_uuids:
self.__logger.info(
"Sleeping %ds for completion of %d exported reports (%s ...)",
sleep_interval_seconds, len(submitted_report_uuids), submitted_report_uuids[0]
)
time.sleep(sleep_interval_seconds)
class QueryHelper(object):
"""
Helper class for handling queries
"""
def __init__(self, analysis_client, logger=None):
"""
:param analysis_client: The client to use
:type analysis_client: `AnalysisClientBase`
:param logger: Optional logger to use. If None is provided, log to
stdout
:type logger: logging.Logger
"""
self.__analysis_client = analysis_client
if logger:
self.__logger = logger
else:
self.__logger = logging.getLogger('lastline.analysis.api_client')
self.__logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
self.__logger.addHandler(ch)
def download_analysis_subject_file(self, task_uuid, password_protected=None):
"""
Helper method for checking if a file analysis subject is available for
download
:param str task_uuid: The task's UUID
:param str password_protected: If provided, use this password to create a zip which will
contain the artifact being fetched. The password provided should contain only
ASCII characters and be at most 128 characters long
:returns: A file-stream if the file is available, otherwise None
:rtype: `NamedStringIO`
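Example (illustrative sketch; `client` is an already-constructed AnalysisClient,
the task UUID and output filename are placeholders, and the returned stream is
assumed to support read()):
    query_helper = QueryHelper(client)
    stream = query_helper.download_analysis_subject_file('<task-uuid>')
    if stream is not None:
        with open('analysis_subject.bin', 'wb') as f:
            f.write(stream.read())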
"""
results = self.__analysis_client.get_result(
uuid=task_uuid,
full_report_score=ANALYSIS_API_NO_REPORT_DETAILS)
try:
reports = results['data']['reports']
except KeyError:
reports = None
if not reports:
return None
for report in reports:
report_uuid = report.get('report_uuid')
if report_uuid:
try:
stream = self.__analysis_client.get_result_artifact(
uuid=task_uuid,
report_uuid=report_uuid,
artifact_name='analysis_subject',
password_protected=password_protected,
)
except Error:
stream = None
if stream:
return stream
return None
def download_analysis_subject_by_file_hash(
self, md5=None, sha1=None, sha256=None, password_protected=None
):
"""
Helper method for checking if a file is available for download
:param str md5: Optional md5 hash of the file. Exactly one of the file-hash
parameters must be provided
:param str sha1: Optional sha1 hash of the file. Exactly one of the file-
hash parameters must be provided
:param str sha256: Optional sha256 hash of the file. Exactly one of the
file-hash parameters must be provided
:param str password_protected: If provided, use this password to create a zip which will
contain the artifact being fetched. The password provided should contain only
ASCII characters and be at most 128 characters long
:returns: A file-stream if the file is available, otherwise None
:rtype: `NamedStringIO`
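Example (illustrative sketch; `query_helper` is an already-constructed QueryHelper
and the hash/password values are placeholders):
    stream = query_helper.download_analysis_subject_by_file_hash(
        sha256="<sha256-of-file>",
        password_protected="infected",
    )
    if stream is None:
        print("file not available for download")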
"""
result = self.__analysis_client.query_file_hash(
md5=md5,
sha1=sha1,
sha256=sha256)
if not result['data']['files_found']:
return None
return self.download_analysis_subject_file(
task_uuid=result['data']['tasks'][0]['task_uuid'],
password_protected=password_protected,
)
class AnalyzeHelper(object):
"""
This class provides helper functions used for submitting files and urls to the
Lastline Analyst API.
"""
DEFAULT_ANALYST_API_URL = 'https://analysis.lastline.com/analysis'
@classmethod
def factory(cls, url, token, key, logger, secure=False):
"""
Factory for this class
"""
client = AnalysisClient(
url,
key=key,
api_token=token,
# the API client is very verbose (in INFO mode) by default
logger=logger,
verify_ssl=secure
)
return AnalyzeHelper(
url=url,
token=token,
key=key,
client=client,
logger=logger
)
@staticmethod
def from_config(conf_file, conf_section, logger, secure):
"""
Parse the conf file and build an AnalyzeHelper from the expected parameters. If the
url is missing from the section, fall back to the default Analyst API URL
:param str conf_file: The path to the conf file we want to read
:param str conf_section: The section name used to read the parameters
:param logging.Logger logger: Logger to use
:param bool secure: if True, verify SSL certificates
:return: Instantiated helper
:rtype: AnalyzeHelper
:raises: IOError if the file doesn't exist
:raises: ConfigParser.Error if a required field is not present
"""
conf = configparser.ConfigParser()
with open(conf_file) as conf_fp:
    conf.readfp(conf_fp)
try:
url = conf.get(conf_section, "url")
except configparser.Error:
url = None
if not url:
url = AnalyzeHelper.DEFAULT_ANALYST_API_URL
try:
key = conf.get(conf_section, "key")
token = conf.get(conf_section, "api_token")
except configparser.Error:
raise configparser.Error("Missing credentials in configuration")
return AnalyzeHelper.factory(
url=url,
token=token,
key=key,
logger=logger,
secure=secure
)
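    # For illustration only: a configuration file consumed by from_config() could look like
    # the snippet below (the section name, file path, and credential values are hypothetical;
    # "url" may be omitted to fall back to DEFAULT_ANALYST_API_URL):
    #
    #   [analyst_api]
    #   url = https://analysis.lastline.com/analysis
    #   key = <access key>
    #   api_token = <api token>
    #
    #   helper = AnalyzeHelper.from_config(
    #       "/etc/lastline/analyst.conf", "analyst_api", logger, secure=True)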
def __init__(self, url, token, key, client, logger):
"""
Create an instance of AnalyzeHelper
:param str url: the base url used for the api
:param str token: The token used for the api
:param str key: the key used for the api
:param AnalysisClient client: the client instance
:param logging.Logger logger: Logger to use
"""
self._url = url
self._key = key
self._token = token
self._logger = logger
self._client = client
def get_client(self):
"""
Retrieve the client object
:return: An instance of AnalysisClient configured for an api request
:rtype: AnalysisClient
"""
return self._client
def verify_connectivity(self):
"""
Check if the given client can talk to the given API and log human-readable reasons if it
fails
        :return: True if connectivity works, False otherwise
:rtype: bool
"""
self._logger.info("Testing connectivity to Lastline analysis API server")
ping_successful = False
try:
# NOTE: We check using `ping()` first, because it checks basic connectivity and
            # credentials. Later, we also check using `completed()`, because it's one of the
# functions that we invoke that require more privileges than just connecting to the API
# for pushing submissions.
# If a client is using credentials that don't allow fetching results, we need to alert
# the user.
self._client.ping()
ping_successful = True
self._client.completed(after=datetime.datetime.now())
except requests.ConnectionError as err:
self._logger.error(
"Failed to connect to API server at %s, please make sure the API server is "
"reachable: %s", self._url, err
)
return False
except ssl.SSLError as err:
self._logger.error(
"Failed to verify SSL certificate for API at %s: %s",
self._url, err)
return False
except AnalysisAPIError as err:
if err.error_code == ANALYSIS_API_INVALID_CREDENTIALS:
self._logger.error(
"Invalid credentials for %s: failed to authenticate to API server", self._url
)
elif ping_successful:
                # if we're partly successful, give the user a bit more information about what
                # could be the problem
self._logger.error(
"API Credentials used for %s don't allow required functionality:"
"%s", self._url, err
)
else:
self._logger.error("Failed to communicate with API at %s: %s", self._url, err)
return False
except Exception as err:
self._logger.error("Error in HTTP request to analysis API server: %s", err)
return False
else:
self._logger.info("Successfully connected to Lastline analysis API server")
return True
def _write_result(self, result, result_filename):
"""
Open the filename and write the result
:param str result: The results to write to the file
:param str result_filename: The location of the file to write the result to
:return: True if write operation was successful, False otherwise
:rtype: bool
"""
try:
with open(result_filename, "w") as f:
f.write(result)
except IOError as err:
self._logger.error("Failed to write result to file %s: %s", result_filename, err)
return False
return True
def store_result(self, task_uuid, base_result_filename):
"""
Retrieve analysis results and format/store them as JSON and XML
:param str task_uuid: The task to fetch results for
:param str base_result_filename: Name under which to store the result files as
(with .json/.xml suffixes)
:return: True if storing was successful, False otherwise
:rtype: bool
"""
report_url = os.path.join(self._url, 'portal#/analyst/task', task_uuid)
try:
json_result = self._client.get_result(task_uuid, raw=True)
json_analysis_tags = self._client.get_analysis_tags(task_uuid, raw=True)
except CommunicationError as err:
self._logger.error("Failed to connect to API server: %s", err)
return False
except AnalysisAPIError as err:
self._logger.error("Error in HTTP request to API: %s", err)
return False
try:
result = simplejson.loads(json_result)
analysis_tags = simplejson.loads(json_analysis_tags)
json_report_url = simplejson.dumps({'report_url': report_url})
except Exception as err:
logging.error("Unexpected response format for UUID %s: %s", task_uuid, err)
return False
if not result['success'] or not analysis_tags['success']:
self._logger.error("Error fetching results for UUID %s: %s", task_uuid, result)
return False
json_result_filename = base_result_filename + '_result.json'
json_report_result_filename = base_result_filename + '_report_url.json'
json_analysis_tags_filename = base_result_filename + '_analysis_tags.json'
self._write_result(json_result, json_result_filename)
self._write_result(json_analysis_tags, json_analysis_tags_filename)
self._write_result(json_report_url, json_report_result_filename)
# first one (in json) was successful.
# Now let's get it in raw XML.
try:
xml_result = self._client.get_result(task_uuid, requested_format="xml")
analysis_tags_xml = self._client.get_analysis_tags(task_uuid, requested_format="xml")
except CommunicationError as err:
self._logger.error("Failed to connect to API server: %s", err)
return False
except AnalysisAPIError as err:
self._logger.error("Error in HTTP request to API: %s", err)
return False
xml_result_filename = base_result_filename + '_result.xml'
xml_analysis_tags_filename = base_result_filename + '_analysis_tags.xml'
self._write_result(xml_result, xml_result_filename)
self._write_result(analysis_tags_xml, xml_analysis_tags_filename)
return True
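    # For illustration (hypothetical paths): a successful call such as
    #   helper.store_result(task_uuid, "/tmp/results/mytask")
    # produces the following files:
    #   /tmp/results/mytask_result.json
    #   /tmp/results/mytask_report_url.json
    #   /tmp/results/mytask_analysis_tags.json
    #   /tmp/results/mytask_result.xml
    #   /tmp/results/mytask_analysis_tags.xml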
#############################################################################
#
# END API-CLIENT FUNCTIONALITY
#
# START API-SHELL FUNCTIONALITY
#
# NOTE: We only keep this code in this module for backwards-compatibility
import sys
import optparse
def init_shell(banner):
"""Set up the iPython shell."""
# NOTE: We use a local import here to avoid requiring IPython when just using the
# module without the shell
try:
# pylint: disable=E0611,F0401
from IPython.frontend.terminal import embed
shell = embed.InteractiveShellEmbed(banner1=banner)
except ImportError: # iPython < 0.11
import IPython
# pylint: disable=E1101
# pylint won't find the class if a newer version is installed
shell = IPython.Shell.IPShellEmbed()
shell.set_banner(banner)
return shell
def main(argv):
deprecation_notice = "** DEPRECATION NOTICE: USE analysis_apiclient_shell.py INSTEAD **"
parser = optparse.OptionParser(usage="""
{deprecation_notice}
Run client for analysis api with the provided credentials
%prog access_key api_token
{deprecation_notice}
""".format(deprecation_notice=deprecation_notice))
parser.add_option("-u", "--api-url", dest="api_url",
type="string", default="https://analysis.lastline.com",
help="send API requests to this URL (debugging purposes)")
(cmdline_options, args) = parser.parse_args(argv[1:])
if len(args) != 2:
parser.print_help()
return 1
namespace = {}
namespace["analysis"] = AnalysisClient(cmdline_options.api_url,
key=args[0],
api_token=args[1])
shell = init_shell(banner=deprecation_notice)
shell(local_ns=namespace, global_ns=namespace)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
{
"content_hash": "df4412103f26444d1db2e0f318c18711",
"timestamp": "",
"source": "github",
"line_count": 5039,
"max_line_length": 100,
"avg_line_length": 43.64179400674737,
"alnum_prop": 0.5947405996062043,
"repo_name": "carbonblack/cb-lastline-connector",
"id": "31d9e9565f48f83a39ae7b68f5e905b91407b2ce",
"size": "219929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cbopensource/connectors/lastline/analysis_apiclient.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "Kotlin",
"bytes": "5153"
},
{
"name": "Python",
"bytes": "260672"
},
{
"name": "Shell",
"bytes": "5940"
}
],
"symlink_target": ""
}
|
import ctypes
import functools
import windows
from windows import utils
from windows import winproxy
import windows.generated_def as gdef
# import windows.security # at the end of this file (loop import)
bltn_type = type
KNOW_INTEGRITY_LEVEL = gdef.FlagMapper(
gdef.SECURITY_MANDATORY_UNTRUSTED_RID,
gdef.SECURITY_MANDATORY_LOW_RID,
gdef.SECURITY_MANDATORY_MEDIUM_RID,
gdef.SECURITY_MANDATORY_MEDIUM_PLUS_RID,
gdef.SECURITY_MANDATORY_HIGH_RID,
gdef.SECURITY_MANDATORY_SYSTEM_RID,
gdef.SECURITY_MANDATORY_PROTECTED_PROCESS_RID
)
# Voodoo to fix lookup-strangeness in class declaration
def meta_craft(x):
def partial_applier(infos_class, rtype):
return property(functools.partial(x, infos_class=infos_class, rtype=rtype))
return partial_applier
class TokenGroups(gdef.TOKEN_GROUPS):
@property
def _groups(self):
return windows.utils.resized_array(self.Groups, self.GroupCount)
@property
def sids_and_attributes(self):
"""The sids and attributes of each group
:type: [:class:`~windows.generated_def.winstructs.SID_AND_ATTRIBUTES`] - A list of :class:`~windows.generated_def.winstructs.SID_AND_ATTRIBUTES`
"""
return self._groups # Something else ?
@property
def sids(self):
"""The sids of each group
:type: [:class:`~windows.generated_def.winstructs.PSID`] - A list of :class:`~windows.generated_def.winstructs.PSID`
"""
return [g.Sid for g in self._groups]
def __repr__(self):
return "<{0} count={1}>".format(type(self).__name__, self.GroupCount)
TokenGroupsType = TokenGroups # Prevent confusion with token.TokenGroups
class TokenPrivileges(gdef.TOKEN_PRIVILEGES):
"""Improved ``TOKEN_PRIVILEGES`` usable like a mapping"""
@property
def _privileges(self):
return windows.utils.resized_array(self.Privileges, self.PrivilegeCount)
def all(self):
"""The list of all privileges
:returns: [:class:`~windows.generated_def.winstructs.LUID_AND_ATTRIBUTES`] - A list of :class:`~windows.generated_def.winstructs.LUID_AND_ATTRIBUTES`
"""
return list(self._privileges)
def keys(self):
"""The name of all privileges in the TokenPrivileges
:returns: [:class:`str`] - A list of name
"""
return [self._lookup_name(p.Luid) for p in self._privileges]
__iter__ = keys
def items(self):
"""The (name, Attribute) of all privileges in the TokenPrivileges
:returns: [(:class:`str`, :class:`int`)] - A list of (name, Attribute) tuple
"""
return [(self._lookup_name(p.Luid), p.Attributes) for p in self._privileges]
def _get_priv_by_name(self, name):
luid = self._lookup_value(name)
x = [p for p in self._privileges if p.Luid == luid]
if not x:
return None
assert len(x) == 1
return x[0]
def __getitem__(self, name):
"""Retrieve the attribute value for privilege ``name``
:raises: KeyError if privilege ``name`` not in the TokenPrivileges
:returns: :class:`int`
"""
priv = self._get_priv_by_name(name)
if not priv:
raise KeyError(name)
return priv.Attributes
def __setitem__(self, name, value):
"""Set the attribute value for privilege ``name``
:raises: KeyError if privilege ``name`` not in the TokenPrivileges
"""
priv = self._get_priv_by_name(name)
if not priv:
raise KeyError(name)
priv.Attributes = value
# __delitem__ that set SE_PRIVILEGE_REMOVED ?
def _lookup_name(self, luid):
size = gdef.DWORD(0x100)
buff = ctypes.create_unicode_buffer(size.value)
winproxy.LookupPrivilegeNameW(None, luid, buff, size)
return buff[:size.value]
def _lookup_value(self, name):
luid = gdef.LUID()
winproxy.LookupPrivilegeValueW(None, name, ctypes.byref(luid))
return luid
TokenPrivilegesType = TokenPrivileges
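# Illustrative sketch (assumes `tok` is a Token instance, e.g. obtained via
# windows.current_process.token): TokenPrivileges behaves like a read/write mapping
# keyed by privilege name.
#
#   privs = tok.privileges
#   privs.keys()                     # ['SeShutdownPrivilege', ...]
#   privs["SeShutdownPrivilege"]     # attribute flags (e.g. SE_PRIVILEGE_ENABLED)
#   privs.items()                    # [(name, attributes), ...]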
class TokenSecurityAttributesInformation(gdef.TOKEN_SECURITY_ATTRIBUTES_INFORMATION):
@property
def attributes(self):
"""Return all the attributes as :class:`TokenSecurityAttributeV1`
:type: [:class:`TokenSecurityAttributeV1`] - A list of token security attributes
"""
tptr = ctypes.cast(self.Attribute.pAttributeV1, ctypes.POINTER(TokenSecurityAttributeV1))
        # It looks like this cast does NOT keep a ref to self.
        # Set up the base object ref ourselves
tptr._custom_base_ = self
return tptr[:self.AttributeCount]
class TokenSecurityAttributeV1(gdef.TOKEN_SECURITY_ATTRIBUTE_V1):
VALUE_ARRAY_PTR_BY_TYPE = {
gdef.TOKEN_SECURITY_ATTRIBUTE_TYPE_INT64: "pInt64",
gdef.TOKEN_SECURITY_ATTRIBUTE_TYPE_UINT64: "pUint64",
gdef.TOKEN_SECURITY_ATTRIBUTE_TYPE_STRING: "pString",
gdef.TOKEN_SECURITY_ATTRIBUTE_TYPE_FQBN: "pFqbn",
# TOKEN_SECURITY_ATTRIBUTE_TYPE_SID
# TOKEN_SECURITY_ATTRIBUTE_TYPE_BOOLEAN
gdef.TOKEN_SECURITY_ATTRIBUTE_TYPE_OCTET_STRING: "pOctetString",
}
@property
def name(self):
"""The name of the security attribute"""
return self.Name.str
@property
def values(self):
"""The values of the security attribute"""
array_name = self.VALUE_ARRAY_PTR_BY_TYPE[self.ValueType]
return getattr(self.Values, array_name)[:self.ValueCount]
def __repr__(self):
return """<{0} name="{1}">""".format(type(self).__name__, self.name)
# https://docs.microsoft.com/en-us/windows/desktop/SecAuthZ/access-tokens
class Token(utils.AutoHandle):
"""Represent a Windows Token.
The attributes only documented by a type are from the :class:`~windows.generated_def.winstructs.TOKEN_INFORMATION_CLASS`, such return values may be improved version of the structure.
.. note::
see `[MSDN] TOKEN_INFORMATION_CLASS <https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ne-winnt-_token_information_class>`_
"""
def __init__(self, handle):
self._handle = handle
def _get_required_token_information_size(self, infos_class):
cbsize = gdef.DWORD()
try:
winproxy.GetTokenInformation(self.handle, infos_class, None, 0, ctypes.byref(cbsize))
except winproxy.WinproxyError as e:
if not e.winerror in (gdef.ERROR_INSUFFICIENT_BUFFER, gdef.ERROR_BAD_LENGTH):
raise
return cbsize.value
def get_token_infomations(self, infos_class, rtype):
required_size = self._get_required_token_information_size(infos_class)
requested_size = max(required_size, ctypes.sizeof(rtype))
buffer = utils.BUFFER(rtype, 1)(size=requested_size)
cbsize = gdef.DWORD()
winproxy.GetTokenInformation(self.handle, infos_class, buffer, buffer.real_size, cbsize)
return buffer[0]
def set_informations(self, info_type, infos):
return winproxy.SetTokenInformation(self.handle, info_type, ctypes.byref(infos), ctypes.sizeof(infos))
craft = meta_craft(get_token_infomations)
# https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ne-winnt-_token_information_class
TokenUser = craft(gdef.TokenUser, gdef.TOKEN_USER) #: :class:`~windows.generated_def.winstructs.TOKEN_USER`
TokenGroups = craft(gdef.TokenGroups , TokenGroupsType) #: :class:`TokenGroups`
TokenPrivileges = craft(gdef.TokenPrivileges , TokenPrivilegesType) #: :class:`TokenPrivileges`
TokenOwner = craft(gdef.TokenOwner, gdef.TOKEN_OWNER) #: :class:`~windows.generated_def.winstructs.TOKEN_OWNER`
TokenPrimaryGroup = craft(gdef.TokenPrimaryGroup, gdef.TOKEN_PRIMARY_GROUP) #: :class:`~windows.generated_def.winstructs.TOKEN_PRIMARY_GROUP`
TokenDefaultDacl = craft(gdef.TokenDefaultDacl, gdef.TOKEN_DEFAULT_DACL) #: :class:`~windows.generated_def.winstructs.TOKEN_DEFAULT_DACL`
TokenSource = craft(gdef.TokenSource, gdef.TOKEN_SOURCE) #: :class:`~windows.generated_def.winstructs.TOKEN_SOURCE`
TokenType = craft(gdef.TokenType, gdef.TOKEN_TYPE) #: :class:`~windows.generated_def.winstructs.TOKEN_TYPE`
TokenImpersonationLevel = craft(gdef.TokenImpersonationLevel, gdef.SECURITY_IMPERSONATION_LEVEL) #: :class:`~windows.generated_def.winstructs.SECURITY_IMPERSONATION_LEVEL`
TokenStatistics = craft(gdef.TokenStatistics, gdef.TOKEN_STATISTICS) #: :class:`~windows.generated_def.winstructs.TOKEN_STATISTICS`
TokenRestrictedSids = craft(gdef.TokenRestrictedSids, TokenGroupsType) #: :class:`~windows.generated_def.winstructs.TokenGroups`
TokenSessionId = craft(gdef.TokenSessionId, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenGroupsAndPrivileges = craft(gdef.TokenGroupsAndPrivileges, gdef.TOKEN_GROUPS_AND_PRIVILEGES) #: :class:`~windows.generated_def.winstructs.TOKEN_GROUPS_AND_PRIVILEGES`
# TokenSessionReference = craft(gdef.TokenSessionReference, ???) # Reserved.
TokenSandBoxInert = craft(gdef.TokenSandBoxInert, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
# TokenAuditPolicy = craft(gdef.TokenAuditPolicy, ???) # Reserved.
TokenOrigin = craft(gdef.TokenOrigin, gdef.TOKEN_ORIGIN) #: :class:`~windows.generated_def.winstructs.TOKEN_ORIGIN`
TokenElevationType = craft(gdef.TokenElevationType, gdef.TOKEN_ELEVATION_TYPE) #: :class:`~windows.generated_def.winstructs.TOKEN_ELEVATION_TYPE`
TokenLinkedToken = craft(gdef.TokenLinkedToken, gdef.TOKEN_LINKED_TOKEN) #: :class:`~windows.generated_def.winstructs.TOKEN_LINKED_TOKEN`
TokenElevation = craft(gdef.TokenElevation, gdef.TOKEN_ELEVATION) #: :class:`~windows.generated_def.winstructs.TOKEN_ELEVATION`
TokenHasRestrictions = craft(gdef.TokenHasRestrictions, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenAccessInformation = craft(gdef.TokenAccessInformation, gdef.TOKEN_ACCESS_INFORMATION) #: :class:`~windows.generated_def.winstructs.TOKEN_ACCESS_INFORMATION`
TokenVirtualizationAllowed = craft(gdef.TokenVirtualizationAllowed, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenVirtualizationEnabled = craft(gdef.TokenVirtualizationEnabled, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenIntegrityLevel = craft(gdef.TokenIntegrityLevel, gdef.TOKEN_MANDATORY_LABEL) #: :class:`~windows.generated_def.winstructs.TOKEN_MANDATORY_LABEL`
TokenUIAccess = craft(gdef.TokenUIAccess, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenMandatoryPolicy = craft(gdef.TokenMandatoryPolicy, gdef.TOKEN_MANDATORY_POLICY) #: :class:`~windows.generated_def.winstructs.TOKEN_MANDATORY_POLICY`
TokenLogonSid = craft(gdef.TokenLogonSid, TokenGroupsType) #: :class:`TokenGroups`
TokenIsAppContainer = craft(gdef.TokenIsAppContainer, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenCapabilities = craft(gdef.TokenCapabilities, TokenGroupsType) #: :class:`TokenGroups`
TokenAppContainerSid = craft(gdef.TokenAppContainerSid, gdef.TOKEN_APPCONTAINER_INFORMATION) #: :class:`~windows.generated_def.winstructs.TOKEN_APPCONTAINER_INFORMATION`
TokenAppContainerNumber = craft(gdef.TokenAppContainerNumber, gdef.DWORD) #: :class:`~windows.generated_def.winstructs.DWORD`
TokenUserClaimAttributes = craft(gdef.TokenUserClaimAttributes, gdef.CLAIM_SECURITY_ATTRIBUTES_INFORMATION) #: :class:`~windows.generated_def.winstructs.CLAIM_SECURITY_ATTRIBUTES_INFORMATION`
TokenDeviceClaimAttributes = craft(gdef.TokenDeviceClaimAttributes, gdef.CLAIM_SECURITY_ATTRIBUTES_INFORMATION) #: :class:`~windows.generated_def.winstructs.CLAIM_SECURITY_ATTRIBUTES_INFORMATION`
# TokenRestrictedUserClaimAttributes = craft(gdef.TokenRestrictedUserClaimAttributes, ???) # Reserved.
# TokenRestrictedDeviceClaimAttributes = craft(gdef.TokenRestrictedDeviceClaimAttributes, ???) # Reserved.
TokenDeviceGroups = craft(gdef.TokenDeviceGroups, TokenGroupsType) #: :class:`TokenGroups`
TokenRestrictedDeviceGroups = craft(gdef.TokenRestrictedDeviceGroups, gdef.TOKEN_GROUPS) #: :class:`~windows.generated_def.winstructs.TOKEN_GROUPS`
# Reserved.
# Structure found in ntseapi.h (thx internet)
TokenSecurityAttributes = craft(gdef.TokenSecurityAttributes, TokenSecurityAttributesInformation) #: :class:`TokenSecurityAttributesInformation`
# Help would be appreciated for the structures of the following query type
# TokenIsRestricted = craft(gdef.TokenIsRestricted, ???) # Reserved.
TokenProcessTrustLevel = craft(gdef.TokenProcessTrustLevel, gdef.PSID) #: :class:`~windows.generated_def.winstructs.PSID`
# TokenPrivateNameSpace = craft(gdef.TokenPrivateNameSpace, gdef.ULONG) # Reserved.
# TokenSingletonAttributes = craft(gdef.TokenSingletonAttributes, ???) # Reserved.
# TokenBnoIsolation = craft(gdef.TokenBnoIsolation, ???) # Reserved.
# TokenChildProcessFlags = craft(gdef.TokenChildProcessFlags, ???) # Reserved.
# TokenIsLessPrivilegedAppContainer = craft(gdef.TokenIsLessPrivilegedAppContainer, ???) # Reserved.
# High level properties
@property
def user(self):
"""The user sid of the token
:type: :class:`~windows.generated_def.winstructs.PSID`
"""
return self.TokenUser.User.Sid
@property
def username(self):
"""The username of the token
:type: :class:`str`
"""
return self._user_and_computer_name()[1]
@property
def computername(self):
"""The computername of the token
:type: :class:`str`
"""
return self._user_and_computer_name()[0]
def _user_and_computer_name(self):
return windows.utils.lookup_sid(self.user)
groups = TokenGroups #: Alias for TokenGroups (type may change in the future for improved struct)
@property
def owner(self):
"""The owner sid of the token
:type: :class:`~windows.generated_def.winstructs.PSID`
"""
return self.TokenOwner.Owner
@property
def primary_group(self):
"""The sid of the primary group of the token
:type: :class:`~windows.generated_def.winstructs.PSID`
"""
return self.TokenPrimaryGroup.PrimaryGroup
@property
def default_dacl(self):
"""The defaul DACL of the token
:type: :class:`windows.security.Acl`
"""
return self.get_token_infomations(gdef.TokenDefaultDacl, windows.security.PAcl)[0]
# def source(self): (tok.TokenSource) ??
@property
def type(self):
"""The type (Primary / Impersonation) of the token
"""
return self.TokenType.value
@property
def impersonation_level(self):
"""The impersonation level of a ``TokenImpersonation`` token.
:raises: :class:`WindowsError` if token is not a ``TokenImpersonation``
:type: :class:`int` -- Enum value from :class:`~windows.generated_def.winstructs.SECURITY_IMPERSONATION_LEVEL`
"""
try:
return self.TokenImpersonationLevel.value
except WindowsError as e:
if (e.winerror == gdef.ERROR_INVALID_PARAMETER and
self.type != gdef.TokenImpersonation):
# raise ValueError ?
e.strerror += " This Token is not an Impersonation token"
raise
statistics = TokenStatistics #: Alias for TokenStatistics (type may change in the future for improved struct)
@property
def id(self):
"""The TokenId Specifies an unique identifier that identifies this instance of the token object.
:type: :class:`int`
"""
return int(self.TokenStatistics.TokenId)
@property
def authentication_id(self):
"""The AuthenticationId Specifies an unique identifier assigned to the session this token represents.
There can be many tokens representing a single logon session.
:type: :class:`int`
"""
return int(self.TokenStatistics.AuthenticationId)
@property
def modified_id(self):
"""The ModifiedId Specifies an unique identifier that changes each time the token is modified.
:type: :class:`int`
"""
return int(self.TokenStatistics.ModifiedId)
restricted_sids = TokenRestrictedSids #: Alias for TokenRestrictedSids (type may change in the future for improved struct)
session_id = TokenSessionId #: Alias for TokenSessionId (type may change in the future for improved struct)
@property
def groups_and_privileges(self):
"""Alias for TokenGroupsAndPrivileges (type may change in the future for improved struct)"""
# Return enhanced 'TOKEN_GROUPS_AND_PRIVILEGES' ?
return self.TokenGroupsAndPrivileges
@property
def privileges(self):
"""Alias for ``TokenPrivileges``
:type: :class:`TokenPrivileges`
"""
return self.TokenPrivileges
sandbox_inert = TokenSandBoxInert #: Alias for TokenSandBoxInert (type may change in the future for improved struct)
# def audit_policy(self):
# raise NotImplementedError("Need to find the type of TokenAuditPolicy")
@property
def origin(self):
"""The originating logon session of the token.
:type: :class:`int`
"""
origin_logon_session = self.TokenOrigin.OriginatingLogonSession
return int(origin_logon_session) # improved LUID implem __int__ :)
@property
def elevation_type(self):
"""The elevation type of the token.
:type: :class:`int` -- Enum value from :class:`~windows.generated_def.winstructs.TOKEN_ELEVATION_TYPE`
"""
return self.TokenElevationType.value
@property
def linked_token(self):
"""The token linked to our token if present (may raise else)
:type: :class:`Token`
"""
# TODO: return None if not present ?
return Token(self.TokenLinkedToken.LinkedToken)
@property
def elevated(self):
"""``True`` if token is an elevated token"""
return bool(self.TokenElevation.TokenIsElevated)
    is_elevated = elevated #: Alias for ``elevated``; deprecated and may disappear
has_restriction = TokenHasRestrictions #: Alias for TokenHasRestrictions (type may change in the future for improved struct)
@property
def access_information(self):
"""Alias for TokenAccessInformation (type may change in the future for improved struct)"""
# Return enhanced subclass ?
return self.TokenAccessInformation
@property
def trust_level(self):
"""The trust level of the process if present else ``None``.
:type: :class:`~windows.generated_def.winstructs.PSID`
"""
tl = self.TokenProcessTrustLevel
if not tl: # NULL:
return None
return tl
virtualization_allowed = TokenVirtualizationAllowed #: Alias for TokenVirtualizationAllowed (type may change in the future for improved struct)
virtualization_enabled = TokenVirtualizationEnabled #: Alias for TokenVirtualizationEnabled (type may change in the future for improved struct)
@property
def integrity_level(self):
"""The integrity level and attributes of the token
:type: :class:`windows.generated_def.winstructs.SID_AND_ATTRIBUTES`
"""
return self.TokenIntegrityLevel.Label # SID_AND_ATTRIBUTES
def get_integrity(self):
"""Return the integrity level of the token
:type: :class:`int`
"""
sid = self.integrity_level.Sid
count = winproxy.GetSidSubAuthorityCount(sid)
integrity = winproxy.GetSidSubAuthority(sid, count[0] - 1)[0]
return KNOW_INTEGRITY_LEVEL[integrity]
def set_integrity(self, integrity):
"""Set the integrity level of a token
:param type: :class:`int`
"""
mandatory_label = gdef.TOKEN_MANDATORY_LABEL()
mandatory_label.Label.Attributes = 0x60
# cast integrity to int to accept SECURITY_MANDATORY_LOW_RID & other Flags
mandatory_label.Label.Sid = gdef.PSID.from_string("S-1-16-{0}".format(int(integrity)))
return self.set_informations(gdef.TokenIntegrityLevel, mandatory_label)
_INTEGRITY_PROPERTY_DOC = """The integrity of the token as an int (extracted from integrity PSID)
:getter: :func:`get_integrity`
:setter: :func:`set_integrity`
"""
integrity = property(get_integrity, set_integrity, doc=_INTEGRITY_PROPERTY_DOC)
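    # Illustrative sketch (hypothetical usage, not part of this module): the integrity
    # property accepts the SECURITY_MANDATORY_*_RID values, which set_integrity() maps to
    # "S-1-16-<rid>" SIDs.
    #
    #   tok = windows.current_process.token
    #   tok.integrity                            # e.g. SECURITY_MANDATORY_MEDIUM_RID
    #   low_tok = tok.duplicate()
    #   low_tok.integrity = gdef.SECURITY_MANDATORY_LOW_RID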
ui_access = TokenUIAccess #: Alias for TokenUIAccess (type may change in the future for improved struct)
VALID_TOKEN_POLICIES = gdef.FlagMapper(
gdef.TOKEN_MANDATORY_POLICY_OFF,
gdef.TOKEN_MANDATORY_POLICY_NO_WRITE_UP,
gdef.TOKEN_MANDATORY_POLICY_NEW_PROCESS_MIN,
gdef.TOKEN_MANDATORY_POLICY_VALID_MASK,
)
@property
def mandatory_policy(self):
"""mandatory integrity access policy for the associated token
:type: :class:`int` -- see `[MSDN] mandatory policy <https://docs.microsoft.com/en-us/windows/desktop/api/winnt/ns-winnt-_token_mandatory_policy>`_
"""
return self.VALID_TOKEN_POLICIES[self.TokenMandatoryPolicy.Policy]
@property
def logon_sid(self):
"""The logon sid of the token. (Case of multiple logon sid not handled and will raise AssertionError)
:type: :class:`windows.generated_def.winstructs.SID_AND_ATTRIBUTES`
"""
rgroups = self.TokenLogonSid
assert rgroups.GroupCount == 1, "More than 1 TokenLogonSid"
return rgroups.Groups[0]
is_appcontainer = TokenIsAppContainer #: Alias for TokenIsAppContainer (type may change in the future for improved struct)
capabilities = TokenCapabilities #: Alias for TokenCapabilities (type may change in the future for improved struct)
@property
def appcontainer_sid(self):
"""The sid of the TokenAppContainerSid if present else ``None``
:type: :class:`~windows.generated_def.winstructs.PSID`
"""
sid = self.TokenAppContainerSid.TokenAppContainer
if not sid: # NULL
return None
return sid
appcontainer_number = TokenAppContainerNumber #: Alias for TokenAppContainerNumber (type may change in the future for improved struct)
@property
def security_attributes(self):
"""The security attributes of the token
:type: [:class:`TokenSecurityAttributeV1`] - A list of token security attributes
"""
return self.TokenSecurityAttributes.attributes
## Token Methods
def duplicate(self, access_rigth=gdef.MAXIMUM_ALLOWED, attributes=None, type=None, impersonation_level=None):
"""Duplicate the token into a new :class:`Token`.
:param type: The type of token: ``TokenPrimary(0x1L)`` or ``TokenImpersonation(0x2L)``
:param impersonation_level: The :class:`~windows.generated_def.winstructs.SECURITY_IMPERSONATION_LEVEL` for a ``TokenImpersonation(0x2L)``:
- If ``type`` is ``TokenPrimary(0x1L)`` this parameter is ignored if ``None`` or used as-is.
- If ``type`` is ``TokenImpersonation(0x2L)`` and this parameter is None, ``self.impersonation_level`` is used.
- If ``type`` is ``TokenImpersonation(0x2L)`` and our Token is a ``TokenPrimary(0x1L)`` this parameter MUST be provided
:returns: :class:`Token` - The duplicate token
Example:
>>> tok
<Token TokenId=0x39d6dde5 Type=TokenPrimary(0x1L)>
>>> tok.duplicate()
<Token TokenId=0x39d7b206 Type=TokenPrimary(0x1L)>
>>> tok.duplicate(type=gdef.TokenImpersonation)
...
            ValueError: Duplicating a PrimaryToken as a TokenImpersonation requires an explicit <impersonation_level> parameter
>>> tok.duplicate(type=gdef.TokenImpersonation, impersonation_level=gdef.SecurityImpersonation)
<Token TokenId=0x39dadbf8 Type=TokenImpersonation(0x2L) ImpersonationLevel=SecurityImpersonation(0x2L)>
"""
newtoken = gdef.HANDLE()
if type is None:
type = self.type
if impersonation_level is None:
if self.type == gdef.TokenImpersonation:
impersonation_level = self.impersonation_level
elif type != gdef.TokenImpersonation:
impersonation_level = 0 #: ignored
else:
raise ValueError("Duplicating a PrimaryToken as a TokenImpersonation require explicit <impersonation_level> parameter")
winproxy.DuplicateTokenEx(self.handle, access_rigth, attributes, impersonation_level, type, newtoken)
return bltn_type(self)(newtoken.value)
def adjust_privileges(self, privileges):
"""Adjust the token privileges according to ``privileges``.
This API is the `complex one` to adjust multiple privileges at once.
To simply enable one privilege see :func:`enable_privilege`.
:param privileges: :class:`~windows.generated_def.winstructs.TOKEN_PRIVILEGES` (or subclass as :class:`TokenPrivileges`). To easily update your token privileges use the result of :data:`privileges`.
Example:
>>> tok = windows.current_process.token
>>> privs = tok.privileges
>>> privs["SeShutdownPrivilege"] = gdef.SE_PRIVILEGE_ENABLED
>>> privs["SeUndockPrivilege"] = gdef.SE_PRIVILEGE_ENABLED
>>> tok.adjust_privileges(privs)
"""
buffsize = None
if isinstance(privileges, TokenPrivilegesType):
# The TokenPrivilegesType should come from a PTR via Improved buffer
try:
buffsize = privileges._b_base_.real_size
except AttributeError as e:
pass
if buffsize is None:
buffsize = ctypes.sizeof(privileges)
winproxy.AdjustTokenPrivileges(self.handle, False, privileges, buffsize, None, None)
if winproxy.GetLastError() == gdef.ERROR_NOT_ALL_ASSIGNED:
# Transform this in a real WindowsError
raise WindowsError(gdef.ERROR_NOT_ALL_ASSIGNED, "Failed to adjust all privileges")
def enable_privilege(self, name):
"""Enable privilege ``name`` in the token
:raises: :class:`ValueError` if :class:`Token` has no privilege ``name``
"""
privs = self.privileges
try:
privs[name] = gdef.SE_PRIVILEGE_ENABLED
except KeyError as e:
# Emulate the WindowsError that would be triggered in 'adjust_privileges' ?
raise ValueError("{0} has no privilege <{1}>".format(self, name))
return self.adjust_privileges(privs)
def __repr__(self):
flag_repr = gdef.Flag.__repr__
try:
tid_int = int(self.TokenStatistics.TokenId) # May raise -> which is bad as __repr__ may be called on __del__...
except WindowsError as e:
return object.__repr__(self)
toktype = self.type
if toktype == gdef.TokenPrimary:
return "<{0} TokenId={1:#x} Type={2}>".format(type(self).__name__, tid_int, flag_repr(toktype))
return "<{0} TokenId={1:#x} Type={2} ImpersonationLevel={3}>".format(type(self).__name__, tid_int, flag_repr(toktype), flag_repr(self.impersonation_level))
import windows.security
|
{
"content_hash": "505704fcc2870c29a2e9750bbbd72f12",
"timestamp": "",
"source": "github",
"line_count": 626,
"max_line_length": 206,
"avg_line_length": 43.67571884984026,
"alnum_prop": 0.6815039683991075,
"repo_name": "hakril/PythonForWindows",
"id": "091a1ce1ff2cdff80e0cc7f4ea363f3b98ce7e69",
"size": "27341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows/winobject/token.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4087889"
}
],
"symlink_target": ""
}
|
import sys
from datetime import datetime
from pyvirtualdisplay import Display
from selenium import webdriver
display = Display(visible=0, size=(800, 600)) # Virtual display for headless mode
display.start()
wifi_address = "http://192.168.1.1"
wifi_username = "admin"
wifi_password = "admin"
driver = webdriver.Firefox()
driver.get(wifi_address + '/login.asp') # Open Login page
driver.find_element_by_id("loginUsername").clear()
driver.find_element_by_id("loginUsername").send_keys(wifi_username) # Set username
driver.find_element_by_id("loginPassword").clear()
driver.find_element_by_id("loginPassword").send_keys(wifi_password) # Set password
driver.find_element_by_css_selector("button.upc_button1").click() # Submit login form
driver.get(wifi_address +'/wireless/radio.asp') # Go to wireless settings page
if str(sys.argv[1]).lower() == "enable":
# Select Enabled from dropdown menu
driver.find_element_by_xpath("//select[@name='WirelessEnable']/option[text()='Enabled']").click()
print str(datetime.now()) + " Enabling WiFi on router"
else:
# Select Disabled from dropdown menu
driver.find_element_by_xpath("//select[@name='WirelessEnable']/option[text()='Disabled']").click()
print str(datetime.now()) + " Disabling WiFi on router"
driver.find_element_by_id("CommitRadioSubmit").click() # Submit form with new settings
driver.find_element_by_css_selector("a.logout").click() # Logout becuse only one user can be logged-in
driver.quit()
display.stop()
sys.exit()
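# Example invocation (illustrative; any first argument other than "enable" disables WiFi):
#   python UPC_WIFI_ENABLER.py enable
#   python UPC_WIFI_ENABLER.py disable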
|
{
"content_hash": "de8d8e23570e71a94b383b6b9500e660",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 102,
"avg_line_length": 42.82857142857143,
"alnum_prop": 0.7358238825883923,
"repo_name": "Pytlicek/Router-Modem-Features",
"id": "84ca609625a81624446b80dd11f75358f693db77",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TC7200.U/UPC_WIFI_ENABLER.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1545"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import itertools
import operator
import os
from plumbum.lib import six
from abc import abstractmethod, abstractproperty
import warnings
from functools import reduce
class FSUser(int):
"""A special object that represents a file-system user. It derives from ``int``, so it behaves
just like a number (``uid``/``gid``), but also have a ``.name`` attribute that holds the
string-name of the user, if given (otherwise ``None``)
"""
def __new__(cls, val, name=None):
self = int.__new__(cls, val)
self.name = name
return self
class Path(str, six.ABC):
"""An abstraction over file system paths. This class is abstract, and the two implementations
are :class:`LocalPath <plumbum.machines.local.LocalPath>` and
:class:`RemotePath <plumbum.path.remote.RemotePath>`.
"""
CASE_SENSITIVE = True
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, str(self))
def __div__(self, other):
"""Joins two paths"""
return self.join(other)
__truediv__ = __div__
__getitem__ = __div__
def __floordiv__(self, expr):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
return self.glob(expr)
def __iter__(self):
"""Iterate over the files in this directory"""
return iter(self.list())
def __eq__(self, other):
if isinstance(other, Path):
return self._get_info() == other._get_info()
elif isinstance(other, str):
if self.CASE_SENSITIVE:
return str(self) == other
else:
return str(self).lower() == other.lower()
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
if self.CASE_SENSITIVE:
return hash(str(self))
else:
return hash(str(self).lower())
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def __fspath__(self):
"""Added for Python 3.6 support"""
return str(self)
def __contains__(self, item):
"""Paths should support checking to see if an file or folder is in them."""
try:
return (self / item.name).exists()
except AttributeError:
return (self / item).exists()
@abstractmethod
def _form(self, *parts):
pass
def up(self, count=1):
"""Go up in ``count`` directories (the default is 1)"""
return self.join("../" * count)
def walk(self, filter=lambda p: True,
dir_filter=lambda p: True): # @ReservedAssignment
"""traverse all (recursive) sub-elements under this directory, that match the given filter.
By default, the filter accepts everything; you can provide a custom filter function that
takes a path as an argument and returns a boolean
:param filter: the filter (predicate function) for matching results. Only paths matching
this predicate are returned. Defaults to everything.
:param dir_filter: the filter (predicate function) for matching directories. Only directories
matching this predicate are recursed into. Defaults to everything.
"""
for p in self.list():
if filter(p):
yield p
if p.is_dir() and dir_filter(p):
for p2 in p.walk(filter, dir_filter):
yield p2
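    # Illustrative sketch (assumes `p` is a concrete Path instance such as a LocalPath):
    # collect every ".py" file below p while skipping directories named ".git":
    #
    #   pyfiles = list(p.walk(filter=lambda f: f.suffix == ".py",
    #                         dir_filter=lambda d: d.name != ".git"))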
@abstractproperty
def name(self):
"""The basename component of this path"""
@property
def basename(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .name instead", DeprecationWarning)
return self.name
@abstractproperty
def stem(self):
"""The name without an extension, or the last component of the path"""
@abstractproperty
def dirname(self):
"""The dirname component of this path"""
@abstractproperty
def root(self):
"""The root of the file tree (`/` on Unix)"""
@abstractproperty
def drive(self):
"""The drive letter (on Windows)"""
@abstractproperty
def suffix(self):
"""The suffix of this file"""
@abstractproperty
def suffixes(self):
"""This is a list of all suffixes"""
@abstractproperty
def uid(self):
"""The user that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``uid``), but it also has a ``.name``
attribute that holds the string-name of the user"""
@abstractproperty
def gid(self):
"""The group that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``gid``), but it also has a ``.name``
attribute that holds the string-name of the group"""
@abstractmethod
def as_uri(self, scheme=None):
"""Returns a universal resource identifier. Use ``scheme`` to force a scheme."""
@abstractmethod
def _get_info(self):
pass
@abstractmethod
def join(self, *parts):
"""Joins this path with any number of paths"""
@abstractmethod
def list(self):
"""Returns the files in this directory"""
@abstractmethod
def iterdir(self):
"""Returns an iterator over the directory. Might be slightly faster on Python 3.5 than .list()"""
@abstractmethod
def is_dir(self):
"""Returns ``True`` if this path is a directory, ``False`` otherwise"""
def isdir(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_dir() instead", DeprecationWarning)
return self.is_dir()
@abstractmethod
def is_file(self):
"""Returns ``True`` if this path is a regular file, ``False`` otherwise"""
def isfile(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_file() instead", DeprecationWarning)
return self.is_file()
def islink(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use is_symlink instead", DeprecationWarning)
return self.is_symlink()
@abstractmethod
def is_symlink(self):
"""Returns ``True`` if this path is a symbolic link, ``False`` otherwise"""
@abstractmethod
def exists(self):
"""Returns ``True`` if this path exists, ``False`` otherwise"""
@abstractmethod
def stat(self):
"""Returns the os.stats for a file"""
pass
@abstractmethod
def with_name(self, name):
"""Returns a path with the name replaced"""
@abstractmethod
def with_suffix(self, suffix, depth=1):
"""Returns a path with the suffix replaced. Up to last ``depth`` suffixes will be
        replaced. None will replace all suffixes. If there are fewer than ``depth`` suffixes,
this will replace all suffixes. ``.tar.gz`` is an example where ``depth=2`` or
``depth=None`` is useful"""
def preferred_suffix(self, suffix):
"""Adds a suffix if one does not currently exist (otherwise, no change). Useful
for loading files with a default suffix"""
if len(self.suffixes) > 0:
return self
else:
return self.with_suffix(suffix)
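    # For illustration (hypothetical paths): p.preferred_suffix(".json") turns a
    # suffix-less path like ".../report" into ".../report.json", while a path that
    # already has a suffix (e.g. ".../report.txt") is returned unchanged.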
@abstractmethod
def glob(self, pattern):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
@abstractmethod
def delete(self):
"""Deletes this path (recursively, if a directory)"""
@abstractmethod
def move(self, dst):
"""Moves this path to a different location"""
def rename(self, newname):
"""Renames this path to the ``new name`` (only the basename is changed)"""
return self.move(self.up() / newname)
@abstractmethod
def copy(self, dst, override=False):
"""Copies this path (recursively, if a directory) to the destination path. Raises TypeError if
dst exists and override is False."""
@abstractmethod
def mkdir(self, mode=0o777, parents=True, exist_ok=True):
"""
Creates a directory at this path.
:param mode: **Currently only implemented for local paths!** Numeric mode to use for directory
creation, which may be ignored on some systems. The current implementation
reproduces the behavior of ``os.mkdir`` (i.e., the current umask is first masked
out), but this may change for remote paths. As with ``os.mkdir``, it is recommended
to call :func:`chmod` explicitly if you need to be sure.
:param parents: If this is true (the default), the directory's parents will also be created if
necessary.
:param exist_ok: If this is true (the default), no exception will be raised if the directory
already exists (otherwise ``OSError``).
Note that the defaults for ``parents`` and ``exist_ok`` are the opposite of what they are in
Python's own ``pathlib`` - this is to maintain backwards-compatibility with Plumbum's behaviour
from before they were implemented.
"""
@abstractmethod
def open(self, mode="r"):
"""opens this path as a file"""
@abstractmethod
def read(self, encoding=None):
"""returns the contents of this file. By default the data is binary (``bytes``), but you can
specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
@abstractmethod
def write(self, data, encoding=None):
"""writes the given data to this file. By default the data is expected to be binary (``bytes``),
but you can specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
@abstractmethod
def touch(self):
"""Update the access time. Creates an empty file if none exists."""
@abstractmethod
def chown(self, owner=None, group=None, recursive=None):
"""Change ownership of this path.
:param owner: The owner to set (either ``uid`` or ``username``), optional
:param group: The group to set (either ``gid`` or ``groupname``), optional
:param recursive: whether to change ownership of all contained files and subdirectories.
Only meaningful when ``self`` is a directory. If ``None``, the value
will default to ``True`` if ``self`` is a directory, ``False`` otherwise.
"""
@abstractmethod
def chmod(self, mode):
"""Change the mode of path to the numeric mode.
:param mode: file mode as for os.chmod
"""
@staticmethod
def _access_mode_to_flags(mode,
flags={
"f": os.F_OK,
"w": os.W_OK,
"r": os.R_OK,
"x": os.X_OK
}):
if isinstance(mode, str):
mode = reduce(operator.or_, [flags[m] for m in mode.lower()], 0)
return mode
@abstractmethod
def access(self, mode=0):
"""Test file existence or permission bits
:param mode: a bitwise-or of access bits, or a string-representation thereof:
``'f'``, ``'x'``, ``'r'``, ``'w'`` for ``os.F_OK``, ``os.X_OK``,
``os.R_OK``, ``os.W_OK``
"""
@abstractmethod
def link(self, dst):
"""Creates a hard link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def symlink(self, dst):
"""Creates a symbolic link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def unlink(self):
"""Deletes a symbolic link"""
def split(self, *dummy_args, **dummy_kargs):
"""Splits the path on directory separators, yielding a list of directories, e.g,
``"/var/log/messages"`` will yield ``['var', 'log', 'messages']``.
"""
parts = []
path = self
while path != path.dirname:
parts.append(path.name)
path = path.dirname
return parts[::-1]
@property
def parts(self):
"""Splits the directory into parts, including the base directroy, returns a tuple"""
return tuple([self.drive + self.root] + self.split())
def relative_to(self, source):
"""Computes the "relative path" require to get from ``source`` to ``self``. They satisfy the invariant
``source_path + (target_path - source_path) == target_path``. For example::
/var/log/messages - /var/log/messages = []
/var/log/messages - /var = [log, messages]
/var/log/messages - / = [var, log, messages]
/var/log/messages - /var/tmp = [.., log, messages]
/var/log/messages - /opt = [.., var, log, messages]
/var/log/messages - /opt/lib = [.., .., var, log, messages]
"""
if isinstance(source, str):
source = self._form(source)
parts = self.split()
baseparts = source.split()
ancestors = len(
list(
itertools.takewhile(lambda p: p[0] == p[1],
zip(parts, baseparts))))
return RelativePath([".."] * (len(baseparts) - ancestors) +
parts[ancestors:])
def __sub__(self, other):
"""Same as ``self.relative_to(other)``"""
return self.relative_to(other)
def _glob(self, pattern, fn):
"""Applies a glob string or list/tuple/iterable to the current path, using ``fn``"""
if isinstance(pattern, str):
return fn(pattern)
else:
results = []
for single_pattern in pattern:
results.extend(fn(single_pattern))
return sorted(list(set(results)))
    def resolve(self, strict=False):
"""Added to allow pathlib like syntax. Does nothing since
Plumbum paths are always absolute. Does not (currently) resolve
symlinks."""
# TODO: Resolve symlinks here
return self
@property
def parents(self):
"""Pathlib like sequence of ancestors"""
join = lambda x, y: self._form(x) / y
as_list = (reduce(join, self.parts[:i], self.parts[0])
for i in range(len(self.parts) - 1, 0, -1))
return tuple(as_list)
@property
def parent(self):
"""Pathlib like parent of the path."""
return self.parents[0]
class RelativePath(object):
"""
Relative paths are the "delta" required to get from one path to another.
    Note that relative paths do not point at anything, and thus are not paths.
Therefore they are system agnostic (but closed under addition)
Paths are always absolute and point at "something", whether existent or not.
Relative paths are created by subtracting paths (``Path.relative_to``)
"""
def __init__(self, parts):
self.parts = parts
def __str__(self):
return "/".join(self.parts)
def __iter__(self):
return iter(self.parts)
def __len__(self):
return len(self.parts)
def __getitem__(self, index):
return self.parts[index]
def __repr__(self):
return "RelativePath(%r)" % (self.parts, )
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
return hash(str(self))
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def up(self, count=1):
return RelativePath(self.parts[:-count])
def __radd__(self, path):
return path.join(*self.parts)
|
{
"content_hash": "59039dead8d58b29280454e10f504eff",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 110,
"avg_line_length": 33.75456389452333,
"alnum_prop": 0.5785109067964666,
"repo_name": "AndydeCleyre/plumbum",
"id": "2c1fe06c4f80df3d2df150c24d058b300e4d460c",
"size": "16641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plumbum/path/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "52"
},
{
"name": "Python",
"bytes": "440514"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
}
|
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
import desktop.conf
import ldap
import ldap.filter
import logging
LOG = logging.getLogger(__name__)
CACHED_LDAP_CONN = None
def get_connection():
global CACHED_LDAP_CONN
if CACHED_LDAP_CONN is not None:
return CACHED_LDAP_CONN
ldap_url = desktop.conf.LDAP.LDAP_URL.get()
nt_domain = desktop.conf.LDAP.NT_DOMAIN.get()
username = desktop.conf.LDAP.BIND_DN.get()
password = desktop.conf.LDAP.BIND_PASSWORD.get()
ldap_cert = desktop.conf.LDAP.LDAP_CERT.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
return LdapConnection(ldap_url, get_ldap_username(username, nt_domain),
password, ldap_cert)
def get_ldap_username(username, nt_domain):
if nt_domain:
return '%s@%s' % (username, nt_domain)
else:
return username
class LdapConnection(object):
"""
Constructor creates LDAP connection. Contains methods
to easily query an LDAP server.
"""
def __init__(self, ldap_url, bind_user=None, bind_password=None, cert_file=None):
"""
Constructor initializes the LDAP connection
"""
if cert_file is not None:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)
ldap.set_option(ldap.OPT_REFERRALS, 0)
self.ldap_handle = ldap.initialize(ldap_url)
if bind_user is not None:
try:
self.ldap_handle.simple_bind_s(bind_user, bind_password)
      except ldap.LDAPError:
raise RuntimeError("Failed to bind to LDAP server as user %s" %
(bind_user,))
else:
try:
# Do anonymous bind
self.ldap_handle.simple_bind_s('','')
      except ldap.LDAPError:
raise RuntimeError("Failed to bind to LDAP server anonymously")
def find_user(self, username, find_by_dn=False):
"""
    LDAP search helper method for finding users. Supports searching for users
    by distinguished name or by the configured username attribute.
"""
base_dn = self._get_root_dn()
scope = ldap.SCOPE_SUBTREE
user_filter = desktop.conf.LDAP.USERS.USER_FILTER.get()
if not user_filter.startswith('('):
user_filter = '(' + user_filter + ')'
user_name_attr = desktop.conf.LDAP.USERS.USER_NAME_ATTR.get()
if find_by_dn:
sanitized_name = ldap.filter.escape_filter_chars(username)
user_name_filter = '(distinguishedName=' + sanitized_name + ')'
else:
sanitized_name = ldap.filter.escape_filter_chars(username)
user_name_filter = '(' + user_name_attr + '=' + sanitized_name + ')'
ldap_filter = '(&' + user_filter + user_name_filter + ')'
ldap_result_id = self.ldap_handle.search(base_dn, scope, ldap_filter)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT and result_data[0][0] is not None:
data = result_data[0][1]
user_info = { 'username': data[user_name_attr][0] }
if 'givenName' in data:
user_info['first'] = data['givenName'][0]
if 'sn' in data:
user_info['last'] = data['sn'][0]
if 'mail' in data:
user_info['email'] = data['mail'][0]
return user_info
return None
def find_group(self, groupname, find_by_dn=False):
"""
LDAP search helper method for finding groups
"""
base_dn = self._get_root_dn()
scope = ldap.SCOPE_SUBTREE
group_filter = desktop.conf.LDAP.GROUPS.GROUP_FILTER.get()
if not group_filter.startswith('('):
group_filter = '(' + group_filter + ')'
group_name_attr = desktop.conf.LDAP.GROUPS.GROUP_NAME_ATTR.get()
if find_by_dn:
sanitized_name = ldap.filter.escape_filter_chars(groupname)
group_name_filter = '(distinguishedName=' + sanitized_name + ')'
else:
sanitized_name = ldap.filter.escape_filter_chars(groupname)
group_name_filter = '(' + group_name_attr + '=' + sanitized_name + ')'
ldap_filter = '(&' + group_filter + group_name_filter + ')'
ldap_result_id = self.ldap_handle.search(base_dn, scope, ldap_filter)
result_type, result_data = self.ldap_handle.result(ldap_result_id)
if result_type == ldap.RES_SEARCH_RESULT and result_data[0][0] is not None:
data = result_data[0][1]
group_info = { 'name': data[group_name_attr][0] }
member_attr = desktop.conf.LDAP.GROUPS.GROUP_MEMBER_ATTR.get()
if member_attr in data:
group_info['members'] = data[member_attr]
else:
group_info['members'] = []
return group_info
return None
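  # Illustrative note (configuration values are hypothetical): with USER_NAME_ATTR set
  # to "sAMAccountName" and USER_FILTER set to "objectclass=*", find_user("jdoe")
  # searches the base DN with the filter:
  #   (&(objectclass=*)(sAMAccountName=jdoe))
  # whereas find_user(dn, find_by_dn=True) matches on distinguishedName instead.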
def _get_root_dn(self):
"""
    Returns the configured base DN (e.g. DC=desktop,DC=local).
"""
return desktop.conf.LDAP.BASE_DN.get()
|
{
"content_hash": "36108c76857c7d8f6be6b035c739649c",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 101,
"avg_line_length": 32.82876712328767,
"alnum_prop": 0.6476110995201335,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "5668bc49405387eb8f3d32808cad5607e4118abf",
"size": "5584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/useradmin/src/useradmin/ldap_access.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
"""
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import json
import uuid
from django.core.serializers.json import DjangoJSONEncoder
class BetterJSONEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, uuid.UUID):
return obj.hex
elif isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
elif isinstance(obj, (set, frozenset)):
return list(obj)
return super(BetterJSONEncoder, self).default(obj)
def dumps(value, **kwargs):
return json.dumps(value, cls=BetterJSONEncoder, **kwargs)
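# Illustrative sketch: the encoder handles values the default encoder rejects, e.g.
#   dumps({"id": uuid.uuid4(), "when": datetime.datetime.utcnow(), "tags": {"a", "b"}})
# serializes the UUID as its hex string, the datetime via "%Y-%m-%dT%H:%M:%S.%fZ",
# and the set as a JSON list.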
|
{
"content_hash": "74af9e4cd8f942ee88552e3a6aa8704f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 27.24137931034483,
"alnum_prop": 0.6734177215189874,
"repo_name": "nkovshov/gargoyle",
"id": "31540578dd51b0c147305368acedf4e57f1305ce",
"size": "790",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gargoyle/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6567"
},
{
"name": "HTML",
"bytes": "12767"
},
{
"name": "JavaScript",
"bytes": "8010"
},
{
"name": "Python",
"bytes": "124177"
}
],
"symlink_target": ""
}
|
class ExecutionConfiguration(object):
@staticmethod
def get_default_configuration():
return ExecutionConfiguration()
def __init__(self):
self.allow_to_transit_to_multiple_states = True
self.allow_to_stay_in_state_on_update = False
self.exit_when_end_state_is_encountered = False
|
{
"content_hash": "d7f7c76f1b9f197705a5246414cdb1ae",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 55,
"avg_line_length": 32.4,
"alnum_prop": 0.6820987654320988,
"repo_name": "Hicks48/amber-state-machine-python",
"id": "da1c5e18e12acc8435fcf58fe55985c3200b8a3b",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amber_automate/execution_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28952"
}
],
"symlink_target": ""
}
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8#p-cv-n%@ss_grg4v-_&ymc0n9%)bu_2_d8n@qg2s(%l(vkq@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Aovek'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(PROJECT_ROOT, "../../"))
CONFIG_FILE = PROJECT_ROOT + '/../../config.json'
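# The two lines above appear to add the repository root (two levels up) to
# sys.path so project-level packages such as 'Aovek' can be imported, with
# CONFIG_FILE assumed to be a config.json sitting at that same root.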
|
{
"content_hash": "c2c2cb2afe621c6927545579af1db4c8",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 91,
"avg_line_length": 25.6,
"alnum_prop": 0.6802884615384616,
"repo_name": "nikolaystanishev/traffic-sign-recognition",
"id": "adbcf1437ad19830875cc5f0b73c660c856aafa3",
"size": "3328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4370"
}
],
"symlink_target": ""
}
|
"""
Autopsy Forensic Browser
Copyright 2016 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Blackboard
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
import traceback
import general
"""
Analyzes database created by browser that stores GEO location info.
"""
class BrowserLocationAnalyzer(general.AndroidComponentAnalyzer):
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
def analyze(self, dataSource, fileManager, context):
try:
abstractFiles = fileManager.findFiles(dataSource, "CachedGeoposition%.db")
for abstractFile in abstractFiles:
if abstractFile.getSize() == 0:
continue
try:
jFile = File(Case.getCurrentCase().getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
except Exception as ex:
self._logger.log(Level.SEVERE, "Error parsing browser location files", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except TskCoreException as ex:
self._logger.log(Level.SEVERE, "Error finding browser location files", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
def __findGeoLocationsInDB(self, databasePath, abstractFile):
if not databasePath:
return
try:
Class.forName("org.sqlite.JDBC") #load JDBC driver
connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
statement = connection.createStatement()
except (ClassNotFoundException, SQLException) as ex:
self._logger.log(Level.SEVERE, "Error connecting to SQL database", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
return
resultSet = None # ensure the finally block can still close the connection if the query itself fails
try:
resultSet = statement.executeQuery("SELECT timestamp, latitude, longitude, accuracy FROM CachedPosition;")
while resultSet.next():
timestamp = Long.valueOf(resultSet.getString("timestamp")) / 1000
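# the timestamp column is presumably stored as milliseconds since the epoch,
# so dividing by 1000 yields the seconds value expected by TSK_DATETIME below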
latitude = Double.valueOf(resultSet.getString("latitude"))
longitude = Double.valueOf(resultSet.getString("longitude"))
artifact = abstractFile.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_TRACKPOINT)
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE, general.MODULE_NAME, latitude))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE, general.MODULE_NAME, longitude))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME, general.MODULE_NAME, timestamp))
artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME, general.MODULE_NAME, "Browser Location History"))
# artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_VALUE.getTypeID(),moduleName, accuracy))
# NOTE: originally commented out
try:
# index the artifact for keyword search
blackboard = Case.getCurrentCase().getServices().getBlackboard()
blackboard.indexArtifact(artifact)
except Blackboard.BlackboardException as ex:
self._logger.log(Level.SEVERE, "Unable to index blackboard artifact " + artifact.getArtifactTypeName(), ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
MessageNotifyUtil.Notify.error("Failed to index GPS trackpoint artifact for keyword search.", artifact.getDisplayName())
except Exception as ex:
self._logger.log(Level.SEVERE, "Error putting artifacts to blackboard", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
finally:
try:
if resultSet is not None:
resultSet.close()
statement.close()
connection.close()
except Exception as ex:
self._logger.log(Level.SEVERE, "Error closing database", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
|
{
"content_hash": "98ddfb6b3f4a4528b046bd42e74a47e7",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 157,
"avg_line_length": 49.708333333333336,
"alnum_prop": 0.6955574182732607,
"repo_name": "dgrove727/autopsy",
"id": "ef79a623c230344949659636bc9dbf9eddd30c8b",
"size": "5965",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "InternalPythonModules/android/browserlocation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4467"
},
{
"name": "HTML",
"bytes": "9201"
},
{
"name": "Java",
"bytes": "9156290"
},
{
"name": "Python",
"bytes": "284030"
}
],
"symlink_target": ""
}
|
'''
The main issue here was making patForRec global; you'll probably forget.
'''
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
import numpy as np
from numpy import loadtxt
import time
totalStart = time.time()
date,bid,ask = np.loadtxt('GBPUSD1d.txt', unpack=True,
delimiter=',',
converters={0:mdates.strpdate2num('%Y%m%d%H%M%S')})
avgLine = ((bid+ask)/2)
####DEFINE######
#CHANGE#
patternAr = []
performanceAr = []
patForRec = []
def percentChange(startPoint,currentPoint):
try:
x = ((float(currentPoint)-startPoint)/abs(startPoint))*100.00
if x == 0.0:
return 0.000000001
else:
return x
except:
return 0.0001
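# Worked example (illustrative): percentChange(1.5500, 1.5531)
# = ((1.5531 - 1.5500) / abs(1.5500)) * 100, roughly 0.2%.
# A flat move returns 0.000000001 rather than 0.0, presumably so a stored
# pattern value is never reused later as a zero denominator in percentChange.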
def patternStorage():
'''
The goal of patternStorage is to begin collecting %change patterns
in the tick data. From there, we also collect the short-term outcome
of each pattern. Later on, the length of the pattern, how far out we
look to compare to, and the length of the compared range can all be
changed, and even THAT can be machine learned to find the best of all
3 by comparing success rates.'''
startTime = time.time()
x = len(avgLine)-30
y = 11
currentStance = 'none'
while y < x:
pattern = []
p1 = percentChange(avgLine[y-10], avgLine[y-9])
p2 = percentChange(avgLine[y-10], avgLine[y-8])
p3 = percentChange(avgLine[y-10], avgLine[y-7])
p4 = percentChange(avgLine[y-10], avgLine[y-6])
p5 = percentChange(avgLine[y-10], avgLine[y-5])
p6 = percentChange(avgLine[y-10], avgLine[y-4])
p7 = percentChange(avgLine[y-10], avgLine[y-3])
p8 = percentChange(avgLine[y-10], avgLine[y-2])
p9 = percentChange(avgLine[y-10], avgLine[y-1])
p10= percentChange(avgLine[y-10], avgLine[y])
outcomeRange = avgLine[y+20:y+30]
currentPoint = avgLine[y]
try:
avgOutcome = reduce(lambda x, y: x + y, outcomeRange) / len(outcomeRange)
except Exception, e:
print str(e)
avgOutcome = 0
#Define
futureOutcome = percentChange(currentPoint, avgOutcome)
#print some diagnostics
'''
print 'where we are historically:',currentPoint
print 'soft outcome of the horizon:',avgOutcome
print 'This pattern brings a future change of:',futureOutcome
print '_______'
print p1, p2, p3, p4, p5, p6, p7, p8, p9, p10
'''
pattern.append(p1)
pattern.append(p2)
pattern.append(p3)
pattern.append(p4)
pattern.append(p5)
pattern.append(p6)
pattern.append(p7)
pattern.append(p8)
pattern.append(p9)
pattern.append(p10)
patternAr.append(pattern)
performanceAr.append(futureOutcome)
y+=1
#####
endTime = time.time()
print len(patternAr)
print len(performanceAr)
print 'Pattern storing took:', endTime-startTime
#####
####
####
def currentPattern():
mostRecentPoint = avgLine[-1]
cp1 = percentChange(avgLine[-11],avgLine[-10])
cp2 = percentChange(avgLine[-11],avgLine[-9])
cp3 = percentChange(avgLine[-11],avgLine[-8])
cp4 = percentChange(avgLine[-11],avgLine[-7])
cp5 = percentChange(avgLine[-11],avgLine[-6])
cp6 = percentChange(avgLine[-11],avgLine[-5])
cp7 = percentChange(avgLine[-11],avgLine[-4])
cp8 = percentChange(avgLine[-11],avgLine[-3])
cp9 = percentChange(avgLine[-11],avgLine[-2])
cp10= percentChange(avgLine[-11],avgLine[-1])
patForRec.append(cp1)
patForRec.append(cp2)
patForRec.append(cp3)
patForRec.append(cp4)
patForRec.append(cp5)
patForRec.append(cp6)
patForRec.append(cp7)
patForRec.append(cp8)
patForRec.append(cp9)
patForRec.append(cp10)
#print patForRec
def graphRawFX():
fig=plt.figure(figsize=(10,7))
ax1 = plt.subplot2grid((40,40), (0,0), rowspan=40, colspan=40)
ax1.plot(date,bid)
ax1.plot(date,ask)
#ax1.plot(date,((bid+ask)/2))
#ax1.plot(date,percentChange(ask[0],ask),'r')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
#####
plt.grid(True)
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(45)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
#######
ax1_2 = ax1.twinx()
#ax1_2.plot(date, (ask-bid))
ax1_2.fill_between(date, 0, (ask-bid), facecolor='g',alpha=.3)
#ax1_2.set_ylim(0, 3*ask.max())
#######
plt.subplots_adjust(bottom=.23)
#plt.grid(True)
plt.show()
def patternRecognition():
for eachPattern in patternAr:
sim1 = 100.00 - abs(percentChange(eachPattern[0], patForRec[0]))
sim2 = 100.00 - abs(percentChange(eachPattern[1], patForRec[1]))
sim3 = 100.00 - abs(percentChange(eachPattern[2], patForRec[2]))
sim4 = 100.00 - abs(percentChange(eachPattern[3], patForRec[3]))
sim5 = 100.00 - abs(percentChange(eachPattern[4], patForRec[4]))
sim6 = 100.00 - abs(percentChange(eachPattern[5], patForRec[5]))
sim7 = 100.00 - abs(percentChange(eachPattern[6], patForRec[6]))
sim8 = 100.00 - abs(percentChange(eachPattern[7], patForRec[7]))
sim9 = 100.00 - abs(percentChange(eachPattern[8], patForRec[8]))
sim10 = 100.00 - abs(percentChange(eachPattern[9], patForRec[9]))
howSim = (sim1+sim2+sim3+sim4+sim5+sim6+sim7+sim8+sim9+sim10)/10.00
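# each simN above is 100 minus the absolute %-difference between a stored
# pattern point and the current pattern point, so howSim is an average
# similarity score out of 100 (identical patterns score 100); the 70 cutoff
# below keeps only the reasonably close matches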
if howSim > 70:
patdex = patternAr.index(eachPattern)
print patdex
print '##################################'
print '##################################'
print '##################################'
print '##################################'
print patForRec
print '==================================='
print '==================================='
print eachPattern
print '----------'
print 'predicted outcome:',performanceAr[patdex]
print '##################################'
print '##################################'
print '##################################'
print '##################################'
patternStorage()
currentPattern()
patternRecognition()
totalEnd = time.time()-totalStart
print 'Entire processing took:',totalEnd,'seconds'
|
{
"content_hash": "e3e73c6fcaba809eff8e55213d8d5c82",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 85,
"avg_line_length": 30.451612903225808,
"alnum_prop": 0.565677966101695,
"repo_name": "PythonProgramming/Pattern-Recognition-for-Forex-Trading",
"id": "f8711ec747e3d04eba4e5a8fbcfbc6c5466e9b40",
"size": "6608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machFX8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103001"
}
],
"symlink_target": ""
}
|
import os
import unittest
import imath
import random
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferOSL
import GafferOSLTest
import GafferImage
class OSLShaderTest( GafferOSLTest.OSLTestCase ) :
def test( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["name"].getValue(), s )
self.assertEqual( n["type"].getValue(), "osl:surface" )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "s", "m" ] )
self.assertTrue( isinstance( n["parameters"]["i"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["f"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["s"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["m"], Gaffer.M44fPlug ) )
self.assertEqual( n["parameters"]["i"].defaultValue(), 10 )
self.assertEqual( n["parameters"]["f"].defaultValue(), 1 )
self.assertEqual( n["parameters"]["c"].defaultValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["s"].defaultValue(), "s" )
self.assertEqual( n["parameters"]["m"].defaultValue(), imath.M44f() )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
network = n.attributes()["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.outputShader().name, s )
self.assertEqual( network.outputShader().type, "osl:surface" )
self.assertEqual( network.outputShader().parameters["i"], IECore.IntData( 10 ) )
self.assertEqual( network.outputShader().parameters["f"], IECore.FloatData( 1 ) )
self.assertEqual( network.outputShader().parameters["c"], IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ) )
self.assertEqual( network.outputShader().parameters["s"], IECore.StringData( "s" ) )
self.assertEqual( network.outputShader().parameters["m"], IECore.M44fData( imath.M44f() ) )
def testOutputTypes( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["name"].getValue(), s )
self.assertEqual( n["type"].getValue(), "osl:shader" )
self.assertEqual( len( n["parameters"] ), 1 )
self.assertEqual( n["parameters"].keys(), [ "input" ] )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
self.assertEqual( n["out"].keys(), [ "i", "f", "c", "s", "m" ] )
self.assertTrue( isinstance( n["out"]["i"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["out"]["f"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["out"]["c"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["out"]["s"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["out"]["m"], Gaffer.M44fPlug ) )
def testNetwork( self ) :
typesShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
outputTypesShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
typesNode = GafferOSL.OSLShader( "types" )
outputTypesNode = GafferOSL.OSLShader( "outputTypes" )
typesNode.loadShader( typesShader )
outputTypesNode.loadShader( outputTypesShader )
typesNode["parameters"]["i"].setInput( outputTypesNode["out"]["i"] )
self.assertEqual( typesNode["parameters"]["i"].getValue(), 10 )
network = typesNode.attributes()["osl:surface"]
self.assertEqual( len( network ), 2 )
self.assertEqual( network.getOutput(), ( "types", "" ) )
types = network.getShader( "types" )
outputTypes = network.getShader( "outputTypes" )
self.assertEqual( types.name, typesShader )
self.assertEqual( types.type, "osl:surface" )
self.assertEqual( types.parameters["f"], IECore.FloatData( 1 ) )
self.assertEqual( types.parameters["c"], IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ) )
self.assertEqual( types.parameters["s"], IECore.StringData( "s" ) )
self.assertEqual( outputTypes.name, outputTypesShader )
self.assertEqual( outputTypes.type, "osl:shader" )
self.assertEqual( outputTypes.parameters["input"], IECore.FloatData( 1 ) )
self.assertEqual(
network.inputConnections( "types" ),
[ ( ( "outputTypes", "i" ), ( "types", "i" ) ) ]
)
def testSerialiation( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
script = Gaffer.ScriptNode()
script["n"] = GafferOSL.OSLShader()
script["n"].loadShader( s )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
self.assertEqual( script["n"]["name"].getValue(), script2["n"]["name"].getValue() )
self.assertEqual( script["n"]["type"].getValue(), script2["n"]["type"].getValue() )
self.assertEqual( script["n"]["parameters"].keys(), script2["n"]["parameters"].keys() )
self.assertEqual( script["n"]["out"].keys(), script2["n"]["out"].keys() )
def testLoadNonexistentShader( self ) :
n = GafferOSL.OSLShader()
self.assertRaises( RuntimeError, n.loadShader, "nonexistent" )
def testSearchPaths( self ) :
standardShaderPaths = os.environ["OSL_SHADER_PATHS"]
try:
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
os.environ["OSL_SHADER_PATHS"] = os.path.dirname( s )
n = GafferOSL.OSLShader()
n.loadShader( os.path.basename( s ) )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "s", "m" ] )
finally:
os.environ["OSL_SHADER_PATHS"] = standardShaderPaths
def testNoConnectionToParametersPlug( self ) :
vectorToFloat = GafferOSL.OSLShader()
vectorToFloat.loadShader( "Conversion/VectorToFloat" )
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
vectorToFloat["parameters"]["p"].setInput( globals["out"]["globalP"] )
self.assertTrue( vectorToFloat["parameters"]["p"].getInput().isSame( globals["out"]["globalP"] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][0].getInput().isSame( globals["out"]["globalP"][0] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][1].getInput().isSame( globals["out"]["globalP"][1] ) )
self.assertTrue( vectorToFloat["parameters"]["p"][2].getInput().isSame( globals["out"]["globalP"][2] ) )
self.assertTrue( vectorToFloat["parameters"].getInput() is None )
def testStructs( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/structs.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "s", "ss" ] )
self.assertEqual( n["parameters"]["i"].defaultValue(), 2 )
self.assertEqual( n["parameters"]["f"].defaultValue(), 3 )
self.assertEqual( n["parameters"]["ss"].defaultValue(), "ss" )
self.assertEqual( n["parameters"]["s"].keys(), [ "i", "f", "c", "s" ] )
self.assertEqual( n["parameters"]["s"]["i"].defaultValue(), 1 )
self.assertEqual( n["parameters"]["s"]["f"].defaultValue(), 2 )
self.assertEqual( n["parameters"]["s"]["c"].defaultValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["s"]["s"].defaultValue(), "s" )
n["parameters"]["s"]["i"].setValue( 10 )
n["parameters"]["s"]["f"].setValue( 21 )
n["parameters"]["s"]["c"].setValue( imath.Color3f( 3, 4, 5 ) )
n["parameters"]["s"]["s"].setValue( "ttt" )
network = n.attributes()["osl:shader"]
shader = network.outputShader()
self.assertEqual( len( shader.parameters ), 7 )
self.assertEqual( shader.parameters["i"], IECore.IntData( 2 ) )
self.assertEqual( shader.parameters["f"], IECore.FloatData( 3 ) )
self.assertEqual( shader.parameters["s.i"], IECore.IntData( 10 ) )
self.assertEqual( shader.parameters["s.f"], IECore.FloatData( 21 ) )
self.assertEqual( shader.parameters["s.c"], IECore.Color3fData( imath.Color3f( 3, 4, 5 ) ) )
self.assertEqual( shader.parameters["s.s"], IECore.StringData( "ttt" ) )
self.assertEqual( shader.parameters["ss"], IECore.StringData( "ss" ) )
h1 = n.attributesHash()
n["parameters"]["s"]["i"].setValue( 100 )
h2 = n.attributesHash()
self.assertNotEqual( h1, h2 )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
g = GafferOSL.OSLShader()
g.loadShader( s2 )
n["parameters"]["s"]["i"].setInput( g["out"]["i"] )
h3 = n.attributesHash()
self.assertNotEqual( h1, h3 )
self.assertNotEqual( h2, h3 )
def testOutputPlugAffectsHash( self ) :
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
floatToColor = GafferOSL.OSLShader()
floatToColor.loadShader( "Conversion/FloatToColor" )
floatToColor["parameters"]["r"].setInput( globals["out"]["globalU"] )
h1 = floatToColor.attributesHash()
floatToColor["parameters"]["r"].setInput( globals["out"]["globalV"] )
h2 = floatToColor.attributesHash()
self.assertNotEqual( h1, h2 )
def testCanConnectVectorToColor( self ) :
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
constant = GafferOSL.OSLShader()
constant.loadShader( "Surface/Constant" )
self.assertTrue( constant["parameters"]["Cs"].acceptsInput( globals["out"]["globalP"] ) )
constant["parameters"]["Cs"].setInput( globals["out"]["globalP"] )
self.assertTrue( constant["parameters"]["Cs"].getInput().isSame( globals["out"]["globalP"] ) )
def testClosureParameters( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader( "outputClosure" )
outputClosure.loadShader( outputClosureShader )
inputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/inputClosure.osl" )
inputClosure = GafferOSL.OSLShader( "inputClosure" )
inputClosure.loadShader( inputClosureShader )
self.assertEqual( outputClosure["out"]["c"].typeId(), GafferOSL.ClosurePlug.staticTypeId() )
self.assertEqual( inputClosure["parameters"]["i"].typeId(), GafferOSL.ClosurePlug.staticTypeId() )
inputClosure["parameters"]["i"].setInput( outputClosure["out"]["c"] )
network = inputClosure.attributes()["osl:surface"]
self.assertEqual( len( network ), 2 )
self.assertNotIn( "i", network.outputShader().parameters )
self.assertEqual(
network.inputConnections( "inputClosure" ),
[ network.Connection( ( "outputClosure", "c" ), ( "inputClosure", "i" ) ) ]
)
def testClosureParametersInputAcceptance( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader()
outputClosure.loadShader( outputClosureShader )
inputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/inputClosure.osl" )
inputClosure = GafferOSL.OSLShader()
inputClosure.loadShader( inputClosureShader )
outputColor = GafferOSL.OSLShader()
outputColor.loadShader( "Conversion/VectorToColor" )
self.assertTrue( inputClosure["parameters"]["i"].acceptsInput( outputClosure["out"]["c"] ) )
self.assertFalse( inputClosure["parameters"]["i"].acceptsInput( outputColor["out"]["c"] ) )
def testOutputClosureDirtying( self ) :
outputClosureShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputClosure.osl" )
outputClosure = GafferOSL.OSLShader()
outputClosure.loadShader( outputClosureShader )
cs = GafferTest.CapturingSlot( outputClosure.plugDirtiedSignal() )
outputClosure["parameters"]["e"]["r"].setValue( 10 )
self.assertTrue( outputClosure["out"] in [ x[0] for x in cs ] )
self.assertTrue( outputClosure["out"]["c"] in [ x[0] for x in cs ] )
def testRepeatability( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
sn1 = GafferOSL.OSLShader()
sn1.loadShader( s1 )
sn2 = GafferOSL.OSLShader()
sn2.loadShader( s2 )
sn2["parameters"]["i"].setInput( sn1["out"]["i"] )
self.assertEqual( sn2.attributesHash(), sn2.attributesHash() )
self.assertEqual( sn2.attributes(), sn2.attributes() )
def testHandlesAreHumanReadable( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
sn1 = GafferOSL.OSLShader( "Shader1" )
sn1.loadShader( s1 )
sn2 = GafferOSL.OSLShader( "Shader2" )
sn2.loadShader( s2 )
sn2["parameters"]["i"].setInput( sn1["out"]["i"] )
network = sn2.attributes()["osl:surface"]
self.assertEqual( set( network.shaders().keys() ), { "Shader1", "Shader2" } )
def testHandlesAreUniqueEvenIfNodeNamesArent( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
script = Gaffer.ScriptNode()
script["in1"] = GafferOSL.OSLShader()
script["in1"].loadShader( s1 )
script["in2"] = GafferOSL.OSLShader()
script["in2"].loadShader( s1 )
script["shader"] = GafferOSL.OSLShader()
script["shader"].loadShader( s2 )
script["shader"]["parameters"]["i"].setInput( script["in1"]["out"]["i"] )
script["shader"]["parameters"]["f"].setInput( script["in2"]["out"]["f"] )
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["in1"] ] ) )
# because the nodes have different parents, we can give them the same name.
box["in1"].setName( "notUnique" )
script["in2"].setName( "notUnique" )
network = script["shader"].attributes()["osl:surface"]
self.assertEqual( len( network.shaders() ), 3 )
def testShaderMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.shaderMetadata( "stringValue" ), "s" )
self.assertEqual( n.shaderMetadata( "intValue" ), 1 )
self.assertEqual( n.shaderMetadata( "floatValue" ), 0.5 )
def testParameterMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aStringValue" ), "s" )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aIntValue" ), 1 )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aFloatValue" ), 0.5 )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bStringValue" ), "st" )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bIntValue" ), 2 )
self.assertEqual( n.parameterMetadata( n["parameters"]["b"], "bFloatValue" ), 0.75 )
def testParameterArrayMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayMetadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aStringValues" ), IECore.StringVectorData( [ "one","two" ] ) )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aIntValues" ), IECore.IntVectorData( [ 1, 2 ] ) )
self.assertEqual( n.parameterMetadata( n["parameters"]["a"], "aFloatValues" ), IECore.FloatVectorData( [ 0.25, 0.5 ] ) )
def testParameterMinMaxMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/metadataMinMax.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertAlmostEqual( n["parameters"]["b"].minValue(), 2.3, delta = 0.00001 )
self.assertAlmostEqual( n["parameters"]["b"].maxValue(), 4.7, delta = 0.00001 )
self.assertEqual( n["parameters"]["c"].minValue(), 23 )
self.assertEqual( n["parameters"]["c"].maxValue(), 47 )
self.assertEqual( n["parameters"]["d"].minValue(), imath.Color3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["d"].maxValue(), imath.Color3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["e"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["e"].maxValue(), imath.V3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["f"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["f"].maxValue(), imath.V3f( 4, 5, 6 ) )
self.assertEqual( n["parameters"]["g"].minValue(), imath.V3f( 1, 2, 3 ) )
self.assertEqual( n["parameters"]["g"].maxValue(), imath.V3f( 4, 5, 6 ) )
# Check that no min/max is reported when none is specified
self.assertFalse( n["parameters"]["h"].hasMinValue() )
self.assertFalse( n["parameters"]["h"].hasMaxValue() )
def testParameterSplineMetadata( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineMetadata.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
# If the components of the spline all match, the metadata is registered to the spline plug
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "a" ), 1 )
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "b" ), 2 )
self.assertEqual( n.parameterMetadata( n["parameters"]["correctSpline"], "c" ), 3 )
# If the components don't match, the metadata is registered to the individual plugs
# Note that array plugs are not supported, so we can't test Values and Positions
self.assertEqual( n.parameterMetadata( n["parameters"]["incompleteSplineBasis"], "c" ), 3 )
def testMetadataReuse( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayMetadata.osl" )
n1 = GafferOSL.OSLShader()
n1.loadShader( s )
n2 = GafferOSL.OSLShader()
n2.loadShader( s )
# we don't want every shader to have its own copy of metadata when it could be shared
self.assertTrue(
n1.parameterMetadata( n1["parameters"]["a"], "aStringValues", _copy = False ).isSame(
n2.parameterMetadata( n2["parameters"]["a"], "aStringValues", _copy = False )
)
)
# but because there is no const in python, we want to make sure that the casual
# caller doesn't have the opportunity to really break things, so unless requested
# copies are returned from the query.
n1.parameterMetadata( n1["parameters"]["a"], "aStringValues" ).value = "editingSharedConstDataIsABadIdea"
self.assertEqual( n1.parameterMetadata( n1["parameters"]["a"], "aStringValues" ), IECore.StringVectorData( [ "one", "two" ] ) )
def testAcceptsNoneInput( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertTrue( n["parameters"]["i"].acceptsInput( None ) )
def testOverzealousCycleDetection( self ) :
globals = GafferOSL.OSLShader( "Globals" )
globals.loadShader( "Utility/Globals" )
point = GafferOSL.OSLShader( "Point" )
point.loadShader( "Conversion/FloatToVector" )
noise = GafferOSL.OSLShader( "Noise" )
noise.loadShader( "Pattern/Noise" )
color = GafferOSL.OSLShader( "Color" )
color.loadShader( "Conversion/FloatToColor" )
point["parameters"]["x"].setInput( globals["out"]["globalU"] )
point["parameters"]["y"].setInput( globals["out"]["globalV"] )
noise["parameters"]["p"].setInput( point["out"]["p"] )
color["parameters"]["r"].setInput( globals["out"]["globalU"] )
color["parameters"]["g"].setInput( noise["out"]["n"] )
# Should not throw - there are no cycles above.
color.attributesHash()
color.attributes()
def testLoadNetworkFromVersion0_23( self ) :
s = Gaffer.ScriptNode()
s["fileName"].setValue( os.path.dirname( __file__ ) + "/scripts/networkVersion-0.23.2.1.gfr" )
s.load()
for plug, expectedValue, expectedInput in [
( "InFloat.parameters.name", "s", None ),
( "InFloat.parameters.defaultValue", 1, None ),
( "InFloat1.parameters.name", "t", None ),
( "InFloat1.parameters.defaultValue", 0.5, None ),
( "InFloat2.parameters.name", "u", None ),
( "InFloat2.parameters.defaultValue", 0.25, None ),
( "OutPoint.parameters.name", "stu", None ),
( "BuildPoint.parameters.x", None, "InFloat.out.value" ),
( "BuildPoint.parameters.y", None, "InFloat1.out.value" ),
( "BuildPoint.parameters.z", None, "InFloat2.out.value" ),
( "OutPoint.parameters.value", None, "BuildPoint.out.p" ),
( "OutObject.parameters.in0", None, "OutPoint.out.primitiveVariable" ),
] :
if expectedInput is not None :
self.assertTrue( s.descendant( plug ).getInput().isSame( s.descendant( expectedInput ) ) )
else :
self.assertTrue( s.descendant( plug ).getInput() is None )
if expectedValue is not None :
self.assertEqual( s.descendant( plug ).getValue(), expectedValue )
def testReload( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.osl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s1 )
self.assertEqual(
n["parameters"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"commonStruct",
"commonArray",
"removedI",
"removedF",
"removedColor",
"removedString",
"removedStruct",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"typeChanged5",
"defaultChangedArray",
]
)
self.assertEqual(
n["parameters"]["commonStruct"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"removedI",
"removedF",
"removedColor",
"removedString",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
]
)
values = {
"commonI" : 10,
"commonF" : 25,
"commonColor" : imath.Color3f( 1 ),
"commonString" : "test",
"commonStruct.commonI" : 11,
"commonStruct.commonF" : 2.5,
"commonStruct.commonColor" : imath.Color3f( 0.5 ),
"commonStruct.commonString" : "test2",
"commonArray" : IECore.FloatVectorData( [ 0, 1, 2 ] )
}
for key, value in values.items() :
n["parameters"].descendant( key ).setValue( value )
arrayToNotGetReloaded = n["parameters"]["commonArray"]
arrayToGetReloaded = n["parameters"]["defaultChangedArray"]
self.assertTrue( isinstance( n["parameters"]["typeChanged1"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged2"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged3"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged4"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged5"], Gaffer.V3fPlug ) )
self.assertEqual( n["parameters"]["typeChanged5"].interpretation(), IECore.GeometricData.Interpretation.Vector)
n.loadShader( s2, keepExistingValues = True )
self.assertEqual(
n["parameters"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"commonStruct",
"commonArray",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"typeChanged5",
"addedI",
"addedF",
"addedColor",
"addedString",
"addedStruct",
"defaultChangedArray",
]
)
self.assertEqual(
n["parameters"]["commonStruct"].keys(),
[
"commonI",
"commonF",
"commonColor",
"commonString",
"typeChanged1",
"typeChanged2",
"typeChanged3",
"typeChanged4",
"addedI",
"addedF",
"addedColor",
"addedString",
]
)
self.assertEqual( arrayToNotGetReloaded, n["parameters"]["commonArray"] )
self.assertNotEqual( arrayToGetReloaded, n["parameters"]["defaultChangedArray"] )
for key, value in values.items() :
self.assertEqual( n["parameters"].descendant( key ).getValue(), value )
self.assertTrue( isinstance( n["parameters"]["typeChanged1"], Gaffer.StringPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged2"], Gaffer.Color3fPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged3"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged4"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( n["parameters"]["typeChanged5"], Gaffer.V3fPlug ) )
self.assertEqual( n["parameters"]["typeChanged5"].interpretation(), IECore.GeometricData.Interpretation.Normal)
n.loadShader( s2, keepExistingValues = False )
for plug in n["parameters"] :
if isinstance( plug, Gaffer.ValuePlug ) :
self.assertTrue( plug.isSetToDefault() )
def testSplineParameters( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "floatSpline", "colorSpline" ] )
self.assertTrue( isinstance( n["parameters"]["floatSpline"], Gaffer.SplineffPlug ) )
self.assertEqual(
n["parameters"]["floatSpline"].getValue().spline(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertTrue( isinstance( n["parameters"]["colorSpline"], Gaffer.SplinefColor3fPlug ) )
self.assertEqual(
n["parameters"]["colorSpline"].getValue().spline(),
IECore.SplinefColor3f(
IECore.CubicBasisf.bSpline(),
[
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
]
)
)
shader = n.attributes()["osl:shader"].outputShader()
self.assertEqual(
shader.parameters["floatSpline"].value,
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertEqual(
shader.parameters["colorSpline"].value,
IECore.SplinefColor3f(
IECore.CubicBasisf.bSpline(),
[
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 0, imath.Color3f( 0 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
( 1, imath.Color3f( 1 ) ),
]
)
)
def testSplineParameterEvaluation( self ) :
numSamples = 100
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
points = [
( 0, imath.Color3f( 0.5 ) ),
( 0.3, imath.Color3f( 0.2 ) ),
( 0.6, imath.Color3f( 1 ) ),
( 0.65, imath.Color3f( 0.5 ) ),
( 0.9, imath.Color3f( 0.7 ) ),
( 1, imath.Color3f( 1 ) )
]
constant = GafferImage.Constant( "Constant" )
constant["format"].setValue( GafferImage.Format( 1, numSamples, 1.000 ) )
image = GafferOSL.OSLImage()
image["in"].setInput( constant["out"] )
image["shader"].setInput( n["out"]["out"] )
for interpolation in [
Gaffer.SplineDefinitionInterpolation.Linear,
Gaffer.SplineDefinitionInterpolation.CatmullRom,
Gaffer.SplineDefinitionInterpolation.BSpline,
Gaffer.SplineDefinitionInterpolation.MonotoneCubic
]:
n["parameters"]["colorSpline"].setValue( Gaffer.SplineDefinitionfColor3f( points, interpolation ) )
oslSamples = list( reversed( GafferImage.ImageAlgo.image( image['out'] )["R"] ) )
s = n['parameters']['colorSpline'].getValue().spline()
cortexSamples = [ s( ( i + 0.5 ) / numSamples )[0] for i in range( numSamples ) ]
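# the Cortex spline is sampled at (i + 0.5) / numSamples, assumed here to line
# up with the pixel centres of the rows shaded by OSLImage above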
for a, b in zip( oslSamples, cortexSamples ):
self.assertAlmostEqual( a, b, places = 4 )
def testArrays( self ) :
s = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrays.osl" )
n = GafferOSL.OSLShader()
n.loadShader( s )
self.assertEqual( n["parameters"].keys(), [ "i", "f", "c", "p", "q", "s", "m" ] )
self.assertTrue( isinstance( n["parameters"]["i"], Gaffer.IntVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["f"], Gaffer.FloatVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["c"], Gaffer.Color3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["p"], Gaffer.V3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["q"], Gaffer.V3fVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["s"], Gaffer.StringVectorDataPlug ) )
self.assertTrue( isinstance( n["parameters"]["m"], Gaffer.M44fVectorDataPlug ) )
self.assertEqual( n["parameters"]["i"].defaultValue(), IECore.IntVectorData( [ 10, 11, 12 ] ) )
self.assertEqual( n["parameters"]["f"].defaultValue(), IECore.FloatVectorData( [ 1, 2 ] ) )
self.assertEqual( n["parameters"]["c"].defaultValue(), IECore.Color3fVectorData(
[ imath.Color3f( 1, 2, 3 ), imath.Color3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["p"].defaultValue(), IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["q"].defaultValue(), IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( n["parameters"]["s"].defaultValue(), IECore.StringVectorData( [ "s", "t", "u", "v", "word" ] ) )
self.assertEqual( n["parameters"]["m"].defaultValue(), IECore.M44fVectorData(
[ imath.M44f() * 1, imath.M44f() * 0, imath.M44f() * 1 ] ) )
self.assertEqual( n["out"].typeId(), Gaffer.Plug.staticTypeId() )
network = n.attributes()["osl:surface"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.outputShader().name, s )
self.assertEqual( network.outputShader().type, "osl:surface" )
self.assertEqual( network.outputShader().parameters["i"], IECore.IntVectorData( [ 10, 11, 12 ] ) )
self.assertEqual( network.outputShader().parameters["f"], IECore.FloatVectorData( [ 1, 2 ] ) )
self.assertEqual( network.outputShader().parameters["c"], IECore.Color3fVectorData(
[ imath.Color3f( 1, 2, 3 ), imath.Color3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["p"], IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["q"], IECore.V3fVectorData(
[ imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ] ) )
self.assertEqual( network.outputShader().parameters["s"], IECore.StringVectorData( [ "s", "t", "u", "v", "word" ] ) )
self.assertEqual( network.outputShader().parameters["m"], IECore.M44fVectorData(
[ imath.M44f() * 1, imath.M44f() * 0, imath.M44f() * 1 ] ) )
def testUnload( self ) :
n = GafferOSL.OSLShader()
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/types.osl" ) )
self.assertTrue( "osl:surface" in n.attributes() )
n.loadShader( "" )
self.assertEqual( len( n["parameters"] ), 0 )
self.assertEqual( n["type"].getValue(), "" )
self.assertEqual( n["name"].getValue(), "" )
self.assertFalse( "osl:surface" in n.attributes() )
def testLoadSurfaceAfterShader( self ) :
n = GafferOSL.OSLShader()
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/outputTypes.osl" ) )
self.assertEqual( len( n["out"] ), 5 )
n.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/constant.osl" ) )
self.assertEqual( len( n["out"] ), 0 )
def testReconnectionOfChildPlugShader( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferOSL.OSLShader()
s["n1"].loadShader( "Maths/AddVector" )
s["n2"] = GafferOSL.OSLShader()
s["n2"].loadShader( "Maths/AddVector" )
s["n3"] = GafferOSL.OSLShader()
s["n3"].loadShader( "Maths/AddVector" )
s["n2"]["parameters"]["a"].setInput( s["n1"]["out"]["out"] )
s["n3"]["parameters"]["a"].setInput( s["n2"]["out"]["out"] )
s.deleteNodes( filter = Gaffer.StandardSet( [ s["n2"] ] ) )
self.assertTrue( s["n3"]["parameters"]["a"].getInput().isSame( s["n1"]["out"]["out"] ) )
def testDisablingShader( self ) :
n1 = GafferOSL.OSLShader( "n1" )
n1.loadShader( "Maths/AddVector" )
n1["parameters"]["a"].setValue( imath.V3f( 5, 7, 6 ) )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/AddVector" )
n3 = GafferOSL.OSLShader( "n3" )
n3.loadShader( "Maths/AddVector" )
n2["parameters"]["a"].setInput( n1["out"]["out"] )
n3["parameters"]["a"].setInput( n2["out"]["out"] )
n2["enabled"].setValue( False )
network = n3.attributes()["osl:shader"]
self.assertEqual( len( network ), 2 )
self.assertEqual( network.inputConnections( "n3" ), [ network.Connection( ( "n1", "out" ), ( "n3", "a" ) ) ] )
self.assertEqual( network.getShader( "n1" ).parameters["a"].value, imath.V3f( 5, 7, 6 ) )
def testDisabledShaderPassesThroughExternalValue( self ) :
n1 = Gaffer.Node()
n1["user"]["v"] = Gaffer.V3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n1["user"]["v"].setValue( imath.V3f( 12, 11, 10 ) )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/AddVector" )
n2["parameters"]["a"].setInput( n1["user"]["v"] )
n3 = GafferOSL.OSLShader( "n3" )
n3.loadShader( "Maths/AddVector" )
n3["parameters"]["a"].setInput( n2["parameters"]["a"] )
n2["enabled"].setValue( False )
network = n3.attributes()["osl:shader"]
self.assertEqual( len( network ), 1 )
self.assertEqual( network.getShader( "n3" ).parameters["a"].value, imath.V3f( 12, 11, 10 ) )
def testDisabledShaderEvaluatesStateCorrectly( self ) :
redShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/red.osl" )
greenShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/green.osl" )
n2 = GafferOSL.OSLShader( "red1" )
n2.loadShader( redShader )
n3 = GafferOSL.OSLShader( "green1" )
n3.loadShader( greenShader )
n1 = GafferOSL.OSLShader( "add" )
n1.loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/add.osl" ) )
n1['parameters']['a'].setInput(n2["out"]["out"])
n1['parameters']['b'].setInput(n3["out"]["out"])
sphere = GafferScene.Sphere()
shaderAssignment = GafferScene.ShaderAssignment()
shaderAssignment["in"].setInput(sphere["out"])
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
shaderAssignment["filter"].setInput(pathFilter["out"])
shaderAssignment["shader"].setInput(n1["out"]["out"])
network = shaderAssignment["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len( network ), 3 )
self.assertEqual( network.getShader( "red1" ).name.split( "/" )[-1], "red" )
self.assertEqual( network.getShader( "green1" ).name.split( "/" )[-1], "green")
self.assertEqual( network.getShader( "add" ).name.split( "/" )[-1], "add")
# when we disable the add shader we should get the shader connected to its pass-through parameter ("a"), i.e. n2 (red1)
n1["enabled"].setValue( False )
network = shaderAssignment["out"].attributes( "/sphere" )["osl:surface"]
self.assertEqual( len ( network ), 1 )
self.assertEqual( network.getShader( "red1" ).name.split( "/" )[-1], "red" )
def testShaderSerialisation( self ) :
s = Gaffer.ScriptNode()
s['n2'] = GafferOSL.OSLShader()
s['n2'].loadShader( "Pattern/Noise" )
s['n'] = GafferOSL.OSLShader()
s['n'].loadShader( "Pattern/Noise" )
s['n2']['parameters']['scale'].setInput( s['n']['out']['n'] )
self.assertEqual( s['n2']['parameters']['scale'].getInput(), s['n']['out']['n'] )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2['n2']['parameters']['scale'].getInput(), s2['n']['out']['n'] )
def testSplineParameterSerialisation( self ) :
s = Gaffer.ScriptNode()
shad = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.osl" )
s['n'] = GafferOSL.OSLShader()
s['n'].loadShader( shad )
splineValue = Gaffer.SplineDefinitionfColor3f( [ ( random.random(), imath.Color3f( random.random(), random.random(), random.random() ) ) for i in range( 10 ) ], Gaffer.SplineDefinitionInterpolation.Linear )
s['n']["parameters"]["colorSpline"].setValue( splineValue )
serialised = s.serialise()
colorSplineLines = [ i for i in serialised.split( "\n" ) if "colorSpline" in i ]
# Expect one clearPoints call to get serialised
self.assertEqual( 1, sum( "clearPoints" in i for i in colorSplineLines ) )
# Expect 3 addChild calls per point (the parent plug, and x and y)
self.assertEqual( 30, sum( "addChild" in i for i in colorSplineLines ) )
s2 = Gaffer.ScriptNode()
s2.execute( serialised )
self.assertEqual( s2['n']["parameters"]["colorSpline"].getValue(), splineValue )
def testComponentToComponentConnections( self ) :
n1 = GafferOSL.OSLShader( "n1" )
n1.loadShader( "Maths/MixColor" )
n2 = GafferOSL.OSLShader( "n2" )
n2.loadShader( "Maths/MixColor" )
n2["parameters"]["a"]["r"].setInput( n1["out"]["out"]["g"] )
n2["parameters"]["a"]["g"].setInput( n1["out"]["out"]["b"] )
n2["parameters"]["a"]["b"].setInput( n1["out"]["out"]["r"] )
network = n2.attributes()["osl:shader"]
self.assertEqual(
network.inputConnections( "n2" ),
[
( ( "n1", "out.r" ), ( "n2", "a.b" ) ),
( ( "n1", "out.b" ), ( "n2", "a.g" ) ),
( ( "n1", "out.g" ), ( "n2", "a.r" ) ),
]
)
def testGetConnectedParameterValueInsideSceneNode( self ) :
n = GafferScene.SceneNode()
n["n1"] = GafferOSL.OSLShader()
n["n1"].loadShader( "Maths/AddColor" )
n["n2"] = GafferOSL.OSLShader()
n["n2"].loadShader( "Maths/AddColor" )
n["n2"]["parameters"]["a"].setInput( n["n1"]["out"]["out"] )
self.assertEqual( n["n2"]["parameters"]["a"].getValue(), imath.Color3f( 0 ) )
def testOutputNameIncludedInNetwork( self ) :
shader = GafferOSL.OSLShader( "globals" )
shader.loadShader( "Utility/Globals" )
shaderPlug = GafferScene.ShaderPlug()
shaderPlug.setInput( shader["out"] )
network1 = shaderPlug.attributes()["osl:shader"]
hash1 = shaderPlug.attributesHash()
shaderPlug.setInput( shader["out"]["globalP"] )
network2 = shaderPlug.attributes()["osl:shader"]
hash2 = shaderPlug.attributesHash()
shaderPlug.setInput( shader["out"]["globalN"] )
network3 = shaderPlug.attributes()["osl:shader"]
hash3 = shaderPlug.attributesHash()
self.assertEqual( network1.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals" ) )
self.assertEqual( network2.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals", "globalP" ) )
self.assertEqual( network3.getOutput(), IECoreScene.ShaderNetwork.Parameter( "globals", "globalN" ) )
self.assertEqual( network1.getShader( "globals" ), network2.getShader( "globals" ) )
self.assertEqual( network1.getShader( "globals" ), network3.getShader( "globals" ) )
self.assertNotEqual( hash1, hash2 )
self.assertNotEqual( hash2, hash3 )
def testShaderTypeAssignsAsSurfaceType( self ) :
plane = GafferScene.Plane()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
shader = GafferOSL.OSLShader( "globals" )
shader.loadShader( "Maths/AddColor" )
shaderAssignment = GafferScene.ShaderAssignment()
shaderAssignment["in"].setInput( plane["out"] )
shaderAssignment["shader"].setInput( shader["out"]["out"] )
shaderAssignment["filter"].setInput( planeFilter["out"] )
self.assertEqual( shaderAssignment["out"].attributes( "/plane" ).keys(), [ "osl:surface" ] )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "2edd70a3c6af48d82234c5743d6fc77c",
"timestamp": "",
"source": "github",
"line_count": 1048,
"max_line_length": 208,
"avg_line_length": 36.81393129770992,
"alnum_prop": 0.6615950856639279,
"repo_name": "boberfly/gaffer",
"id": "a884949f1a2fd6785ffc66d59b8775a9dd50216f",
"size": "40372",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/GafferOSLTest/OSLShaderTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
}
|
"""This code example creates new cdn configurations.
To determine which configurations exist, run get_all_cdn_configurations.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
cdn_config_service = client.GetService('CdnConfigurationService',
version='v202211')
# Create cdn config objects.
# Only LIVE_STREAM_SOURCE_CONTENT is currently supported by the API.
configs = [{
# Basic example with no security policies.
'name': 'ApiConfig1',
'cdnConfigurationType': 'LIVE_STREAM_SOURCE_CONTENT',
'sourceContentConfiguration': {
'ingestSettings': {
'urlPrefix': 'ingest1.com',
'securityPolicy': {
'securityPolicyType': 'NONE'
}
},
'defaultDeliverySettings': {
'urlPrefix': 'delivery1.com',
'securityPolicy': {
'securityPolicyType': 'NONE'
}
}
}
}, {
# Complex example with security policies.
'name': 'ApiConfig2',
'cdnConfigurationType': 'LIVE_STREAM_SOURCE_CONTENT',
'sourceContentConfiguration': {
'ingestSettings': {
'urlPrefix': 'ingest1.com',
'securityPolicy': {
'securityPolicyType': 'AKAMAI',
'disableServerSideUrlSigning': False,
'tokenAuthenticationKey': 'abc123',
}
},
'defaultDeliverySettings': {
'urlPrefix': 'delivery1.com',
'securityPolicy': {
'securityPolicyType': 'AKAMAI',
'disableServerSideUrlSigning': True,
'originForwardingType': 'CONVENTIONAL',
'originPathPrefix': '/path/to/my/origin'
}
}
}
}]
# Add configs.
configs = cdn_config_service.createCdnConfigurations(configs)
# Display results.
for config in configs:
print('Created CDN configuration with type "%s" and name "%s".'
% (config['cdnConfigurationType'], config['name']))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
{
"content_hash": "fc44b6c1c7af17eb8a684a334a8a4503",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 75,
"avg_line_length": 32.19178082191781,
"alnum_prop": 0.5706382978723404,
"repo_name": "googleads/googleads-python-lib",
"id": "32a3e6d9cd572019ed7686c57c550c1481aa6a09",
"size": "2972",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202211/cdn_configuration_service/create_cdn_configurations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
from absl.testing import absltest
import jaxopt
from jaxopt._src import test_util
class ImportTest(test_util.JaxoptTestCase):
def test_implicit_diff(self):
jaxopt.implicit_diff.root_vjp
from jaxopt.implicit_diff import root_vjp
def test_prox(self):
jaxopt.prox.prox_none
from jaxopt.prox import prox_none
def test_projection(self):
jaxopt.projection.projection_simplex
from jaxopt.projection import projection_simplex
def test_tree_util(self):
from jaxopt.tree_util import tree_vdot
def test_linear_solve(self):
from jaxopt.linear_solve import solve_lu
def test_base(self):
from jaxopt.base import LinearOperator
def test_perturbations(self):
from jaxopt.perturbations import make_perturbed_argmax
def test_loss(self):
jaxopt.loss.binary_logistic_loss
from jaxopt.loss import binary_logistic_loss
def test_objective(self):
jaxopt.objective.least_squares
from jaxopt.objective import least_squares
def test_loop(self):
from jaxopt.loop import while_loop
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "9cd6644a0247975ae861f65f4b78ea24",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 58,
"avg_line_length": 23.76086956521739,
"alnum_prop": 0.7365050320219579,
"repo_name": "google/jaxopt",
"id": "0b6882c1c93d88f6f238de8ff7333df7bb1993eb",
"size": "1669",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/import_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "670734"
}
],
"symlink_target": ""
}
|
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import print_function, division
from sympy.core import S, C, sympify, pi, Dummy
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import oo, Rational
from sympy.core.compatibility import range
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.geometry.exceptions import GeometryError
from sympy.polys import Poly, PolynomialError, DomainError
from sympy.polys.polyutils import _nsort, _not_a_coeff
from sympy.solvers import solve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from .entity import GeometryEntity
from .point import Point
from .line import LinearEntity, Line
from .util import _symbol, idiff
import random
from sympy.utilities.decorator import doctest_depends_on
class Ellipse(GeometryEntity):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
    When symbolic values for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __new__(
cls, center=None, hradius=None, vradius=None, eccentricity=None,
**kwargs):
hradius = sympify(hradius)
vradius = sympify(vradius)
eccentricity = sympify(eccentricity)
if center is None:
center = Point(0, 0)
else:
center = Point(center)
if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
raise ValueError('Exactly two arguments of "hradius", '
'"vradius", and "eccentricity" must not be None."')
if eccentricity is not None:
if hradius is None:
hradius = vradius / sqrt(1 - eccentricity**2)
elif vradius is None:
vradius = hradius * sqrt(1 - eccentricity**2)
if hradius == vradius:
return Circle(center, hradius, **kwargs)
return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
@property
def center(self):
"""The center of the ellipse.
Returns
=======
center : number
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.center
Point(0, 0)
"""
return self.args[0]
@property
def hradius(self):
"""The horizontal radius of the ellipse.
Returns
=======
hradius : number
See Also
========
vradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.hradius
3
"""
return self.args[1]
@property
def vradius(self):
"""The vertical radius of the ellipse.
Returns
=======
vradius : number
See Also
========
hradius, major, minor
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.vradius
1
"""
return self.args[2]
@property
def minor(self):
"""Shorter axis of the ellipse (if it can be determined) else vradius.
Returns
=======
minor : number or expression
See Also
========
hradius, vradius, major
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.minor
1
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).minor
b
>>> Ellipse(p1, b, a).minor
a
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).minor
m
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = a - b < 0
if o == True:
return a
elif o == False:
return b
return self.vradius
@property
def major(self):
"""Longer axis of the ellipse (if it can be determined) else hradius.
Returns
=======
major : number or expression
See Also
========
hradius, vradius, minor
Examples
========
>>> from sympy import Point, Ellipse, Symbol
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.major
3
>>> a = Symbol('a')
>>> b = Symbol('b')
>>> Ellipse(p1, a, b).major
a
>>> Ellipse(p1, b, a).major
b
>>> m = Symbol('m')
>>> M = m + 1
>>> Ellipse(p1, m, M).major
m + 1
"""
ab = self.args[1:3]
if len(ab) == 1:
return ab[0]
a, b = ab
o = b - a < 0
if o == True:
return a
elif o == False:
return b
return self.hradius
@property
def area(self):
"""The area of the ellipse.
Returns
=======
area : number
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.area
3*pi
"""
return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
"""The circumference of the ellipse.
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.circumference
12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
"""
if self.eccentricity == 1:
return 2*pi*self.hradius
else:
x = C.Dummy('x', real=True)
return 4*self.major*C.Integral(
sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
@property
def eccentricity(self):
"""The eccentricity of the ellipse.
Returns
=======
eccentricity : number
Examples
========
>>> from sympy import Point, Ellipse, sqrt
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, sqrt(2))
>>> e1.eccentricity
sqrt(7)/3
"""
return self.focus_distance / self.major
@property
def periapsis(self):
"""The periapsis of the ellipse.
The shortest distance between the focus and the contour.
Returns
=======
periapsis : number
See Also
========
apoapsis : Returns greatest distance between focus and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.periapsis
-2*sqrt(2) + 3
"""
return self.major * (1 - self.eccentricity)
@property
def apoapsis(self):
"""The apoapsis of the ellipse.
The greatest distance between the focus and the contour.
Returns
=======
apoapsis : number
See Also
========
periapsis : Returns shortest distance between foci and contour
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.apoapsis
2*sqrt(2) + 3
"""
return self.major * (1 + self.eccentricity)
@property
def focus_distance(self):
"""The focale distance of the ellipse.
The distance between the center and one focus.
Returns
=======
focus_distance : number
See Also
========
foci
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.focus_distance
2*sqrt(2)
"""
return Point.distance(self.center, self.foci[0])
@property
def foci(self):
"""The foci of the ellipse.
Notes
-----
The foci can only be calculated if the major/minor axes are known.
Raises
======
ValueError
When the major and minor axis cannot be determined.
See Also
========
sympy.geometry.point.Point
focus_distance : Returns the distance between focus and center
Examples
========
>>> from sympy import Point, Ellipse
>>> p1 = Point(0, 0)
>>> e1 = Ellipse(p1, 3, 1)
>>> e1.foci
(Point(-2*sqrt(2), 0), Point(2*sqrt(2), 0))
"""
c = self.center
hr, vr = self.hradius, self.vradius
if hr == vr:
return (c, c)
# calculate focus distance manually, since focus_distance calls this
# routine
fd = sqrt(self.major**2 - self.minor**2)
if hr == self.minor:
# foci on the y-axis
return (c + Point(0, -fd), c + Point(0, fd))
elif hr == self.major:
# foci on the x-axis
return (c + Point(-fd, 0), c + Point(fd, 0))
def rotate(self, angle=0, pt=None):
"""Rotate ``angle`` radians counterclockwise about Point ``pt``.
Note: since the general ellipse is not supported, only rotations that
are integer multiples of pi/2 are allowed.
Examples
========
>>> from sympy import Ellipse, pi
>>> Ellipse((1, 0), 2, 1).rotate(pi/2)
Ellipse(Point(0, 1), 1, 2)
>>> Ellipse((1, 0), 2, 1).rotate(pi)
Ellipse(Point(-1, 0), 2, 1)
"""
if self.hradius == self.vradius:
return self.func(*self.args)
if (angle/S.Pi).is_integer:
return super(Ellipse, self).rotate(angle, pt)
if (2*angle/S.Pi).is_integer:
return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
        # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
        raise NotImplementedError('Only rotations that are multiples of pi/2 are currently supported for Ellipse.')
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the major and minor
axes which must be scaled and they are not GeometryEntities.
Examples
========
>>> from sympy import Ellipse
>>> Ellipse((0, 0), 2, 1).scale(2, 4)
Circle(Point(0, 0), 4)
>>> Ellipse((0, 0), 2, 1).scale(2)
Ellipse(Point(0, 0), 4, 1)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
h = self.hradius
v = self.vradius
return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point(1, 0), -1)
>>> from sympy import Ellipse, Line, Point
>>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
Traceback (most recent call last):
...
NotImplementedError:
General Ellipse is not supported but the equation of the reflected
Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
Notes
=====
        Until the general ellipse (with no axis parallel to the x-axis) is
        supported, a NotImplementedError is raised and the equation whose
        zeros define the reflected ellipse is given.
"""
from .util import _uniquely_named_symbol
if line.slope in (0, oo):
c = self.center
c = c.reflect(line)
return self.func(c, -self.hradius, self.vradius)
else:
x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
expr = self.equation(x, y)
p = Point(x, y).reflect(line)
result = expr.subs(zip((x, y), p.args
), simultaneous=True)
raise NotImplementedError(filldedent(
'General Ellipse is not supported but the equation '
'of the reflected Ellipse is given by the zeros of: ' +
"f(%s, %s) = %s" % (str(x), str(y), str(result))))
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
-----
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Ellipse, S
>>> from sympy.abc import t
>>> e = Ellipse((0, 0), 3, 2)
>>> e.encloses_point((0, 0))
True
>>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
False
>>> e.encloses_point((4, 0))
False
"""
p = Point(p)
if p in self:
return False
if len(self.foci) == 2:
# if the combined distance from the foci to p (h1 + h2) is less
# than the combined distance from the foci to the minor axis
# (which is the same as the major axis length) then p is inside
# the ellipse
h1, h2 = [f.distance(p) for f in self.foci]
test = 2*self.major - (h1 + h2)
else:
test = self.radius - self.center.distance(p)
return fuzzy_bool(test.is_positive)
@doctest_depends_on(modules=('pyglet',))
def tangent_lines(self, p):
"""Tangent lines between `p` and the ellipse.
If `p` is on the ellipse, returns the tangent line through point `p`.
Otherwise, returns the tangent line(s) from `p` to the ellipse, or
None if no tangent line is possible (e.g., `p` inside ellipse).
Parameters
==========
p : Point
Returns
=======
tangent_lines : list with 1 or 2 Lines
Raises
======
NotImplementedError
Can only find tangent lines for a point, `p`, on the ellipse.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Line
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.tangent_lines(Point(3, 0))
[Line(Point(3, 0), Point(3, -12))]
>>> # This will plot an ellipse together with a tangent line.
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Point, Ellipse
>>> e = Ellipse(Point(0,0), 3, 2)
>>> t = e.tangent_lines(e.random_point())
>>> p = Plot()
>>> p[0] = e # doctest: +SKIP
>>> p[1] = t # doctest: +SKIP
"""
p = Point(p)
if self.encloses_point(p):
return []
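        # Added note (not in the original source): for a point on the ellipse the
        # tangent direction follows from implicit differentiation of the ellipse
        # equation, giving slope dy/dx = -vr**2*(px - cx)/(hr**2*(py - cy)); the
        # rise/run pair computed below is exactly that slope, written in terms of
        # delta = center - p.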
if p in self:
delta = self.center - p
rise = (self.vradius ** 2)*delta.x
run = -(self.hradius ** 2)*delta.y
p2 = Point(simplify(p.x + run),
simplify(p.y + rise))
return [Line(p, p2)]
else:
if len(self.foci) == 2:
f1, f2 = self.foci
maj = self.hradius
test = (2*maj -
Point.distance(f1, p) -
Point.distance(f2, p))
else:
test = self.radius - Point.distance(self.center, p)
if test.is_number and test.is_positive:
return []
# else p is outside the ellipse or we can't tell. In case of the
# latter, the solutions returned will only be valid if
# the point is not inside the ellipse; if it is, nan will result.
x, y = Dummy('x'), Dummy('y')
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
slope = Line(p, Point(x, y)).slope
tangent_points = solve([slope - dydx, eq], [x, y])
# handle horizontal and vertical tangent lines
if len(tangent_points) == 1:
assert tangent_points[0][
0] == p.x or tangent_points[0][1] == p.y
return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
# others
return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
def is_tangent(self, o):
"""Is `o` tangent to the ellipse?
Parameters
==========
o : GeometryEntity
An Ellipse, LinearEntity or Polygon
Raises
======
NotImplementedError
When the wrong type of argument is supplied.
Returns
=======
is_tangent: boolean
True if o is tangent to the ellipse, False otherwise.
See Also
========
tangent_lines
Examples
========
>>> from sympy import Point, Ellipse, Line
>>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
>>> e1 = Ellipse(p0, 3, 2)
>>> l1 = Line(p1, p2)
>>> e1.is_tangent(l1)
True
"""
inter = None
if isinstance(o, Ellipse):
inter = self.intersection(o)
if isinstance(inter, Ellipse):
return False
return (inter is not None and len(inter) == 1
and isinstance(inter[0], Point))
elif isinstance(o, LinearEntity):
inter = self._do_line_intersection(o)
if inter is not None and len(inter) == 1:
return inter[0] in o
else:
return False
elif isinstance(o, Polygon):
c = 0
for seg in o.sides:
inter = self._do_line_intersection(seg)
c += len([True for point in inter if point in seg])
return c == 1
else:
raise NotImplementedError("Unknown argument type")
def normal_lines(self, p, prec=None):
"""Normal lines between `p` and the ellipse.
Parameters
==========
p : Point
Returns
=======
normal_lines : list with 1, 2 or 4 Lines
Examples
========
>>> from sympy import Line, Point, Ellipse
>>> e = Ellipse((0, 0), 2, 3)
>>> c = e.center
>>> e.normal_lines(c + Point(1, 0))
[Line(Point(0, 0), Point(1, 0))]
>>> e.normal_lines(c)
[Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))]
Off-axis points require the solution of a quartic equation. This
often leads to very large expressions that may be of little practical
use. An approximate solution of `prec` digits can be obtained by
passing in the desired value:
>>> e.normal_lines((3, 3), prec=2)
[Line(Point(-38/47, -85/31), Point(9/47, -21/17)),
Line(Point(19/13, -43/21), Point(32/13, -8/3))]
Whereas the above solution has an operation count of 12, the exact
solution has an operation count of 2020.
"""
p = Point(p)
# XXX change True to something like self.angle == 0 if the arbitrarily
# rotated ellipse is introduced.
# https://github.com/sympy/sympy/issues/2815)
if True:
rv = []
if p.x == self.center.x:
rv.append(Line(self.center, slope=oo))
if p.y == self.center.y:
rv.append(Line(self.center, slope=0))
if rv:
# at these special orientations of p either 1 or 2 normals
# exist and we are done
return rv
# find the 4 normal points and construct lines through them with
# the corresponding slope
x, y = Dummy('x', real=True), Dummy('y', real=True)
eq = self.equation(x, y)
dydx = idiff(eq, y, x)
norm = -1/dydx
slope = Line(p, (x, y)).slope
seq = slope - norm
yis = solve(seq, y)[0]
xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
if len(xeq.free_symbols) == 1:
try:
# this is so much faster, it's worth a try
xsol = Poly(xeq, x).real_roots()
except (DomainError, PolynomialError, NotImplementedError):
xsol = _nsort(solve(xeq, x), separated=True)[0]
points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
else:
raise NotImplementedError(
'intersections for the general ellipse are not supported')
slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
if prec is not None:
points = [pt.n(prec) for pt in points]
slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
return [Line(pt, slope=s) for pt,s in zip(points, slopes)]
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the functions.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.arbitrary_point()
Point(3*cos(t), 2*sin(t))
"""
t = _symbol(parameter)
if t.name in (f.name for f in self.free_symbols):
raise ValueError(filldedent('Symbol %s already appears in object '
'and cannot be used as a parameter.' % t.name))
return Point(self.center.x + self.hradius*C.cos(t),
self.center.y + self.vradius*C.sin(t))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the Ellipse.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.plot_interval()
[t, -pi, pi]
"""
t = _symbol(parameter)
return [t, -S.Pi, S.Pi]
def random_point(self, seed=None):
"""A random point on the ellipse.
Returns
=======
point : Point
See Also
========
sympy.geometry.point.Point
arbitrary_point : Returns parameterized point on ellipse
Notes
-----
        A random point may not appear to be on the ellipse, i.e., `p in e` may
return False. This is because the coordinates of the point will be
floating point values, and when these values are substituted into the
equation for the ellipse the result may not be zero because of floating
point rounding error.
Examples
========
>>> from sympy import Point, Ellipse, Segment
>>> e1 = Ellipse(Point(0, 0), 3, 2)
>>> e1.random_point() # gives some random point
Point(...)
>>> p1 = e1.random_point(seed=0); p1.n(2)
Point(2.1, 1.4)
        The random_point method ensures that the point will test as being
in the ellipse:
>>> p1 in e1
True
Notes
=====
An arbitrary_point with a random value of t substituted into it may
        not test as being on the ellipse because the expression testing whether
        a point is on the ellipse doesn't simplify to zero and doesn't evaluate
        exactly to zero:
>>> from sympy.abc import t
>>> e1.arbitrary_point(t)
Point(3*cos(t), 2*sin(t))
>>> p2 = _.subs(t, 0.1)
>>> p2 in e1
False
        Note that the random_point routine does not take this approach. A value
        for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
There is a small chance that this will give a point that will not
test as being in the ellipse, so the process is repeated (up to 10
times) until a valid point is obtained.
"""
from sympy import sin, cos, Rational
t = _symbol('t')
x, y = self.arbitrary_point(t).args
# get a random value in [-1, 1) corresponding to cos(t)
# and confirm that it will test as being in the ellipse
if seed is not None:
rng = random.Random(seed)
else:
rng = random
for i in range(10): # should be enough?
# simplify this now or else the Float will turn s into a Float
c = 2*Rational(rng.random()) - 1
s = sqrt(1 - c**2)
p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
if p1 in self:
return p1
raise GeometryError(
'Having problems generating a point in the ellipse.')
def equation(self, x='x', y='y'):
"""The equation of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
See Also
========
arbitrary_point : Returns parameterized point on ellipse
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.equation()
y**2/4 + (x/3 - 1/3)**2 - 1
"""
x = _symbol(x)
y = _symbol(y)
t1 = ((x - self.center.x) / self.hradius)**2
t2 = ((y - self.center.y) / self.vradius)**2
return t1 + t2 - 1
def _do_line_intersection(self, o):
"""
Find the intersection of a LinearEntity and the ellipse.
All LinearEntities are treated as a line and filtered at
the end to see that they lie in o.
"""
hr_sq = self.hradius ** 2
vr_sq = self.vradius ** 2
lp = o.points
ldir = lp[1] - lp[0]
diff = lp[0] - self.center
mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)
a = ldir.dot(mdir)
b = ldir.dot(mdiff)
c = diff.dot(mdiff) - 1
det = simplify(b*b - a*c)
result = []
if det == 0:
t = -b / a
result.append(lp[0] + (lp[1] - lp[0]) * t)
# Definite and potential symbolic intersections are allowed.
elif (det > 0) != False:
root = sqrt(det)
t_a = (-b - root) / a
t_b = (-b + root) / a
result.append( lp[0] + (lp[1] - lp[0]) * t_a )
result.append( lp[0] + (lp[1] - lp[0]) * t_b )
return [r for r in result if r in o]
def _do_ellipse_intersection(self, o):
"""The intersection of an ellipse with another ellipse or a circle.
Private helper method for `intersection`.
"""
x = Dummy('x', real=True)
y = Dummy('y', real=True)
seq = self.equation(x, y)
oeq = o.equation(x, y)
result = solve([seq, oeq], [x, y])
return [Point(*r) for r in list(uniq(result))]
def intersection(self, o):
"""The intersection of this ellipse and another geometrical entity
`o`.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntity objects
Notes
-----
Currently supports intersections with Point, Line, Segment, Ray,
Circle and Ellipse types.
See Also
========
sympy.geometry.entity.GeometryEntity
Examples
========
>>> from sympy import Ellipse, Point, Line, sqrt
>>> e = Ellipse(Point(0, 0), 5, 7)
>>> e.intersection(Point(0, 0))
[]
>>> e.intersection(Point(5, 0))
[Point(5, 0)]
>>> e.intersection(Line(Point(0,0), Point(0, 1)))
[Point(0, -7), Point(0, 7)]
>>> e.intersection(Line(Point(5,0), Point(5, 1)))
[Point(5, 0)]
>>> e.intersection(Line(Point(6,0), Point(6, 1)))
[]
>>> e = Ellipse(Point(-1, 0), 4, 3)
>>> e.intersection(Ellipse(Point(1, 0), 4, 3))
[Point(0, -3*sqrt(15)/4), Point(0, 3*sqrt(15)/4)]
>>> e.intersection(Ellipse(Point(5, 0), 4, 3))
[Point(2, -3*sqrt(7)/4), Point(2, 3*sqrt(7)/4)]
>>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
[]
>>> e.intersection(Ellipse(Point(0, 0), 3, 4))
[Point(-363/175, -48*sqrt(111)/175), Point(-363/175, 48*sqrt(111)/175), Point(3, 0)]
>>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
[Point(-17/5, -12/5), Point(-17/5, 12/5), Point(7/5, -12/5), Point(7/5, 12/5)]
"""
if isinstance(o, Point):
if o in self:
return [o]
else:
return []
elif isinstance(o, LinearEntity):
# LinearEntity may be a ray/segment, so check the points
# of intersection for coincidence first
return self._do_line_intersection(o)
elif isinstance(o, Circle):
return self._do_ellipse_intersection(o)
elif isinstance(o, Ellipse):
if o == self:
return self
else:
return self._do_ellipse_intersection(o)
return o.intersection(self)
def evolute(self, x='x', y='y'):
"""The equation of evolute of the ellipse.
Parameters
==========
x : str, optional
Label for the x-axis. Default value is 'x'.
y : str, optional
Label for the y-axis. Default value is 'y'.
Returns
=======
equation : sympy expression
Examples
========
>>> from sympy import Point, Ellipse
>>> e1 = Ellipse(Point(1, 0), 3, 2)
>>> e1.evolute()
2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
"""
if len(self.args) != 3:
raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
x = _symbol(x)
y = _symbol(y)
t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
def __eq__(self, o):
"""Is the other GeometryEntity the same as this ellipse?"""
return isinstance(o, GeometryEntity) and (self.center == o.center and
self.hradius == o.hradius and
self.vradius == o.vradius)
def __hash__(self):
return super(Ellipse, self).__hash__()
def __contains__(self, o):
if isinstance(o, Point):
x = C.Dummy('x', real=True)
y = C.Dummy('y', real=True)
res = self.equation(x, y).subs({x: o.x, y: o.y})
return trigsimp(simplify(res)) is S.Zero
elif isinstance(o, Ellipse):
return self == o
return False
class Circle(Ellipse):
"""A circle in space.
Constructed simply from a center and a radius, or from three
non-collinear points.
Parameters
==========
center : Point
radius : number or sympy expression
points : sequence of three Points
Attributes
==========
radius (synonymous with hradius, vradius, major and minor)
circumference
equation
Raises
======
GeometryError
When trying to construct circle from three collinear points.
When trying to construct circle from incorrect parameters.
See Also
========
Ellipse, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Circle
>>> # a circle constructed from a center and radius
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
    >>> # a circle constructed from three points
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point(1/2, 1/2))
"""
def __new__(cls, *args, **kwargs):
c, r = None, None
if len(args) == 3:
args = [Point(a) for a in args]
if Point.is_collinear(*args):
raise GeometryError(
"Cannot construct a circle from three collinear points")
from .polygon import Triangle
t = Triangle(*args)
c = t.circumcenter
r = t.circumradius
elif len(args) == 2:
# Assume (center, radius) pair
c = Point(args[0])
r = sympify(args[1])
if not (c is None or r is None):
return GeometryEntity.__new__(cls, c, r, **kwargs)
raise GeometryError("Circle.__new__ received unknown arguments")
@property
def radius(self):
"""The radius of the circle.
Returns
=======
radius : number or sympy expression
See Also
========
Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.radius
6
"""
return self.args[1]
@property
def vradius(self):
"""
This Ellipse property is an alias for the Circle's radius.
Whereas hradius, major and minor can use Ellipse's conventions,
the vradius does not exist for a circle. It is always a positive
value in order that the Circle, like Polygons, will have an
area that can be positive or negative as determined by the sign
of the hradius.
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.vradius
6
"""
return abs(self.radius)
@property
def circumference(self):
"""The circumference of the circle.
Returns
=======
circumference : number or SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(3, 4), 6)
>>> c1.circumference
12*pi
"""
return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x)
y = _symbol(y)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
def intersection(self, o):
"""The intersection of this circle with another geometrical entity.
Parameters
==========
o : GeometryEntity
Returns
=======
intersection : list of GeometryEntities
Examples
========
>>> from sympy import Point, Circle, Line, Ray
>>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
>>> p4 = Point(5, 0)
>>> c1 = Circle(p1, 5)
>>> c1.intersection(p2)
[]
>>> c1.intersection(p4)
[Point(5, 0)]
>>> c1.intersection(Ray(p1, p2))
[Point(5*sqrt(2)/2, 5*sqrt(2)/2)]
>>> c1.intersection(Line(p2, p3))
[]
"""
if isinstance(o, Circle):
if o.center == self.center:
if o.radius == self.radius:
return o
return []
dx, dy = (o.center - self.center).args
d = sqrt(simplify(dy**2 + dx**2))
R = o.radius + self.radius
if d > R or d < abs(self.radius - o.radius):
return []
a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
x2 = self.center.x + (dx * a/d)
y2 = self.center.y + (dy * a/d)
h = sqrt(simplify(self.radius**2 - a**2))
rx = -dy * (h/d)
ry = dx * (h/d)
xi_1 = simplify(x2 + rx)
xi_2 = simplify(x2 - rx)
yi_1 = simplify(y2 + ry)
yi_2 = simplify(y2 - ry)
ret = [Point(xi_1, yi_1)]
if xi_1 != xi_2 or yi_1 != yi_2:
ret.append(Point(xi_2, yi_2))
return ret
return Ellipse.intersection(self, o)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
from .polygon import Polygon
|
{
"content_hash": "7ffc90394c8716e9d9fb776e9fd16bc3",
"timestamp": "",
"source": "github",
"line_count": 1507,
"max_line_length": 96,
"avg_line_length": 27.2435301924353,
"alnum_prop": 0.5064545985970382,
"repo_name": "Sumith1896/sympy",
"id": "bedbfde2621d2c2cc43601cc17e527bb8f0260b1",
"size": "41056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/geometry/ellipse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13599543"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/engine/shared_bwing_engine_s02.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "e0d38c71938ca7b10e59526d01da738f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7053291536050157,
"repo_name": "anhstudios/swganh",
"id": "6e48894f55d5f7d6e45b2b3b93ebdb5ce93a863a",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/attachment/engine/shared_bwing_engine_s02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import sys
import os
from qt import *
from qtcanvas import *
from treecanvas import *
from treecanvasview import *
from lpathtree_qt import *
from axis import *
from db import *
from dbdialog import *
from sqlviewdialog import *
from overlay import *
from translator import translate
from parselpath import parse_lpath
from lpath import tokenize
class QBA(QMainWindow):
def __init__(self, tree=None):
QMainWindow.__init__(self)
self.setCaption("LPath QBA")
self.statusBar() # create a status bar
self.db = None
self.queryTree = None # tree on which LPath query was built
self.overlayIdx = None
self.overlays = None
self.treecanvas = None
menuBar = self.menuBar()
menu_File = QPopupMenu(menuBar)
menu_File.insertItem("Save image", self.menu_File_SaveImage)
menu_File.insertSeparator()
menu_File.insertItem("E&xit", qApp, SLOT("closeAllWindows()"))
menu_View = QPopupMenu(menuBar)
menu_View.insertItem("&SQL Translation", self.menu_View_SqlTranslation)
menu_Tools = QPopupMenu(menuBar)
menu_Tools.insertItem("Connect to Database", self.menu_Tools_SetupDatabase)
menu_Tools.insertItem("Select LPath table", self.menu_Tools_SelectLPathTable)
menuBar.insertItem("&File", menu_File)
menuBar.insertItem("&View", menu_View)
menuBar.insertItem("&Tools", menu_Tools)
self.cw = QWidget(self)
self.setCentralWidget(self.cw)
self.layout = QGridLayout(self.cw)
self.treeview = TreeCanvasView(self.cw)
self.layout.addWidget(self.treeview,2,1)
self.layout.setRowStretch(2,1)
if tree:
self.setTree(tree)
hbox = QHBox(self.cw)
hbox.setSpacing(2)
hbox.setMargin(3)
QLabel("LPath\nQuery:", hbox)
self.entQuery = QTextView(hbox)
self.entQuery.setFixedHeight(40)
self.btnQuery = QPushButton("Submit Query", hbox)
self.btnQuery.setFixedHeight(40)
self.layout.addWidget(hbox, 1, 1)
self.layout.setRowStretch(1,0)
self.toolPanel = QDockWindow(self)
self.bgrpTools = QButtonGroup(1, Qt.Horizontal, self.toolPanel)
self.toolPanel.setWidget(self.bgrpTools)
self.addDockWindow(self.toolPanel, Qt.DockLeft)
self.bgrpTools.setExclusive(True)
self.btnClear = QPushButton("Clear", self.bgrpTools)
self.btnNextTree = QPushButton("Next tree", self.bgrpTools)
self.btnNextMatch = QPushButton("Next match", self.bgrpTools)
self.btnNextTree.setEnabled(False)
self.connect(self.btnClear, SIGNAL("clicked()"), self.clearDisplay)
self.connect(self.btnQuery, SIGNAL("clicked()"), self.query)
self.connect(self.btnNextTree, SIGNAL("clicked()"), self.fetchNextTree)
self.connect(self.btnNextMatch, SIGNAL("clicked()"), self.displayNextOverlay)
self.connect(self.treeview, PYSIGNAL("changed"), self._setLPath)
self.connect(self.treeview, PYSIGNAL("highlightLPath"), self._setLPathColor)
self._saveImageDir = None # remember the last "save image" directory
self._queryJustSubmitted = False
def clearDisplay(self):
self.treeview.clear()
self.entQuery.clear()
def query(self):
if self.db is not None:
if not self.treeview.canvas(): return
t = self.treeview.canvas().getTreeModel()
lpath = translate(t)
if lpath is None: return
self.disconnect(self.db.emitter, PYSIGNAL("gotMoreTree"), self.gotMoreTree)
self._queryJustSubmitted = True
self.statusBar().message("Submitted the query. Please wait...")
self.btnNextTree.setEnabled(False)
if self.db.submitQuery(lpath) == True:
self.queryTree = parse_lpath(lpath)
else:
self.statusBar().message("Query failed.")
def _setLPath(self):
t = self.treeview.canvas().getTreeModel()
lpath = translate(t,space=' ')
if lpath is None:
self.entQuery.setText('')
else:
self.entQuery.setText(lpath)
def _setLPathColor(self, s):
self.entQuery.setText(s)
def fetchNextTree(self):
if self.db:
res = self.db.fetchNextTree()
if not res: return
sid, tid, sql, t, ldb, sql2 = res
self.setCaption("LPath QBA: Tree %s" % sid)
self.setTree(t)
self.overlays = find_overlays(sql2, ldb, self.queryTree, t)
self.overlayIdx = len(self.overlays)-1
self.displayNextOverlay()
n = self.db.getNumTreesInMem()
self.btnNextTree.setText("Next tree (%d)" % n)
if n == 0:
self.btnNextTree.setEnabled(False)
def setLineShapeFollowing(self):
self.treeview.overrideLineShape(AxisFollowing)
def setLineShapeImmFollowing(self):
self.treeview.overrideLineShape(AxisImmediateFollowing)
def setLineShapeSibling(self):
self.treeview.overrideLineShape(AxisSibling)
def setLineShapeImmSibling(self):
self.treeview.overrideLineShape(AxisImmediateSibling)
def setLineShapeParent(self):
self.treeview.overrideLineShape(AxisParent)
def setLineShapeAncestor(self):
self.treeview.overrideLineShape(AxisAncestor)
def setTree(self, treemodel):
self.treecanvas = TreeCanvas(treemodel)
self.treeview.setCanvas(self.treecanvas)
self.connect(self.treecanvas,PYSIGNAL('treeUpdated'),self._treeUpdated)
#self.treecanvas.setData(treemodel)
def _treeUpdated(self, *args):
self._setLPath()
# menu callbacks
def menu_File_SaveImage(self):
if self.treecanvas is None:
QMessageBox.warning(self, "No image available",
"There is not tree image to save.")
return
pixmap = self.treecanvas.getAsPixmap()
d = QFileDialog(self, None, True)
d.setCaption("Save As")
d.setMode(QFileDialog.AnyFile)
if self._saveImageDir:
d.setDir(self._saveImageDir)
d.setFilters("PNG (*.png);;"
"JPEG (*.jpg);;"
"BMP (*.bmp);;"
"XPM (*.xpm)")
if d.exec_loop() == QDialog.Rejected: return
filenam = d.selectedFile()
filenam = unicode(filenam)
self._saveImageDir = os.path.dirname(filenam)
if os.path.exists(filenam):
res = QMessageBox.question(
self,
"Error",
"File already exists.\n\n%s\n\n"
"It will be overwritten." % filenam,
QMessageBox.Ok,
QMessageBox.Cancel
)
if res == QMessageBox.Cancel: return
filter = d.selectedFilter()
fmt = str(filter).split()[0]
res = pixmap.save(filenam, fmt)
if res == False:
QMessageBox.warning(
self,
"Error",
"Failed to save image as:\n\n%s" % filenam)
def menu_View_SqlTranslation(self):
t = self.treeview.canvas().getTreeModel()
q = translate(t)
if q.strip():
d = SqlViewDialog(lpql=q, parent=self)
d.exec_loop()
def menu_Tools_SetupDatabase(self):
d = DatabaseConnectionDialog(parent=self)
if d.exec_loop():
self.statusBar().message("Connecting to the database...")
self.db = d.getLPathDb()
self.statusBar().message("Connecting to the database... ok", 150)
self.db.connectToEvent(self.db.EVENT_MORE_TREE, self)
tables = self.db.listTables()
if len(tables) > 1:
self.menu_Tools_SelectLPathTable()
elif len(tables) == 1:
msg = "Accessing table %s. Please wait..." % tables[0]
self.statusBar().message(msg)
self.db.switchLPathTable(tables[0])
self._queryJustSubmitted = True
else:
self.db = None
def menu_Tools_SelectLPathTable(self):
# check if self.tables is not empty
if self.db:
d = TableSelectionDialog(self.db.listTables())
if d.exec_loop():
table = d.getSelectedTable()
msg = "Accessing table %s. Please wait..." % table
self.statusBar().message(msg)
self.db.switchLPathTable(table)
self._queryJustSubmitted = True
else:
QMessageBox.warning(self, "No DB connection",
"Connect to a database first.")
def customEvent(self, e):
if self.db and e.type()==self.db.EVENT_MORE_TREE:
self.gotMoreTree(e.data())
def gotMoreTree(self, numTrees):
self.statusBar().clear()
self.btnNextTree.setText("Next tree (%d)" % numTrees)
if numTrees == 0:
self.btnNextTree.setEnabled(False)
else:
self.btnNextTree.setEnabled(True)
if numTrees == 1 and self._queryJustSubmitted:
self.statusBar().message("Received the first tree.", 300)
self.fetchNextTree()
self._queryJustSubmitted = False
def displayNextOverlay(self):
if self.overlays:
self.overlays[self.overlayIdx].clear()
self.overlayIdx = (self.overlayIdx + 1) % len(self.overlays)
self.overlays[self.overlayIdx].display()
self.btnNextMatch.setText("Next match (%d/%d)" % \
(self.overlayIdx+1, len(self.overlays)))
self._setLPath()
def main():
app = QApplication(sys.argv)
w = QBA()
app.setMainWidget(w)
if len(sys.argv) == 2:
generator = LPathTreeModel.importTreebank(file(sys.argv[1]))
w.setTree(generator.next())
w.show()
w.setCaption('LPath QBA') # this is only necessary on windows
app.exec_loop()
if __name__ == "__main__":
main()
|
{
"content_hash": "136f12ad1db5af74e50a5ab62ce66525",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 87,
"avg_line_length": 38.04428044280443,
"alnum_prop": 0.5860329776915616,
"repo_name": "hectormartinez/rougexstem",
"id": "8628a0922f00e2967cc743ed189790ee81e09d9a",
"size": "10310",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/lpath/qba.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "252646"
},
{
"name": "Batchfile",
"bytes": "2712"
},
{
"name": "C",
"bytes": "3446743"
},
{
"name": "C#",
"bytes": "3511"
},
{
"name": "CSS",
"bytes": "1240"
},
{
"name": "HTML",
"bytes": "315849"
},
{
"name": "M4",
"bytes": "4099"
},
{
"name": "Makefile",
"bytes": "199393"
},
{
"name": "Perl",
"bytes": "378641"
},
{
"name": "Perl6",
"bytes": "67212"
},
{
"name": "Python",
"bytes": "3712683"
},
{
"name": "Shell",
"bytes": "319340"
},
{
"name": "TeX",
"bytes": "536677"
},
{
"name": "XQuery",
"bytes": "5987"
},
{
"name": "XS",
"bytes": "45555"
}
],
"symlink_target": ""
}
|
import os
import time
import math
from IPython.display import display, HTML
import radiopadre
from radiopadre.render import render_refresh_button
class FileBase(object):
"""Base class referring to a datafile. Sets up some standard attributes in the constructor.
Attributes:
fullpath: the full path to the file, e.g. results/dir1/file1.txt
path: path to file relative to root padre directory, e.g. dir1/file1.txt
name: the filename (os.path.basename(path)), e.g. file1.txt
ext: extension with leading dot, e.g. .txt
basename: filename sans extension, e.g. file1
basepath: path+filename sans extension, e.g. dir1/file1
mtime: modification time
mtime_str: string version of mtime
size: size in bytes
size_str: human-readable size string
"""
_unit_list = zip(['', 'k', 'M', 'G', 'T', 'P'], [0, 0, 1, 2, 2, 2])
def __init__(self, path, root=""):
"""Construct a datafile and set up standard attributes.
Args:
path: path to the file
root: root folder, will be stripped from beginning of file path if not empty
"""
self.fullpath = path
if root and path.startswith(root):
path = path[len(root):]
if path.startswith("/"):
path = path[1:]
self.path = path
self.name = os.path.basename(self.path)
self.basepath, self.ext = os.path.splitext(self.path)
self.basename = os.path.basename(self.basepath)
self.size = os.path.getsize(self.fullpath)
self.update_mtime()
# human-friendly size
if self.size > 0:
exponent = min(int(math.log(self.size, 1024)),
len(self._unit_list) - 1)
quotient = float(self.size) / 1024 ** exponent
unit, num_decimals = self._unit_list[exponent]
format_string = '{:.%sf}{}' % (num_decimals)
self.size_str = format_string.format(quotient, unit)
else:
self.size_str = '0'
@staticmethod
def sort_list(filelist, opt="xnt"):
"""
Sort a list of FileBase objects by name, eXtension, Time, Size, optionally Reverse
"""
opt = opt.lower()
# build up order of comparison
cmpattr = []
for attr in opt:
if attr in FileBase._sort_attributes:
cmpattr.append(FileBase._sort_attributes[attr])
def compare(a, b, attrs=cmpattr):
for attr in attrs:
result = cmp(getattr(a, attr), getattr(b, attr))
if result:
return result
return 0
list.sort(filelist, cmp=compare, reverse='r' in opt)
return filelist
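    # Usage sketch (added comment, not in the original source): for example,
    # FileBase.sort_list(files, "tr") orders newest-first, while the default
    # "xnt" compares extension, then base path, then modification time.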
_sort_attributes = dict(x="ext", n="basepath", s="size", t="mtime")
def update_mtime (self):
"""Updates mtime and mtime_str attributes according to current file mtime,
returns mtime_str"""
self.mtime = os.path.getmtime(self.fullpath)
self.mtime_str = time.strftime(radiopadre.TIMEFORMAT,
time.localtime(self.mtime))
return self.mtime_str
def is_updated (self):
"""Returns True if mtime of underlying file has changed"""
return os.path.getmtime(self.fullpath) > self.mtime
def __str__(self):
return self.path
def _repr_html_(self):
return self.show() or self.path
def show(self, *args, **kw):
print self.path
def watch(self, *args, **kw):
display(HTML(render_refresh_button()))
return self.show(*args, **kw)
def data_file(path, root=""):
"""
Creates DataFile object of appropriate type, based on filename extension
"""
from radiopadre.fitsfile import FITSFile
from radiopadre.imagefile import ImageFile
from radiopadre.textfile import TextFile
ext = os.path.splitext(path)[1]
if ext.lower() in [".fits", ".fts"]:
return FITSFile(path, root=root)
elif ext.lower() in [".png", ".jpg", ".jpeg"]:
return ImageFile(path, root=root)
elif ext.lower() in [".txt", ".log"]:
return TextFile(path, root=root)
return FileBase(path, root=root)
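# Illustrative dispatch (added sketch, not part of the original module; the file
# names are made up): data_file("images/map.fits") would return a FITSFile,
# data_file("plots/spec.png") an ImageFile, data_file("logs/run.log") a TextFile,
# and anything else falls back to a plain FileBase.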
def compute_thumb_geometry(N, ncol, mincol, maxcol, width, maxwidth):
"""
Works out thumbnail geometry.
Given nfiles thumbsnails to display, how many rows and columns do we need
to make, and how wide do we need to make the plot?
args:
N: number of thumbnails to display
ncol: use a fixed number of columns. If 0, uses mincol/maxcol.
mincol: use a minimum of that many columns, even if N is fewer.
If N<mincol, will use mincol columns
maxcol: use a maximum of that many columns. If 0, makes a single row
of N columns.
width: if non-zero, fixes width of individual thumbnail, in inches
maxwidth: if width is 0, uses a width of maxwidth/ncol for each
thumbnail
Returns:
tuple of nrow, ncol, width
"""
# figure out number of columns
if not ncol:
mincol = mincol or radiopadre.MINCOL or 0
maxcol = maxcol or radiopadre.MAXCOL or 8
ncol = max(mincol, min(maxcol, N))
# number of rows
nrow = int(math.ceil(N / float(ncol)))
# individual thumbnail width
width = width or ((maxwidth or radiopadre.WIDTH or 16) / float(ncol))
return nrow, ncol, width
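# Worked example (added sketch, not in the original module): with explicit limits,
# compute_thumb_geometry(10, 0, 2, 4, 0, 16) picks ncol = max(2, min(4, 10)) = 4,
# nrow = ceil(10/4) = 3 and a per-thumbnail width of 16/4 = 4.0 inches,
# i.e. it returns (3, 4, 4.0).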
|
{
"content_hash": "32ff76738ba8a52b1700d4ddaca7d58a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 95,
"avg_line_length": 35.28662420382165,
"alnum_prop": 0.5924187725631769,
"repo_name": "o-smirnov/padre",
"id": "4f4f41837259b47501dee4cdd7da67ea56d475ae",
"size": "5540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radiopadre/file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6855"
},
{
"name": "Jupyter Notebook",
"bytes": "13025"
},
{
"name": "Python",
"bytes": "51581"
},
{
"name": "Shell",
"bytes": "3401"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib import admin
from core.models import Post
from core.models import Category
from core.models import Comment
from core.models import Tag
from core.forms import PostForm
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
# raw_id_fields = ('tags',)
form = PostForm
class Meta:
model = Post
admin.site.register(Category)
admin.site.register(Comment)
admin.site.register(Tag)
|
{
"content_hash": "b68e08d949a5249b0e0ac534249e37fa",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 39,
"avg_line_length": 25.5,
"alnum_prop": 0.7581699346405228,
"repo_name": "valbertovc/blog_django_bootstrap_ajax",
"id": "1129c4289ec35f2181c67b23ea2e26bb298bb16c",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75882"
},
{
"name": "HTML",
"bytes": "30317"
},
{
"name": "JavaScript",
"bytes": "516222"
},
{
"name": "Python",
"bytes": "41576"
}
],
"symlink_target": ""
}
|
"The quick brown fox jumps over the lazy dog"
"The quick brown fox jumps over the lazy dog"
"The quick brown fox jumps over the lazy cat"
"The quick brown fox jumps over the lazy dog"
# Expression registers are similar, but not quite the same--they are for evaluating arbitrary vimscript expressions. This MAY
# include a system() call that shells out to something external, but that's probably not the greatest idea. Like other registers,
# you can access the expression register via <CTRL-r>--in this case, `<CTRL-r>=`.
# Ex: `<CTRL-r>=1+1<CR>` from insert mode will insert '2'
|
{
"content_hash": "048f60b9628956992611eff95c06f00a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 118,
"avg_line_length": 51.09090909090909,
"alnum_prop": 0.7473309608540926,
"repo_name": "alexkuang/vim-ll",
"id": "3b7abd1d6623c26a77f2d9b8182407782ccba5bd",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "129935"
},
{
"name": "JavaScript",
"bytes": "213881"
},
{
"name": "PHP",
"bytes": "92"
},
{
"name": "Python",
"bytes": "2133"
},
{
"name": "Scala",
"bytes": "3623"
}
],
"symlink_target": ""
}
|
import numpy as np
from matplotlib import pyplot, cm
min_samples = 256
with open('matplotlib_color_maps.py', 'w') as file:
file.write("""\"\"\"
matplotlib colormaps.
For more information, see
`Choosing Colormaps in Matplotlib <https://matplotlib.org/stable/tutorials/colors/colormaps.html>`_.
\"\"\"
""")
for name in sorted(pyplot.colormaps()):
cmap = cm.get_cmap(name)
name_c = name.capitalize()
if name_c == name:
file.write('{} = [ \n'.format(name))
else:
            # compatibility with older matplotlib_color_maps.py where all names were capitalized
file.write('{} = {} = [ \n'.format(name, name_c))
# cmap.N is the actual number of datapoints the map is constructed with
for x in np.linspace(0, 1, max(cmap.N, min_samples)):
r, g, b = cmap(x)[:3]
file.write(' {x:.4f}, {r:.4f}, {g:.4f}, {b:.4f},\n'.format(**locals()))
file.write(']\n\n')
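# For reference (added note, not part of the original script; values approximate):
# the generated module contains flat float lists such as
#   viridis = Viridis = [
#       0.0000, 0.2670, 0.0049, 0.3294,
#       ...
#   ]
# i.e. repeated (position, r, g, b) quadruples sampled uniformly on [0, 1].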
|
{
"content_hash": "8e48cc4b14f521114a81cf2699fb8723",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 100,
"avg_line_length": 33.275862068965516,
"alnum_prop": 0.5844559585492228,
"repo_name": "K3D-tools/K3D-jupyter",
"id": "e5b1f1a7cc7b56ac741d88a4cdaddf834e59da0f",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "k3d/colormaps/generate_matplotlib_color_maps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "1326"
},
{
"name": "GLSL",
"bytes": "33792"
},
{
"name": "HTML",
"bytes": "8112"
},
{
"name": "JavaScript",
"bytes": "599147"
},
{
"name": "Jupyter Notebook",
"bytes": "5311"
},
{
"name": "Python",
"bytes": "1949685"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
}
|
__all__ = ['GATDLSession', 'GAList', 'GAMetadata', 'GARoot', 'GATask', 'GAUser']
from .galist import GAList
from .gametadata import GAMetadata
from .garoot import GARoot
from .gatask import GATask
from .gauser import GAUser
from .gatdlsession import GATDLSession
from .sdkinfo import SDKInfo
def __setup_bambou():
""" Avoid having bad behavior when using importlib.import_module method
"""
import pkg_resources
from bambou import BambouConfig, NURESTModelController
default_attrs = pkg_resources.resource_filename(__name__, '/resources/attrs_defaults.ini')
BambouConfig.set_default_values_config_file(default_attrs)
NURESTModelController.register_model(GAList)
NURESTModelController.register_model(GAMetadata)
NURESTModelController.register_model(GARoot)
NURESTModelController.register_model(GATask)
NURESTModelController.register_model(GAUser)
__setup_bambou()
|
{
"content_hash": "b6c86be1aec007d0cf13c7c5efe6b09d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 94,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.7628415300546448,
"repo_name": "nuagenetworks/monolithe",
"id": "45a255ee60b60fbead7faf6bf058c5e505b07085",
"size": "1045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/base/sdk/python/tdldk/v1_0/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "415189"
},
{
"name": "Smarty",
"bytes": "184108"
}
],
"symlink_target": ""
}
|
from celery.task import Task
from odnoklassniki_users.models import User
class OdnoklassnikiUsersFetchUsers(Task):
def run(self, ids, only_expired, *args, **kwargs):
return User.remote.fetch(ids=ids) # , only_expired=only_expired)
|
{
"content_hash": "780cbd61f21276115abd76f11a0167d7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.7398373983739838,
"repo_name": "ramusus/django-odnoklassniki-users",
"id": "b66d3fdc53069d4518893c51435ba42c0fd13cc6",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odnoklassniki_users/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30313"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import hashlib
import io
import itertools
import json
import os
import random
import shutil
import subprocess
import tarfile
import tempfile
from configparser import ConfigParser
from contextlib import contextmanager
import factory
import factory.fuzzy
import factory.alchemy
import faker
from factory.alchemy import SQLAlchemyModelFactory
from flask import url_for, current_app
from flask_testing import TestCase
from spkrepo import create_app
from spkrepo.ext import db
from spkrepo.models import (Package, Version, Description, Language, Architecture, DisplayName, Icon, Build, Firmware,
User, Role, Service, Screenshot, Download)
fake = faker.Faker()
class QueryFactory(factory.DictFactory):
timezone = fake.timezone().split('/')[1]
language = factory.LazyAttribute(lambda x: random.choice([l.code for l in Language.query.all()]))
arch = factory.LazyAttribute(lambda x: random.choice([Architecture.to_syno.get(a.code, a.code) for a in
Architecture.query.filter(Architecture.code != 'noarch').
all()]))
build = factory.LazyAttribute(lambda x: random.choice([f.build for f in Firmware.query.all()]))
major = factory.LazyAttribute(lambda x: int(Firmware.find(x.build).version.split('.')[0]))
minor = factory.LazyAttribute(lambda x: int(Firmware.find(x.build).version.split('.')[1]))
unique = factory.LazyAttribute(lambda x: 'synology_%s_%s' % (x.arch, str(random.choice([1, 2, 4, 15, 18, 24])) +
str(random.choice([12, 13, 14, 15])) +
random.choice(['', 'j', '+'])))
package_update_channel = factory.fuzzy.FuzzyChoice(['stable', 'beta'])
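# Shape of a generated query dict (added illustration; the concrete values are
# randomized from the database fixtures, so these are only representative):
#   {'timezone': 'Paris', 'language': 'enu', 'arch': 'cedarview', 'build': 4458,
#    'major': 5, 'minor': 0, 'unique': 'synology_cedarview_412+',
#    'package_update_channel': 'beta'}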
class UserFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = User
id = factory.Sequence(lambda n: n)
username = factory.LazyAttribute(lambda x: fake.user_name())
email = factory.LazyAttribute(lambda x: fake.email())
password = factory.LazyAttribute(lambda x: fake.password())
api_key = factory.LazyAttribute(lambda x: fake.md5())
github_access_token = None
active = True
confirmed_at = datetime.datetime.now()
class IconFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Icon
id = factory.Sequence(lambda n: n)
size = factory.fuzzy.FuzzyChoice(['72', '120'])
class ScreenshotFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Screenshot
id = factory.Sequence(lambda n: n)
class DisplayNameFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = DisplayName
language = factory.LazyAttribute(lambda x: Language.find('enu'))
displayname = factory.LazyAttribute(lambda x: ' '.join(fake.words(nb=2)).title())
class DescriptionFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Description
language = factory.LazyAttribute(lambda x: Language.find('enu'))
description = factory.LazyAttribute(lambda x: ' '.join(fake.sentences(nb=5)))
class PackageFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Package
id = factory.Sequence(lambda n: n)
name = factory.Sequence(lambda n: 'test_%d' % n)
@factory.post_generation
def add_screenshot(self, create, extracted, **kwargs):
if extracted is None or extracted:
if not self.screenshots:
screenshot_path = os.path.join(self.name, 'screenshot_0.png')
self.screenshots.append(ScreenshotFactory.simple_generate(create, path=screenshot_path))
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
if not create:
return
os.mkdir(os.path.join(current_app.config['DATA_PATH'], obj.name))
for screenshot in obj.screenshots:
screenshot_path = os.path.join(current_app.config['DATA_PATH'], screenshot.path)
if not os.path.exists(screenshot_path):
screenshot.save(create_image('Screenshot %s' % obj.name))
class VersionFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Version
id = factory.Sequence(lambda n: n)
package = factory.SubFactory(PackageFactory)
version = factory.Sequence(lambda n: n)
upstream_version = factory.LazyAttribute(lambda x: '%d.%d.%d' % (fake.random_int(0, 5), fake.random_int(0, 10),
fake.random_int(0, 15)))
changelog = factory.LazyAttribute(lambda x: fake.sentence())
report_url = factory.LazyAttribute(lambda x: fake.url())
distributor = factory.LazyAttribute(lambda x: fake.name())
distributor_url = factory.LazyAttribute(lambda x: fake.url())
maintainer = factory.LazyAttribute(lambda x: fake.name())
maintainer_url = factory.LazyAttribute(lambda x: fake.url())
dependencies = factory.LazyAttribute(lambda x: fake.word())
conf_dependencies = factory.LazyAttribute(lambda x: json.dumps({fake.word(): {'dsm_min_ver': '5.0-4300'}}))
conflicts = factory.LazyAttribute(lambda x: fake.word())
conf_conflicts = factory.LazyAttribute(lambda x: json.dumps({fake.word(): {'dsm_min_ver': '5.0-4300'}}))
install_wizard = False
upgrade_wizard = False
startable = None
license = factory.LazyAttribute(lambda x: fake.text())
service_dependencies = factory.LazyAttribute(lambda x: [random.choice(Service.query.all())])
@factory.post_generation
def add_displayname(self, create, extracted, **kwargs):
if extracted is None or extracted:
if 'enu' not in self.displaynames:
displayname = self.package.name.replace('_', ' ').title()
self.displaynames['enu'] = DisplayNameFactory.simple_generate(create, language=Language.find('enu'),
displayname=displayname)
@factory.post_generation
def add_description(self, create, extracted, **kwargs):
if extracted is None or extracted:
if 'enu' not in self.descriptions:
self.descriptions['enu'] = DescriptionFactory.simple_generate(create, language=Language.find('enu'))
@factory.post_generation
def add_icon(self, create, extracted, **kwargs):
if extracted is None or extracted:
if '72' not in self.icons:
icon_path = os.path.join(self.package.name, str(self.version), 'icon_72.png')
self.icons['72'] = IconFactory.simple_generate(create, path=icon_path, size='72')
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
if not create:
return
os.mkdir(os.path.join(current_app.config['DATA_PATH'], obj.package.name, str(obj.version)))
for size, icon in obj.icons.items():
icon_path = os.path.join(current_app.config['DATA_PATH'], icon.path)
if not os.path.exists(icon_path):
icon.save(create_icon(obj.displaynames['enu'].displayname, int(size)))
class BuildFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Build
version = factory.SubFactory(VersionFactory)
firmware = factory.LazyAttribute(lambda x: random.choice(Firmware.query.all()))
architectures = factory.LazyAttribute(lambda x: [random.choice(Architecture.query.
filter(Architecture.code != 'noarch').
all())])
@factory.post_generation
def create_spk(self, create, extracted, **kwargs):
if not create:
return
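        # build the SPK archive on disk and record its MD5 checksum if not already set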
build_filename = Build.generate_filename(self.version.package, self.version, self.firmware, self.architectures)
self.path = os.path.join(self.version.package.name, str(self.version.version), build_filename)
with create_spk(self) as spk_stream:
self.save(spk_stream)
if self.md5 is None:
spk_stream.seek(0)
self.md5 = hashlib.md5(spk_stream.read()).hexdigest()
spk_stream.close()
@classmethod
def create_batch(cls, size, **kwargs):
if 'version' in kwargs and 'firmware' not in kwargs and 'architectures' not in kwargs:
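            # cycle through distinct (firmware, architecture) pairs so builds of the same version do not collide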
combinations = itertools.product(Firmware.query.all(),
Architecture.query.filter(Architecture.code != 'noarch').all())
batch = []
for _ in range(size):
firmware, architecture = next(combinations)
batch.append(cls.create(architectures=[architecture], firmware=firmware, **kwargs))
return batch
return super(BuildFactory, cls).create_batch(size, **kwargs)
class DownloadFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Download
id = factory.Sequence(lambda n: n)
build = factory.SubFactory(BuildFactory)
architecture = factory.LazyAttribute(lambda x: x.build.architectures[0])
firmware_build = factory.LazyAttribute(lambda x: random.choice([f.build for f in Firmware.query.all()]))
ip_address = factory.LazyAttribute(lambda x: fake.ipv4())
user_agent = factory.LazyAttribute(lambda x: fake.user_agent())
date = factory.LazyAttribute(lambda x: fake.date_time_this_month())
def populate_db():
"""Populate the database"""
db.session.execute(Architecture.__table__.insert().values([
{'code': 'noarch'}, {'code': 'cedarview'}, {'code': '88f628x'}, {'code': 'qoriq'}
]))
db.session.execute(Firmware.__table__.insert().values([
{'version': '3.1', 'build': 1594}, {'version': '5.0', 'build': 4458}
]))
db.session.execute(Language.__table__.insert().values([
{'code': 'enu', 'name': 'English'}, {'code': 'fre', 'name': 'French'}
]))
db.session.execute(Role.__table__.insert().values([
{'name': 'admin', 'description': 'Administrator'},
{'name': 'package_admin', 'description': 'Package Administrator'},
{'name': 'developer', 'description': 'Developer'}
]))
db.session.execute(Service.__table__.insert().values([
{'code': 'apache-web'},
{'code': 'mysql'}
]))
# Base test case
class BaseTestCase(TestCase):
DEBUG = False
TESTING = True
LOGIN_DISABLED = False
WTF_CSRF_ENABLED = False
DATA_PATH = tempfile.mkdtemp('spkrepo')
SQLALCHEMY_ECHO = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/test.db' % DATA_PATH
CACHE_NO_NULL_WARNING = True
def create_app(self):
return create_app(config=self)
def setUp(self):
if not os.path.exists(self.DATA_PATH):
os.mkdir(self.DATA_PATH)
db.drop_all()
db.create_all()
populate_db()
db.session.commit()
db.session.autoflush = False
def tearDown(self):
db.session.remove()
db.drop_all()
db.session.autoflush = True
shutil.rmtree(self.DATA_PATH)
def login(self, email, password):
"""
Perform a login action
:param email: email of the user
:param password: password of the user
:return: the response
"""
return self.client.post(url_for('security.login'), data=dict(email=email, password=password),
follow_redirects=True)
def logout(self):
"""
Perform a logout action
:return: the response
"""
return self.client.get(url_for('security.logout'), follow_redirects=True)
def create_user(self, *args, **kwargs):
"""
Create a user with the given roles
:param args: role names for the created user
:param kwargs: attributes to pass to the :class:`UserFactory`
:return: the created user
"""
user = UserFactory(roles=[Role.query.filter_by(name=role).one() for role in args], **kwargs)
db.session.commit()
return user
@contextmanager
def logged_user(self, *args, **kwargs):
"""
Create a user with the given roles and perform login action
:param args: role names for the created user
:param kwargs: attributes to pass to the :class:`UserFactory`
:return: the logged user
"""
user = self.create_user(*args, **kwargs)
self.login(user.email, user.password)
yield user
self.logout()
def assert201(self, response, message=None):
"""
Check if response status code is 201
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 201, message)
def assert302(self, response, message=None):
"""
Check if response status code is 302
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 302, message)
def assertRedirectsTo(self, response, location, message=None):
"""
Check if response is a redirect
:param response: Flask response
:param location: the redirect location
:param message: Message to display on test failure
"""
self.assertRedirects(response, location, message)
def assert409(self, response, message=None):
"""
Check if response status code is 409
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 409, message)
def assert422(self, response, message=None):
"""
Check if response status code is 422
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 422, message)
def assertHeader(self, response, header, value, message=None):
"""
Check a response header value
:param response: Flask response
:param header: Header name
:param value: Expected value of the header
:param message: Message to display on test failure
"""
self.assertIn(header, response.headers, message)
self.assertEqual(response.headers[header], value, message)
def create_info(build):
"""
    Create a dict to emulate the INFO file of an SPK
:param build: build to use to construct the info dict
:type build: :class:`~spkrepo.models.Build`
:return: the info dict
"""
info = {'package': build.version.package.name, 'version': build.version.version_string,
'arch': ' '.join(Architecture.to_syno.get(a.code, a.code) for a in build.architectures),
'displayname': build.version.displaynames['enu'].displayname,
'description': build.version.descriptions['enu'].description,
'firmware': build.firmware.firmware_string}
if build.version.changelog:
info['changelog'] = build.version.changelog
if build.version.report_url:
info['report_url'] = build.version.report_url
if build.version.distributor:
info['distributor'] = build.version.distributor
if build.version.distributor_url:
info['distributor_url'] = build.version.distributor_url
if build.version.maintainer:
info['maintainer'] = build.version.maintainer
if build.version.maintainer_url:
info['maintainer_url'] = build.version.maintainer_url
if build.version.dependencies:
info['install_dep_packages'] = build.version.dependencies
if build.version.conflicts:
info['install_conflict_packages'] = build.version.conflicts
if build.version.service_dependencies:
info['install_dep_services'] = ':'.join([s.code for s in build.version.service_dependencies])
if build.version.startable is not None:
info['startable'] = 'yes' if build.version.startable else 'no'
for l, d in build.version.displaynames.items():
info['displayname_%s' % l] = d.displayname
for l, d in build.version.descriptions.items():
info['description_%s' % l] = d.description
if build.version.conf_dependencies is not None or build.version.conf_conflicts is not None:
info['support_conf_folder'] = 'yes'
return info
def create_icon(text, size=72):
"""
Create a square icon with some `text` and the given `size`
:param text: text to display in the icon
:param int size: size of the icon
:return: the icon stream
"""
return create_image(text, size, size)
def create_image(text, width=640, height=480):
"""
    Create an image with some `text` and the given `width` and `height`
:param text: text to display in the image
:param int width: width of the image
:param int height: height of the image
:return: the image stream
"""
command = ['convert', '-size', '%dx%d' % (width, height), 'canvas:none', '-gravity', 'Center',
'-fill', 'grey', '-draw', 'roundRectangle 0,0 %d,%d 15,15' % (width, height),
'-fill', 'black', '-pointsize', '12', '-draw', 'text 0,0 \'%s\'' % text,
'png:-']
screenshot_stream = io.BytesIO()
process = subprocess.Popen(command, stdout=subprocess.PIPE)
screenshot_stream.write(process.communicate()[0])
screenshot_stream.seek(0)
return screenshot_stream
def create_spk(build, info=None, signature=None, with_checksum=False, with_package_icons=True, with_info_icons=False,
with_info=True, with_package=True, with_scripts=True, with_conf=False, info_encoding='utf-8',
license_encoding='utf-8', signature_encoding='ascii', conf_dependencies_encoding='utf-8',
conf_conflicts_encoding='utf-8', conf_privilege_encoding='utf-8', conf_resource_encoding='utf-8'):
"""
Create a valid SPK file
:param build: base build on which the SPK will be built
:type build: :class:`~spkrepo.models.Build`
:param info: INFO dict or `None` to use the result of :func:`create_info`
:type info: dict or io.BytesIO
:param signature: content of the syno_signature.asc file, if any
:param bool with_checksum: whether to compute the checksum and include it in the INFO
:param bool with_package_icons: whether to include the icons in the SPK
:param bool with_info_icons: whether to include the icons in the INFO
:param bool with_info: whether to include the INFO file
:param bool with_package: whether to include the package.tgz file
:param bool with_scripts: whether to include the scripts folder
:param bool with_conf: whether to include the conf folder
:param info_encoding: encoding for the INFO file
:param license_encoding: encoding for the LICENSE file
:param signature_encoding: encoding for the syno_signature.asc file
:param conf_dependencies_encoding: encoding for the conf/PKG_DEPS file
:param conf_conflicts_encoding: encoding for the conf/PKG_CONX file
:param conf_privilege_encoding: encoding for the conf/privilege file
:param conf_resource_encoding: encoding for the conf/resource file
:return: the created SPK stream
"""
# generate an info if none is given
info = info or create_info(build)
# open structure
spk_stream = io.BytesIO()
spk = tarfile.TarFile(fileobj=spk_stream, mode='w')
# license
if build.version.license:
license_stream = io.BytesIO(build.version.license.encode(license_encoding))
license_tarinfo = tarfile.TarInfo('LICENSE')
license_stream.seek(0, io.SEEK_END)
license_tarinfo.size = license_stream.tell()
license_stream.seek(0)
spk.addfile(license_tarinfo, fileobj=license_stream)
# signature
if signature is not None:
signature_stream = io.BytesIO(signature.encode(signature_encoding))
signature_tarinfo = tarfile.TarInfo('syno_signature.asc')
signature_stream.seek(0, io.SEEK_END)
signature_tarinfo.size = signature_stream.tell()
signature_stream.seek(0)
spk.addfile(signature_tarinfo, fileobj=signature_stream)
# conf
if with_conf or build.version.conf_dependencies is not None or build.version.conf_conflicts or build.version.conf_privilege is not None:
conf_folder_tarinfo = tarfile.TarInfo('conf')
conf_folder_tarinfo.type = tarfile.DIRTYPE
conf_folder_tarinfo.mode = 0o755
spk.addfile(conf_folder_tarinfo)
if build.version.conf_dependencies is not None:
conf_tarinfo = tarfile.TarInfo('conf/PKG_DEPS')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_dependencies))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_dependencies_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_conflicts is not None:
conf_tarinfo = tarfile.TarInfo('conf/PKG_CONX')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_conflicts))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_conflicts_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_privilege is not None:
conf_tarinfo = tarfile.TarInfo('conf/privilege')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_privilege))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_privilege_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_resource is not None:
conf_tarinfo = tarfile.TarInfo('conf/resource')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_resource))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_resource_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
# wizards
wizards = []
if build.version.install_wizard:
wizards.append('install')
if build.version.upgrade_wizard:
wizards.append('upgrade')
if wizards:
wizard_folder_tarinfo = tarfile.TarInfo('WIZARD_UIFILES')
wizard_folder_tarinfo.type = tarfile.DIRTYPE
wizard_folder_tarinfo.mode = 0o755
spk.addfile(wizard_folder_tarinfo)
for wizard in wizards:
wizard_tarinfo = tarfile.TarInfo('WIZARD_UIFILES/%s_uifile' % wizard)
wizard_stream = io.BytesIO(wizard.encode('utf-8'))
wizard_stream.seek(0, io.SEEK_END)
wizard_tarinfo.size = wizard_stream.tell()
wizard_stream.seek(0)
spk.addfile(wizard_tarinfo, fileobj=wizard_stream)
# scripts
if with_scripts:
scripts_folder_tarinfo = tarfile.TarInfo('scripts')
scripts_folder_tarinfo.type = tarfile.DIRTYPE
scripts_folder_tarinfo.mode = 0o755
spk.addfile(scripts_folder_tarinfo)
for script in ('preinst', 'postinst', 'preuninst', 'postuninst', 'preupgrade', 'postupgrade',
'start-stop-status'):
script_tarinfo = tarfile.TarInfo('scripts/%s' % script)
script_stream = io.BytesIO(script.encode('utf-8'))
script_stream.seek(0, io.SEEK_END)
script_tarinfo.size = script_stream.tell()
script_stream.seek(0)
spk.addfile(script_tarinfo, fileobj=script_stream)
# package
if with_package:
package_stream = io.BytesIO()
package = tarfile.TarFile(fileobj=package_stream, mode='w')
unique = '%s-%d-%d-[%s]' % (build.version.package.name, build.version.version, build.firmware.build,
'-'.join(a.code for a in build.architectures))
unique_stream = io.BytesIO(unique.encode('utf-8'))
unique_tarinfo = tarfile.TarInfo('unique')
unique_stream.seek(0, io.SEEK_END)
unique_tarinfo.size = unique_stream.tell()
unique_stream.seek(0)
package.addfile(unique_tarinfo, fileobj=unique_stream)
unique_stream.close()
package.close()
package_tarinfo = tarfile.TarInfo('package.tgz')
package_stream.seek(0, io.SEEK_END)
package_tarinfo.size = package_stream.tell()
package_stream.seek(0)
spk.addfile(package_tarinfo, fileobj=package_stream)
if 'checksum' not in info and with_checksum:
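            # compute the MD5 of package.tgz in chunks and store it in the INFO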
checksum = hashlib.md5()
package_stream.seek(0)
for chunk in iter(lambda: package_stream.read(io.DEFAULT_BUFFER_SIZE), b''):
checksum.update(chunk)
            info['checksum'] = checksum.hexdigest()
package_stream.close()
# icons
if with_package_icons or with_info_icons:
for size, icon in build.version.icons.items():
with create_icon(build.version.package.name, int(size)) as f:
suffix = '' if size == '72' else '_%s' % size
if with_package_icons:
icon_tarinfo = tarfile.TarInfo('PACKAGE_ICON%s.PNG' % suffix)
f.seek(0, io.SEEK_END)
icon_tarinfo.size = f.tell()
f.seek(0)
spk.addfile(icon_tarinfo, fileobj=f)
if with_info_icons:
f.seek(0)
info['package_icon%s' % suffix] = base64.b64encode(f.read()).decode('utf-8')
# info
if with_info:
if isinstance(info, io.BytesIO):
info_stream = info
else:
b = '\n'.join(['%s="%s"' % (k, v) for k, v in info.items()]).encode(info_encoding)
info_stream = io.BytesIO(b)
info_tarinfo = tarfile.TarInfo('INFO')
info_stream.seek(0, io.SEEK_END)
info_tarinfo.size = info_stream.tell()
info_stream.seek(0)
spk.addfile(info_tarinfo, fileobj=info_stream)
# close structure
spk.close()
spk_stream.seek(0)
return spk_stream
|
{
"content_hash": "92f797ccdd78173c671f08dcd1c4ff44",
"timestamp": "",
"source": "github",
"line_count": 664,
"max_line_length": 140,
"avg_line_length": 41.20933734939759,
"alnum_prop": 0.6299747834667251,
"repo_name": "Diaoul/spkrepo",
"id": "faaf20cb7f1f0b71a5972bcd87987cd5d1057bea",
"size": "27387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spkrepo/tests/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "727"
},
{
"name": "HTML",
"bytes": "15084"
},
{
"name": "JavaScript",
"bytes": "66"
},
{
"name": "Mako",
"bytes": "395"
},
{
"name": "Python",
"bytes": "194364"
},
{
"name": "SaltStack",
"bytes": "4753"
}
],
"symlink_target": ""
}
|
import demistomock as demisto
from Active_Directory_Query import main, group_dn
import socket
import ssl
from threading import Thread
import time
import os
import pytest
import json
from IAMApiModule import *
from unittest.mock import patch
BASE_TEST_PARAMS = {
'server_ip': '127.0.0.1',
'secure_connection': 'None',
'page_size': '500',
'credentials': {'identifier': 'bad', 'password': 'bad'}
}
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'
def test_bad_host_no_ssl(mocker):
mocker.patch.object(demisto, 'params',
return_value=BASE_TEST_PARAMS)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('server_ip') == '127.0.0.1'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_bad_ssl(mocker):
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '185.199.108.153' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = 443
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
mocker.patch.object(demisto, "info")
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' in err_msg
def ssl_bad_socket_server(port):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# cert and keyfile generated with
# openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
try:
context.load_cert_chain('cert.pem', 'key.pem')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
sock.bind(('127.0.0.1', port))
sock.listen(5)
with context.wrap_socket(sock, server_side=True) as ssock:
try:
conn, addr = ssock.accept()
except ssl.SSLError as err:
if 'TLSV1_ALERT_UNKNOWN_CA' in str(err):
# all is ok. client refused our cert
return
raise
conn.recv(32)
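            # reply with a large garbage payload that ignores the expected protocol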
msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
for x in range(10):
msg += msg
conn.send(msg)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
except Exception as ex:
pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex))
raise
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_faulty_server(mocker):
port = 9638
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['unsecure'] = True
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
def test_ssl_custom_cert(mocker, request):
ENV_KEY = 'SSL_CERT_FILE'
os.environ[ENV_KEY] = 'cert.pem'
def cleanup():
os.environ.pop(ENV_KEY)
request.addfinalizer(cleanup)
port = 9637
t = Thread(target=ssl_bad_socket_server, args=(port,))
t.start()
time.sleep(1) # wait for socket server to startup
params = BASE_TEST_PARAMS.copy()
params['server_ip'] = '127.0.0.1' # disable-secrets-detection
params['secure_connection'] = 'SSL'
params['port'] = port
mocker.patch.object(demisto, 'params',
return_value=params)
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# validate our mock of params
assert demisto.params().get('secure_connection') == 'SSL'
main()
t.join(5)
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert len(err_msg) < 100
assert 'Failed to access' in err_msg
assert 'SSL error' not in err_msg
def test_endpoint_entry():
"""
Given:
Custom attributes to filter the computer object entry.
When:
The function filters the computer object according to the custom attributes.
Then:
        The function will return the whole computer object entry because the custom attributes contain '*'.
"""
from Active_Directory_Query import endpoint_entry
custom_attributes_with_asterisk = endpoint_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'Hostname': 'name', 'ID': 'dn', 'Type': 'AD'}
def get_outputs_from_user_profile(user_profile):
entry_context = user_profile.to_entry()
outputs = entry_context.get('Contents')
return outputs
def mock_demisto_map_object(object, mapper_name, incident_type):
email = object.get('email')
email_prefix = email.split('@')[0]
return {
'cn': email_prefix,
'mail': email,
'sAMAccountName': email_prefix,
'userPrincipalName': email_prefix,
"ou": "OU=Americas,OU=Demisto"
}
def test_get_iam_user_profile(mocker):
from Active_Directory_Query import get_iam_user_profile
mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
user_profile = {"email": "test2@paloaltonetworks.com", "username": "test",
"locationregion": "Americas",
"olduserdata": {"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"}}
_, ad_user, sam_account_name = get_iam_user_profile(user_profile, 'mock_mapper_out')
assert sam_account_name == 'test'
assert ad_user
def test_update_user_iam__username_change(mocker):
"""
Given:
A valid user profile with valid mapping
When:
        Running the `update_user_iam` command
    Then:
        The user is updated successfully in AD.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
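        # minimal stand-in for an ldap3 Connection: searches return nothing, write operations succeed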
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
def modify(self, *args, **kwargs):
return True
def modify_dn(self, *args, **kwargs):
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test2@paloaltonetworks.com", "username": "test",
"locationregion": "Americas",
"olduserdata": {"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"}})}
mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=True)
mocker.patch('Active_Directory_Query.get_user_activity_by_samaccountname', return_value=True)
mocker.patch('Active_Directory_Query.user_dn', return_value='mock_dn')
user_profile = Active_Directory_Query.update_user_iam(
default_base_dn='mock_base_dn',
args=args,
create_if_not_exists=False,
mapper_out='mock_mapper_out',
disabled_users_group_cn='mock_disabled_users_group_cn'
)
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.UPDATE_USER
assert outputs.get('success') is True
assert outputs.get('email') == 'test2@paloaltonetworks.com'
assert outputs.get('username') == 'test2'
def test_create_user_iam(mocker):
"""
Given:
A valid user profile with valid mapping
When:
Running the `create_user_iam` command
Then:
The user was created successfully in AD.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
'sAMAccountName': 'test',
'userPrincipalName': 'test',
"ou": "OU=Americas,OU=Demisto"})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is True
assert outputs.get('active') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
def test_unsuccessful_create_user_iam_missing_ou(mocker):
"""
Given:
A valid user profile with missing ou in the mapping
When:
Running the `create_user_iam` command
Then:
- The user was not created in AD.
- An error message was returned.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
'sAMAccountName': 'test',
'userPrincipalName': 'test'})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
assert 'User must have an Organizational Unit (OU)' in outputs.get('errorMessage')
def test_unsuccessful_create_user_iam_missing_samaccountname(mocker):
"""
Given:
A valid user profile with missing samaccountname in the mapping
When:
Running the `create_user_iam` command
Then:
- The user was not created in AD.
- An error message was returned.
"""
import Active_Directory_Query
add_args, add_kwargs = [], {}
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
def search(self, *args, **kwargs):
return
def add(self, *args, **kwargs):
nonlocal add_args, add_kwargs
add_args, add_kwargs = args, kwargs
return True
Active_Directory_Query.conn = ConnectionMocker()
args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
"locationregion": "Americas"})}
mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
"ou": "OU=Americas,OU=Demisto",
'userPrincipalName': 'test'})
user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
outputs = get_outputs_from_user_profile(user_profile)
assert outputs.get('action') == IAMActions.CREATE_USER
assert outputs.get('success') is False
assert outputs.get('email') == 'test@paloaltonetworks.com'
assert 'User must have a sAMAccountName' in outputs.get('errorMessage')
def test_group_entry_no_custom_attributes():
"""
Given:
Custom attributes to filter the group object entry.
When:
The function filters the group object according to the custom attributes.
Then:
        The function will return the whole group object entry because the custom attributes contain '*'.
"""
from Active_Directory_Query import group_entry
custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD'}
def test_group_entry():
"""
Given:
Custom attributes to filter the group object entry.
When:
The function filters the group object according to the custom attributes.
Then:
        The function will return the group object entry extended with the requested custom attribute ('displayName').
"""
from Active_Directory_Query import group_entry
custom_attributes_with_asterisk = group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf',
'displayName': 'display name'}, ['displayName'])
assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD',
'displayName': 'display name'}
def test_search_group_members(mocker):
"""
sanity test for search_group_members method
"""
import Active_Directory_Query
class EntryMocker:
def entry_to_json(self):
return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
class ConnectionMocker:
entries = [EntryMocker()]
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': b'<cookie>'}}}}
def search(self, *args, **kwargs):
time.sleep(1)
return
expected_entry = {
'ActiveDirectory.Groups(obj.dn ==dn)': {'dn': 'dn', 'members': [{'dn': 'dn', 'category': 'group'}]},
'ActiveDirectory.Groups(obj.dn == val.dn)': [{'dn': 'dn', 'memberOf': ['memberOf'], 'name': ['name']}],
'Group': [{'Type': 'AD', 'ID': 'dn', 'Name': ['name'], 'Groups': ['memberOf']}],
'ActiveDirectory(true)': {"GroupsPageCookie": base64.b64encode(b'<cookie>').decode('utf-8')}}
expected_results = {'ContentsFormat': 'json', 'Type': 1,
'Contents': [{'dn': 'dn', 'attributes': {'memberOf': ['memberOf'], 'name': ['name']}}],
'ReadableContentsFormat': 'markdown',
'HumanReadable': '### Active Directory - Get Group Members\n|'
'dn|memberOf|name|\n|---|---|---|\n| dn | memberOf | name |\n',
'EntryContext': expected_entry}
expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'
mocker.patch.object(demisto, 'args',
return_value={'member-type': 'group', 'group-dn': 'dn', 'time_limit': '1'})
Active_Directory_Query.conn = ConnectionMocker()
with patch('logging.Logger.info') as mock:
Active_Directory_Query.search_group_members('dc', 1)
mock.assert_called_with(expected_results)
def test_group_dn_escape_characters():
"""
Given:
Group name with parentheses
When:
Running the function group_dn
Then:
The function search gets the group name after escape special characters.
"""
import Active_Directory_Query
class EntryMocker:
def entry_to_json(self):
return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
class ConnectionMocker:
entries = [EntryMocker()]
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
Active_Directory_Query.conn = ConnectionMocker()
with patch('Active_Directory_Query.search', return_value=[EntryMocker()]) as mock:
group_dn('group(group)', '')
mock.assert_called_with('(&(objectClass=group)(cn=group\\28group\\29))', '')
def test_search__no_control_exist(mocker):
"""
Given:
No control key in the result
When:
Run any search query
Then:
        The result returns 'no entries' instead of raising an exception
"""
import Active_Directory_Query
class ConnectionMocker:
entries = []
result = {}
def search(self, *args, **kwargs):
return
mocker.patch.object(demisto, 'results')
Active_Directory_Query.conn = ConnectionMocker()
Active_Directory_Query.search_users('dc=test,dc=test_1', page_size=20)
assert '**No entries.**' in demisto.results.call_args[0][0]['HumanReadable']
def test_search_attributes_to_exclude(mocker):
"""
Given:
attributes_to_exclude
When:
Run any search query
Then:
        The given attributes are excluded from the human-readable output and the context data
"""
import Active_Directory_Query
class EntryMocker:
def entry_to_json(self):
return '{"dn": "dn"}'
class ConnectionMocker:
entries = [EntryMocker()]
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': b'<cookie>'}}}}
def search(self, *args, **kwargs):
time.sleep(1)
return
expected_results = {'ContentsFormat': 'json', 'Type': 1,
'Contents': [{'dn': 'dn'}],
'ReadableContentsFormat': 'markdown',
'HumanReadable': '### Active Directory - Get Users\n|dn|\n|---|\n| dn |\n',
'EntryContext': {'ActiveDirectory.Users(obj.dn == val.dn)': [{'dn': 'dn'}],
'Account(obj.ID == val.ID)':
[{'Type': 'AD', 'ID': 'dn', 'Email': None, 'Username': None,
'DisplayName': None, 'Managr': None, 'Manager': None, 'Groups': None}],
'ActiveDirectory(true)':
{"UsersPageCookie": base64.b64encode(b'<cookie>').decode('utf-8')}}}
expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'
mocker.patch.object(demisto, 'args',
return_value={'attributes-to-exclude': "memberOf,name,mail,displayName,"
"manager,sAMAccountName,userAccountControl",
'page-size': '1'})
Active_Directory_Query.conn = ConnectionMocker()
with patch('logging.Logger.info') as mock:
Active_Directory_Query.search_users('dc', 1)
mock.assert_called_with(expected_results)
def test_user_account_to_boolean_fields():
"""
Given:
a userAccountControl value
When:
parsing the userAccountControl fields
Then:
Only the relevant fields will be marked as true
"""
import Active_Directory_Query
fields = Active_Directory_Query.user_account_to_boolean_fields(0x50)
assert {k for k, v in fields.items() if v} == {'LOCKOUT', 'PASSWD_CANT_CHANGE'}
@pytest.mark.parametrize('flags', [512, 0, 544])
def test_restore_user(mocker, flags):
"""
Given:
A disabled user.
When:
Calling restore_user method.
Then:
Verify the existing flag is returned.
"""
from Active_Directory_Query import restore_user
re_val = {'flat': [{'userAccountControl': [flags]}]}
mocker.patch('Active_Directory_Query.search_with_paging', return_value=re_val)
mocker.patch.object(demisto, 'args')
assert restore_user('test_user', 0) == flags
def test_enable_user_with_restore_user_option(mocker):
"""
Given:
A disabled user.
When:
Calling enable_user method.
Then:
Verify the existing flag is returned with the disable bit off.
"""
from Active_Directory_Query import enable_user
disabled_account_with_properties = 546
enabled_account_with_properties = 544
mocker.patch('Active_Directory_Query.restore_user', return_value=disabled_account_with_properties)
mocker.patch('Active_Directory_Query.user_dn', return_value='test_dn')
modify_data = mocker.patch('Active_Directory_Query.modify_object')
mocker.patch.object(demisto, 'args')
enable_user('test_user', 0)
assert modify_data.call_args.args[1].get('userAccountControl')[0][1] == enabled_account_with_properties
def test_search_with_paging_bug(mocker):
"""
Given:
page size larger than 1.
When:
        running the get-group-members command.
    Then:
        only the entries fetched within the time limit are returned.
"""
import Active_Directory_Query
class EntryMocker:
def entry_to_json(self):
return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
class ConnectionMocker:
entries = []
result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': b'<cookie>'}}}}
def search(self, *args, **kwargs):
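            # emulate a slow paged search: each call takes ~1 second and yields page_size entries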
page_size = kwargs.get('paged_size')
if page_size:
self.entries = [EntryMocker() for i in range(page_size)]
time.sleep(1)
return
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'args',
return_value={'member-type': 'group', 'group-dn': 'dn', 'time_limit': '3'})
Active_Directory_Query.conn = ConnectionMocker()
with patch('logging.Logger.info'):
Active_Directory_Query.search_group_members('dc', 1)
assert len(demisto.results.call_args[0][0]['Contents']) == 3
def test_password_not_expire_missing_username(mocker):
"""
Given:
A demisto args object with missing username and a valid value.
When:
running set_password_not_expire command.
Then:
        Verify that a missing-username exception is raised.
"""
from Active_Directory_Query import set_password_not_expire
mocker.patch.object(demisto, 'args', return_value={'username': None, 'value': True})
default_base_dn = {}
with pytest.raises(Exception) as err:
set_password_not_expire(default_base_dn)
assert err.value.args[0] == 'Missing argument - You must specify a username (sAMAccountName).'
|
{
"content_hash": "2a3cbcf414235b045bb80f92e5644385",
"timestamp": "",
"source": "github",
"line_count": 661,
"max_line_length": 118,
"avg_line_length": 37.09833585476551,
"alnum_prop": 0.593181632819509,
"repo_name": "VirusTotal/content",
"id": "beb87186a07304283d83139e6169f602d8fdbfa6",
"size": "24522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from soap import logger
from soap.common import indent
from soap.expression import (
AccessExpr, Expression, FixExpr, OutputVariableTuple, SelectExpr,
UpdateExpr, Variable
)
from soap.semantics.common import is_numeral
from soap.semantics.error import cast
from soap.semantics.functions import expand_expr
from soap.semantics.label import Label
from soap.semantics.state.base import BaseState
class MetaState(BaseState, dict):
__slots__ = ('_hash')
def __init__(self, dictionary=None, **kwargs):
dictionary = dict(dictionary or {}, **kwargs)
mapping = {
self._cast_key(key): self._cast_value(key, value)
for key, value in dictionary.items()
}
super().__init__(mapping)
self._hash = None
def _cast_key(self, key):
if isinstance(key, (Variable, Label, OutputVariableTuple)):
return key
if isinstance(key, str):
var_list = [var for var in self.keys() if var.name == key]
if not var_list:
raise KeyError(key)
if len(var_list) > 1:
raise KeyError('Multiple variables with the same name.')
var = var_list.pop()
return var
raise TypeError(
'Do not know how to convert {!r} into a variable'.format(key))
def _cast_value(self, key, value):
if isinstance(value, (Label, Expression)):
return value
if isinstance(value, str):
from soap.parser import parse
return parse(value)
if isinstance(value, (int, float)) or is_numeral(value):
return cast(value)
if isinstance(value, dict):
return self.__class__(value)
raise TypeError(
'Do not know how to convert {!r} into an expression'.format(value))
def __getitem__(self, key):
try:
return super().__getitem__(key)
except KeyError:
logger.warning('Variable {} does not exist.'.format(key))
return Variable('__unreachable', key.dtype)
def is_fixpoint(self, other):
raise RuntimeError('Should not be called.')
def widen(self, other):
raise RuntimeError('Should not be called.')
def visit_SkipFlow(self, flow):
return self
def visit_AssignFlow(self, flow):
var, expr = flow.var, flow.expr
if isinstance(var, AccessExpr):
var, subscript = var.var, var.subscript
expr = UpdateExpr(var, subscript, expr)
return self.immu_update(var, expand_expr(expr, self))
def visit_IfFlow(self, flow):
def get(state, var):
expr = state[var]
return expr
bool_expr = expand_expr(flow.conditional_expr, self)
true_state = self.transition(flow.true_flow)
false_state = self.transition(flow.false_flow)
var_list = set(self.keys())
var_list |= set(true_state.keys()) | set(false_state.keys())
mapping = dict(self)
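        # merge both branch states; values that differ become ternary SelectExpr nodes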
for var in var_list:
true_expr = get(true_state, var)
false_expr = get(false_state, var)
if true_expr == false_expr:
value = true_expr
else:
value = SelectExpr(bool_expr, true_expr, false_expr)
mapping[var] = value
return self.__class__(mapping)
@staticmethod
def _input_vars(meta_state, var):
in_vars = {var}
next_vars = {var}
while next_vars:
var = next_vars.pop()
expr = meta_state.get(var)
if expr is None or is_numeral(expr):
continue
expr_vars = expr.vars()
next_vars |= expr_vars - in_vars
in_vars |= expr_vars
return in_vars
def _visit_loop(self, init_state, bool_expr, loop_flow):
"""
Finds necessary loop variables and loop states for each variable.
"""
bool_expr_vars = bool_expr.vars()
input_vars = loop_flow.vars(output=False) | bool_expr_vars
loop_vars = loop_flow.vars(input=False)
id_state = self.__class__({k: k for k in input_vars | loop_vars})
loop_state = id_state.transition(loop_flow)
loop_map = {}
init_map = {}
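        # for each loop output variable, keep only the state entries it actually depends on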
for var in loop_vars:
# local loop/init variables
local_loop_vars = self._input_vars(loop_state, var)
local_loop_vars |= bool_expr_vars
# local loop/init states
loop_map[var] = self.__class__(
{k: v for k, v in loop_state.items() if k in local_loop_vars})
init_map[var] = self.__class__(
{k: v for k, v in init_state.items() if k in local_loop_vars})
mapping = dict(init_state)
for var in loop_map:
# fixpoint expression
mapping[var] = FixExpr(
bool_expr, loop_map[var], var, init_map[var])
return self.__class__(mapping)
def visit_WhileFlow(self, flow):
return self._visit_loop(self, flow.conditional_expr, flow.loop_flow)
def visit_ForFlow(self, flow):
init_state = self(flow.init_flow)
return self._visit_loop(
init_state, flow.conditional_expr, flow.loop_flow + flow.incr_flow)
def format(self):
items = []
for k, v in sorted(self.items(), key=str):
if isinstance(v, (Expression, MetaState)):
v = v.format()
items.append('{}: {}'.format(k, v))
items = ', \n'.join(items)
return '{{\n{}}}'.format(indent(items))
def __str__(self):
return self.format().replace(' ', '').replace('\n', '').strip()
def __hash__(self):
if self._hash is None:
self._hash = hash(tuple(sorted(self.items(), key=hash)))
return self._hash
def flow_to_meta_state(flow):
id_state = MetaState({v: v for v in flow.vars(output=False)})
return id_state.transition(flow)
|
{
"content_hash": "6ec2565cdd13b4b43df4a9e6a7a762c8",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 35.260355029585796,
"alnum_prop": 0.5675448900822285,
"repo_name": "admk/soap",
"id": "0d8e1f95f10eefb39149197942fc50ff834f8bd6",
"size": "5959",
"binary": false,
"copies": "1",
"ref": "refs/heads/xitong/master",
"path": "soap/semantics/state/meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Isabelle",
"bytes": "8132"
},
{
"name": "Python",
"bytes": "461377"
},
{
"name": "VHDL",
"bytes": "1728"
}
],
"symlink_target": ""
}
|
from . import dbapi20
import unittest
import pg8000
from .connection_settings import db_connect
class Tests(dbapi20.DatabaseAPI20Test):
driver = pg8000
connect_args = ()
connect_kw_args = db_connect
lower_func = 'lower' # For stored procedure test
def test_nextset(self):
pass
def test_setoutputsize(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "91cf7dd52044de0346c08b64116a7bbc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.6576354679802956,
"repo_name": "realazthat/pg8000",
"id": "62eb1f905e0ce37e8cce15a957efce7ac49c2a7d",
"size": "428",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pg8000/tests/test_pg8000_dbapi20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "298100"
},
{
"name": "Shell",
"bytes": "15010"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from bambu_urlshortener.models import ShortURL
class ShortURLAdmin(admin.ModelAdmin):
list_display = ('url', 'slug', 'visits', 'last_visited')
readonly_fields = ('slug', 'visits', 'last_visited')
admin.site.register(ShortURL, ShortURLAdmin)
|
{
"content_hash": "f47b00a6b38d144b9129b7b04e7d6632",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 35.375,
"alnum_prop": 0.7420494699646644,
"repo_name": "iamsteadman/bambu-urlshortener",
"id": "1935fc5cd672a5d27e840831f25add6c90c67aca",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bambu_urlshortener/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9305"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil import parser as dateparser
EQUALS = 'eq'
NOT_EQUAL = 'neq'
LESS_THAN = 'lt'
LESS_THAN_EQUALS = 'lte'
GREATER_THAN = 'gt'
GREATER_THAN_EQUALS = 'gte'
IN = 'in'
NOT_IN = 'nin'
HAS = 'has'
ALL = (
EQUALS,
NOT_EQUAL,
GREATER_THAN,
GREATER_THAN_EQUALS,
LESS_THAN,
LESS_THAN_EQUALS,
HAS,
IN,
NOT_IN
)
def create_filters_from_request_params(none_values=None, **params):
"""Create filters from REST request parameters.
:param none_values: field names, where the value is required to be None.
    :param params: REST request parameters.
:return: filters dictionary.
"""
none_values = none_values or []
filters = {}
for column, data in params.items():
if (data is None and column in none_values) or data is not None:
if isinstance(data, str):
f_type, value = extract_filter_type_and_value(data)
create_or_update_filter(column, value, f_type, filters)
else:
create_or_update_filter(column, data, _filter=filters)
return filters
def create_or_update_filter(column, value, filter_type='eq', _filter=None):
"""Create or Update filter.
    :param column: Column name by which the user wants to filter.
:param value: Column value.
:param filter_type: filter type. Filter type can be
'eq', 'neq', 'gt', 'gte', 'lte', 'in',
                        'lt', 'nin', 'has'. Default is 'eq'.
:param _filter: Optional. If provided same filter dictionary will
be updated.
:return: filter dictionary.
"""
if _filter is None:
_filter = {}
_filter[column] = {filter_type: value}
return _filter
def extract_filter_type_and_value(data):
"""Extract filter type and its value from the data.
:param data: REST parameter value from which filter type and
                 value can be extracted. It should be in the format
                 'filter_type:value'.
:return: filter type and value.
"""
if has_filters(data):
filter_type, value = data.split(':', 1)
value = str(value)
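        # 'in'/'nin' filters carry a comma-separated list of values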
if data.startswith((IN, NOT_IN)):
value = list(value.split(","))
else:
value = data
filter_type = EQUALS
return filter_type, value
def has_filters(value):
for filter_type in ALL:
if value.startswith(filter_type + ':'):
return True
return False
def match_filter(obj, attr_name, attr_filter):
# If the attribute doesn't exist we assume that any filter is
# not applicable and we ignore it.
if not hasattr(obj, attr_name):
return True
attr_val = getattr(obj, attr_name)
for op, val in attr_filter.items():
# If the attribute is a date and the given filter value is a string
# we try to convert the filter value into a data as well.
if isinstance(attr_val, datetime.datetime) and isinstance(val, str):
val = dateparser.isoparse(val)
if op not in ALL:
raise ValueError(
'Unknown filter operation encountered [operation=%s]' % op
)
if op == EQUALS and attr_val != val:
return False
if op == NOT_EQUAL and attr_val == val:
return False
if op == LESS_THAN and attr_val >= val:
return False
if op == LESS_THAN_EQUALS and attr_val > val:
return False
if op == GREATER_THAN and attr_val <= val:
return False
if op == GREATER_THAN_EQUALS and attr_val < val:
return False
if op == IN and attr_val not in val:
return False
if op == NOT_IN and attr_val in val:
return False
if op == HAS and val not in attr_val:
return False
return True
def match_filters(obj, filters):
for attr_name, attr_filter in filters.items():
if not match_filter(obj, attr_name, attr_filter):
return False
return True
|
{
"content_hash": "83fbfd85b5a65e09e6ab044633298b2b",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 76,
"avg_line_length": 26.41830065359477,
"alnum_prop": 0.587085601187531,
"repo_name": "openstack/mistral",
"id": "208ebad6cec0ba8f57dd35fd953e4e2f7f0bab68",
"size": "4734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/utils/filter_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
}
|
"""Float class.
Represents an unbounded float using a widget.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from .domwidget import DOMWidget
from .widget import register
from .trait_types import Color
from traitlets import (Unicode, CFloat, Bool, Int, CaselessStrEnum,
Tuple, TraitError, validate)
class _Float(DOMWidget):
value = CFloat(0.0, help="Float value").tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes").tag(sync=True)
description = Unicode(help="Description of the value this widget represents").tag(sync=True)
_model_module = Unicode('jupyter-js-widgets').tag(sync=True)
_view_module = Unicode('jupyter-js-widgets').tag(sync=True)
def __init__(self, value=None, **kwargs):
if value is not None:
kwargs['value'] = value
super(_Float, self).__init__(**kwargs)
class _BoundedFloat(_Float):
max = CFloat(100.0, help="Max value").tag(sync=True)
min = CFloat(0.0, help="Min value").tag(sync=True)
step = CFloat(0.1, help="Minimum step to increment the value (ignored by some views)").tag(sync=True)
@validate('value')
def _validate_value(self, proposal):
"""Cap and floor value"""
value = proposal['value']
if self.min > value or self.max < value:
value = min(max(value, self.min), self.max)
return value
@validate('min')
def _validate_min(self, proposal):
"""Enforce min <= value <= max"""
min = proposal['value']
if min > self.max:
raise TraitError('Setting min > max')
if min > self.value:
self.value = min
return min
@validate('max')
def _validate_max(self, proposal):
"""Enforce min <= value <= max"""
max = proposal['value']
if max < self.min:
raise TraitError('setting max < min')
if max < self.value:
self.value = max
return max
@register('Jupyter.FloatText')
class FloatText(_Float):
""" Displays a float value within a textbox. For a textbox in
which the value must be within a specific range, use BoundedFloatText.
Parameters
----------
value : float
value displayed
description : str
description displayed next to the text box
color : str Unicode color code (eg. '#C13535')
color of the value displayed
"""
_view_name = Unicode('FloatTextView').tag(sync=True)
_model_name = Unicode('FloatTextModel').tag(sync=True)
@register('Jupyter.BoundedFloatText')
class BoundedFloatText(_BoundedFloat):
""" Displays a float value within a textbox. Value must be within the range specified.
For a textbox in which the value doesn't need to be within a specific range, use FloatText.
Parameters
----------
value : float
value displayed
min : float
minimal value of the range of possible values displayed
max : float
maximal value of the range of possible values displayed
description : str
description displayed next to the textbox
color : str Unicode color code (eg. '#C13535')
color of the value displayed
"""
_view_name = Unicode('FloatTextView').tag(sync=True)
_model_name = Unicode('FloatTextModel').tag(sync=True)
@register('Jupyter.FloatSlider')
class FloatSlider(_BoundedFloat):
""" Slider/trackbar of floating values with the specified range.
Parameters
----------
value : float
position of the slider
min : float
minimal position of the slider
max : float
maximal position of the slider
step : float
step of the trackbar
description : str
name of the slider
orientation : {'horizontal', 'vertical'}
default is 'horizontal', orientation of the slider
readout : {True, False}
default is True, display the current value of the slider next to it
readout_format : str
default is '.2f', specifier for the format function used to represent
slider value for human consumption, modeled after Python 3's format
specification mini-language (PEP 3101).
slider_color : str Unicode color code (eg. '#C13535')
color of the slider
color : str Unicode color code (eg. '#C13535')
color of the value displayed (if readout == True)
"""
_view_name = Unicode('FloatSliderView').tag(sync=True)
_model_name = Unicode('FloatSliderModel').tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
_range = Bool(False, help="Display a range selector").tag(sync=True)
readout = Bool(True, help="Display the current value of the slider next to it.").tag(sync=True)
readout_format = Unicode('.2f', help="Format for the readout").tag(sync=True)
slider_color = Color(None, allow_none=True).tag(sync=True)
continuous_update = Bool(True, help="Update the value of the widget as the user is holding the slider.").tag(sync=True)
@register('Jupyter.FloatProgress')
class FloatProgress(_BoundedFloat):
""" Displays a progress bar.
Parameters
-----------
value : float
position within the range of the progress bar
min : float
minimal position of the slider
max : float
maximal position of the slider
step : float
step of the progress bar
description : str
name of the progress bar
orientation : {'horizontal', 'vertical'}
default is 'horizontal', orientation of the progress bar
bar_style: {'success', 'info', 'warning', 'danger', ''}
color of the progress bar, default is '' (blue)
colors are: 'success'-green, 'info'-light blue, 'warning'-orange, 'danger'-red
"""
_view_name = Unicode('ProgressView').tag(sync=True)
_model_name = Unicode('ProgressModel').tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
bar_style = CaselessStrEnum(
values=['success', 'info', 'warning', 'danger', ''],
default_value='', allow_none=True, help="""Use a predefined styling for
        the progress bar.""").tag(sync=True)
class _FloatRange(_Float):
value = Tuple(CFloat(), CFloat(), default_value=(0.0, 1.0),
help="Tuple of (lower, upper) bounds").tag(sync=True)
@property
def lower(self):
return self.value[0]
@lower.setter
def lower(self, lower):
self.value = (lower, self.value[1])
@property
def upper(self):
return self.value[1]
@upper.setter
def upper(self, upper):
self.value = (self.value[0], upper)
@validate('value')
def _validate_value(self, proposal):
lower, upper = proposal['value']
if upper < lower:
raise TraitError('setting lower > upper')
return lower, upper
class _BoundedFloatRange(_FloatRange):
step = CFloat(1.0, help="Minimum step that the value can take (ignored by some views)").tag(sync=True)
max = CFloat(100.0, help="Max value").tag(sync=True)
min = CFloat(0.0, help="Min value").tag(sync=True)
def __init__(self, *args, **kwargs):
min, max = kwargs.get('min', 0.0), kwargs.get('max', 100.0)
if not kwargs.get('value', None):
kwargs['value'] = (0.75 * min + 0.25 * max,
0.25 * min + 0.75 * max)
super(_BoundedFloatRange, self).__init__(*args, **kwargs)
@validate('min', 'max')
def _validate_bounds(self, proposal):
trait = proposal['trait']
new = proposal['value']
if trait.name == 'min' and new > self.max:
raise TraitError('setting min > max')
if trait.name == 'max' and new < self.min:
raise TraitError('setting max < min')
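        # clamp the current (lower, upper) value into the new bound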
if trait.name == 'min':
self.value = (max(new, self.value[0]), max(new, self.value[1]))
if trait.name == 'max':
self.value = (min(new, self.value[0]), min(new, self.value[1]))
return new
@validate('value')
def _validate_value(self, proposal):
lower, upper = super(_BoundedFloatRange, self)._validate_value(proposal)
lower, upper = min(lower, self.max), min(upper, self.max)
lower, upper = max(lower, self.min), max(upper, self.min)
return lower, upper
@register('Jupyter.FloatRangeSlider')
class FloatRangeSlider(_BoundedFloatRange):
""" Slider/trackbar that represents a pair of floats bounded by minimum and maximum value.
Parameters
----------
value : float tuple
range of the slider displayed
min : float
minimal position of the slider
max : float
maximal position of the slider
step : float
step of the trackbar
description : str
name of the slider
orientation : {'horizontal', 'vertical'}
default is 'horizontal'
readout : {True, False}
default is True, display the current value of the slider next to it
readout_format : str
default is '.2f', specifier for the format function used to represent
slider value for human consumption, modeled after Python 3's format
specification mini-language (PEP 3101).
slider_color : str Unicode color code (eg. '#C13535')
color of the slider
color : str Unicode color code (eg. '#C13535')
color of the value displayed (if readout == True)
"""
_view_name = Unicode('FloatSliderView').tag(sync=True)
_model_name = Unicode('FloatSliderModel').tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
_range = Bool(True, help="Display a range selector").tag(sync=True)
readout = Bool(True, help="Display the current value of the slider next to it.").tag(sync=True)
slider_color = Color(None, allow_none=True).tag(sync=True)
continuous_update = Bool(True, help="Update the value of the widget as the user is sliding the slider.").tag(sync=True)
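# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the original
# ipywidgets module). It constructs the sliders through the public package so
# the snippet is self-contained; a running Jupyter kernel is only needed if you
# want the widgets to actually render. All values below are arbitrary examples.
#
#     import ipywidgets as widgets
#
#     gain = widgets.FloatSlider(value=7.5, min=0.0, max=10.0, step=0.1,
#                                description='Gain:', readout_format='.1f')
#     window = widgets.FloatRangeSlider(value=(2.0, 7.5), min=0.0, max=10.0,
#                                       step=0.5, description='Window:')
#     progress = widgets.FloatProgress(value=30.0, min=0.0, max=100.0,
#                                      bar_style='info')
#
#     # In a notebook cell, displaying a widget is enough to render it:
#     # display(gain, window, progress)
# ---------------------------------------------------------------------------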
|
{
"content_hash": "53835ad2e0cb31e4c1271ef64e13d381",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 123,
"avg_line_length": 37.13768115942029,
"alnum_prop": 0.6344390243902439,
"repo_name": "lancezlin/ml_template_py",
"id": "be574d4ef1157d6deb4d13b1122807fc380c09f0",
"size": "10250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/ipywidgets/widgets/widget_float.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gym
import numpy as np
import tensorflow.compat.v2 as tf
from gym import spaces
from gym.utils import seeding
import dice_rl.utils.common as common_utils
class LowRank(gym.Env):
def __init__(self,
num_states=100,
num_actions=4,
rank=10,
generation_seed=0,
stochastic=False):
self._num_states = num_states
self._num_actions = num_actions
self._rank = rank
self._stochastic = stochastic
self._transitions, self._rewards = self._generate_low_rank(generation_seed)
self.observation_space = spaces.Discrete(self._num_states)
self.action_space = spaces.Discrete(self._num_actions)
self.seed()
self.reset()
def _generate_low_rank(self, seed):
"""Generate a low-rank transition matrix.
Args:
seed: Generation seed.
Returns:
transition matrix of shape S x S' x A
reward of size S
"""
gen_random, _ = seeding.np_random(seed)
if self._stochastic:
transitions = gen_random.uniform(
size=[self._rank * self._num_actions, self._num_states])
transitions /= np.sum(transitions, keepdims=True, axis=-1)
else:
transitions = np.zeros([self._rank * self._num_actions, self._num_states])
next_idx = gen_random.randint(
self._num_states, size=self._rank * self._num_actions)
transitions[np.arange(self._rank * self._num_actions), next_idx] = 1.
transitions = np.transpose(
np.reshape(transitions,
[self._rank, self._num_actions, self._num_states]),
[0, 2, 1])
duplicates = transitions[
gen_random.randint(self._rank, size=self._num_states - self._rank), ...]
transitions = np.concatenate([transitions, duplicates])
gen_random.shuffle(transitions)
u, s, _ = np.linalg.svd(np.reshape(transitions, [self._num_states, -1]))
rewards = np.dot(u[:, :self._rank], gen_random.uniform(size=self._rank))
rewards = (rewards - np.min(rewards)) / (np.max(rewards) - np.min(rewards))
return transitions, rewards
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self._cur_state = self.np_random.randint(self._num_states)
return self._get_obs()
def _get_obs(self):
return self._cur_state
def step(self, action):
self._cur_state = self.np_random.choice(
self._num_states, 1, p=self._transitions[self._cur_state, :, action])[0]
reward = self._rewards[self._cur_state]
done = False
return self._get_obs(), reward, done, {}
def _compute_near_optimal_actions(num_states, num_actions, transitions,
rewards):
vals = np.zeros([num_states, 1])
eps = 0.001
gamma = 0.99
while True:
delta = 0
for state in range(num_states):
tmp = vals[state].copy()
vals[state] = np.max(
np.sum((rewards[state] + gamma * vals) * transitions[state, ...], 0))
delta = np.max([delta, np.abs(tmp - vals[state])])
if delta <= eps * (1 - gamma) / gamma:
break
pi = np.zeros(num_states, dtype=np.int32)
for state in range(num_states):
pi[state] = np.argmax(np.sum(vals * transitions[state, ...], 0))
return pi
def get_low_rank_policy(env,
epsilon_explore=0.0,
py=True,
return_distribution=True):
"""Creates a near-optimal policy for solving the low rank environment.
Args:
env: A low rank environment.
epsilon_explore: Probability of sampling random action as opposed to optimal
action.
py: Whether to return Python policy (NumPy) or TF (Tensorflow).
return_distribution: In the case of a TF policy, whether to return the full
action distribution.
Returns:
A policy_fn that takes in an observation and returns a sampled action along
with a dictionary containing policy information (e.g., log probability).
A spec that determines the type of objects returned by policy_info.
Raises:
ValueError: If epsilon_explore is not a valid probability.
"""
if epsilon_explore < 0 or epsilon_explore > 1:
raise ValueError('Invalid exploration value %f' % epsilon_explore)
near_optimal_actions = _compute_near_optimal_actions(env._num_states,
env._num_actions,
env._transitions,
env._rewards)
policy_distribution = (
np.ones((env._num_states, env._num_actions)) / env._num_actions)
for index, action in enumerate(near_optimal_actions):
policy_distribution[index] *= epsilon_explore
policy_distribution[index, action] += 1 - epsilon_explore
def obs_to_index_fn(observation):
if py:
return np.array(observation, dtype=np.int32)
else:
return tf.cast(observation, tf.int32)
if py:
return common_utils.create_py_policy_from_table(policy_distribution,
obs_to_index_fn)
else:
return common_utils.create_tf_policy_from_table(
policy_distribution,
obs_to_index_fn,
return_distribution=return_distribution)
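if __name__ == '__main__':
  # Illustrative sketch (not part of the original module): build a small
  # LowRank environment and roll it out for a few random steps. The sizes
  # below are arbitrary example values.
  demo_env = LowRank(num_states=20, num_actions=3, rank=5, stochastic=True)
  obs = demo_env.reset()
  for _ in range(5):
    action = demo_env.action_space.sample()
    obs, reward, done, _ = demo_env.step(action)
    print('obs=%d action=%d reward=%.3f' % (obs, action, reward))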
|
{
"content_hash": "3c86216753790fa474d1c1976d05b964",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 80,
"avg_line_length": 32.98170731707317,
"alnum_prop": 0.6176742466259937,
"repo_name": "google-research/dice_rl",
"id": "d9438668f70e5a87709f84450e4902693017ca30",
"size": "5409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environments/gridworld/low_rank.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "579847"
},
{
"name": "Shell",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
"""Weight of Evidence"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class WOEEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""Weight of Evidence coding for categorical features.
Supported targets: binomial. For polynomial target support, see PolynomialWrapper.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which will assume WOE=0.
handle_unknown: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which will assume WOE=0.
randomized: bool,
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
sigma: float
standard deviation (spread or "width") of the normal distribution.
regularization: float
the purpose of regularization is mostly to prevent division by zero.
When regularization is 0, you may encounter division by zero.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target > 22.5
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = WOEEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Weight of Evidence (WOE) and Information Value Explained, from
https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, randomized=False, sigma=0.05, regularization=1.0):
self.verbose = verbose
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.cols = cols
self.ordinal_encoder = None
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._sum = None
self._count = None
self.random_state = random_state
self.randomized = randomized
self.sigma = sigma
self.regularization = regularization
self.feature_names = None
# noinspection PyUnusedLocal
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and binary y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Binary target values.
Returns
-------
self : encoder
Returns self.
"""
# Unite parameters into pandas types
X, y = util.convert_inputs(X, y)
# The label must be binary with values {0,1}
unique = y.unique()
if len(unique) != 2:
raise ValueError("The target column y must be binary. But the target contains " + str(len(unique)) + " unique value(s).")
if y.isnull().any():
raise ValueError("The target column y must not contain missing values.")
if np.max(unique) < 1:
raise ValueError("The target column y must be binary with values {0, 1}. Value 1 was not found in the target.")
if np.min(unique) > 0:
raise ValueError("The target column y must be binary with values {0, 1}. Value 0 was not found in the target.")
self._dim = X.shape[1]
# If columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
handle_unknown='value',
handle_missing='value'
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
X_ordinal = self.ordinal_encoder.transform(X)
# Training
self.mapping = self._train(X_ordinal, y)
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
# Store column names with approximately constant variance on the training data
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
            except ValueError as e:  # list.remove raises ValueError, not KeyError
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data. When the data are used for model training,
it is important to also pass the target in order to apply leave one out.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# Unite the input into pandas types
X, y = util.convert_inputs(X, y, deep=True)
# Then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
if not list(self.cols):
return X
X = self.ordinal_encoder.transform(X)
if self.handle_unknown == 'error':
if X[self.cols].isin([-1]).any().any():
raise ValueError('Unexpected categories found in dataframe')
# Loop over columns and replace nominal values with WOE
X = self._score(X, y)
# Postprocessing
# Note: We should not even convert these columns.
if self.drop_invariant:
X = X.drop(columns=self.drop_cols)
if self.return_df or override_return_df:
return X
else:
return X.values
def _train(self, X, y):
# Initialize the output
mapping = {}
# Calculate global statistics
self._sum = y.sum()
self._count = y.count()
for switch in self.ordinal_encoder.category_mapping:
col = switch.get('col')
values = switch.get('mapping')
# Calculate sum and count of the target for each unique value in the feature col
stats = y.groupby(X[col]).agg(['sum', 'count']) # Count of x_{i,+} and x_i
# Create a new column with regularized WOE.
# Regularization helps to avoid division by zero.
# Pre-calculate WOEs because logarithms are slow.
nominator = (stats['sum'] + self.regularization) / (self._sum + 2*self.regularization)
denominator = ((stats['count'] - stats['sum']) + self.regularization) / (self._count - self._sum + 2*self.regularization)
woe = np.log(nominator / denominator)
# Ignore unique values. This helps to prevent overfitting on id-like columns.
woe[stats['count'] == 1] = 0
if self.handle_unknown == 'return_nan':
woe.loc[-1] = np.nan
elif self.handle_unknown == 'value':
woe.loc[-1] = 0
if self.handle_missing == 'return_nan':
woe.loc[values.loc[np.nan]] = np.nan
elif self.handle_missing == 'value':
woe.loc[-2] = 0
# Store WOE for transform() function
mapping[col] = woe
return mapping
def _score(self, X, y):
for col in self.cols:
# Score the column
X[col] = X[col].map(self.mapping[col])
# Randomization is meaningful only for training data -> we do it only if y is present
if self.randomized and y is not None:
random_state_generator = check_random_state(self.random_state)
X[col] = (X[col] * random_state_generator.normal(1., self.sigma, X[col].shape[0]))
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError("Estimator has to be fitted to return feature names.")
else:
return self.feature_names
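if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): fit the encoder on
    # a tiny, made-up dataset with a binary target and inspect the encoded
    # column. Column names and values are purely for demonstration.
    X_demo = pd.DataFrame({'color': ['red', 'blue', 'red', 'green', 'blue', 'red']})
    y_demo = pd.Series([1, 0, 1, 0, 0, 1])
    demo_enc = WOEEncoder(cols=['color']).fit(X_demo, y_demo)
    print(demo_enc.transform(X_demo))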
|
{
"content_hash": "9a7c74f9fe527931105afef268c0abda",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 134,
"avg_line_length": 37.034013605442176,
"alnum_prop": 0.5971711976487877,
"repo_name": "scikit-learn-contrib/categorical-encoding",
"id": "a37e2836510824e1972f70a1d13f22ddd034e273",
"size": "10888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "category_encoders/woe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "349423"
},
{
"name": "Shell",
"bytes": "3755"
},
{
"name": "TeX",
"bytes": "3153"
}
],
"symlink_target": ""
}
|
import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
    If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append((fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
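# Illustrative sketch (comments only; the file layout below is hypothetical).
# Called from a project root that contains eventlog/__init__.py and
# eventlog/templates/eventlog/index.html,
#
#     find_package_data(where=".", package="")
#
# would return something like
#
#     {"eventlog": ["templates/eventlog/index.html"]}
#
# i.e. a mapping that can be passed straight to setup(package_data=...), as is
# done below with find_package_data(PACKAGE, only_in_packages=False).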
PACKAGE = "eventlog"
NAME = "django-eventlog-ca"
DESCRIPTION = "an event logger - fork of http://github.com/eldarion/eventlog"
AUTHOR = "Jeremy Satterfield, forked from Eldarion"
AUTHOR_EMAIL = "jsatterfield@consumeraffairs.com"
URL = "http://github.com/ConsumerAffairs/eventlog"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=read("README.rst"),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="BSD",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data=find_package_data(PACKAGE, only_in_packages=False),
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",
],
setup_requires=[
"django_extensions>=0.9"
],
zip_safe=False
)
|
{
"content_hash": "365efcfd2aab79bbfdadbcf56325b458",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 86,
"avg_line_length": 35.09285714285714,
"alnum_prop": 0.5623855078363525,
"repo_name": "ConsumerAffairs/django-eventlog-ca",
"id": "2389780c0f91c04437a3628845323f74782d7d69",
"size": "4913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22272"
}
],
"symlink_target": ""
}
|
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.poloniex({
#
# ↓ The "proxy" property setting below is for CORS-proxying only!
# Do not use it if you don't know what a CORS proxy is.
# https://github.com/ccxt/ccxt/wiki/Install#cors-access-control-allow-origin
# You should only use the "proxy" setting if you're having a problem with Access-Control-Allow-Origin
# In Python you rarely need to use it, if ever at all.
#
# 'proxy': 'https://cors-anywhere.herokuapp.com/',
#
# ↓ On the other hand, the "proxies" setting is for HTTP(S)-proxying (SOCKS, etc...)
# It is a standard method of sending your requests through your proxies
# This gets passed to the `python-requests` implementation directly
# You can also enable this with environment variables, as described here:
# http://docs.python-requests.org/en/master/user/advanced/#proxies
# The environment variables should be set before importing ccxt (!)
# This is the setting you should be using with synchronous version of ccxt in Python 2 and 3
#
'proxies': {
'http': 'http://10.10.1.10:3128', # no auth
'https': 'https://user:password@10.10.1.10:1080', # with auth
},
})
# your code goes here...
pprint(exchange.fetch_ticker('ETH/BTC'))
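# Alternatively (illustrative; the values mirror the 'proxies' dict above), the
# same proxies can be supplied through environment variables set *before*
# importing ccxt, since python-requests picks them up automatically:
#
#   export http_proxy="http://10.10.1.10:3128"
#   export https_proxy="https://user:password@10.10.1.10:1080"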
|
{
"content_hash": "00c5fdcd9aba9b2138aa91b64dea951b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 105,
"avg_line_length": 39.21621621621622,
"alnum_prop": 0.6815988973121985,
"repo_name": "ccxt/ccxt",
"id": "e9f851504fb2fb8a436ff1c9f681c272b78bac90",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/py/proxy-sync-python-requests-2-and-3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
}
|
import os
import sys
import tempfile
from argparse import ArgumentParser
from typing import Any
from django.conf import settings
from django.core.management.base import CommandError
from zerver.lib.export import export_realm_wrapper
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message, Reaction
class Command(ZulipBaseCommand):
help = """Exports all data from a Zulip realm
This command exports all significant data from a Zulip realm. The
result can be imported using the `./manage.py import` command.
Things that are exported:
* All user-accessible data in the Zulip database (Messages,
Streams, UserMessages, RealmEmoji, etc.)
* Copies of all uploaded files and avatar images along with
metadata needed to restore them even in the ab
Things that are not exported:
* Confirmation and PreregistrationUser (transient tables)
* Sessions (everyone will need to log in again post-export)
* Users' passwords and API keys (users will need to use SSO or reset password)
* Mobile tokens for APNS/GCM (users will need to reconnect their mobile devices)
* ScheduledEmail (Not relevant on a new server)
* RemoteZulipServer (Unlikely to be migrated)
    * third_party_api_results cache (this means re-rendering all old
messages could be expensive)
Things that will break as a result of the export:
* Passwords will not be transferred. They will all need to go
through the password reset flow to obtain a new password (unless
they intend to only use e.g. Google Auth).
* Users will need to log out and re-log in to the Zulip desktop and
mobile apps. The apps now all have an option on the login page
where you can specify which Zulip server to use; your users
should enter <domain name>.
* All bots will stop working since they will be pointing to the
wrong server URL, and all users' API keys have been rotated as
part of the migration. So to re-enable your integrations, you
will need to direct your integrations at the new server.
Usually this means updating the URL and the bots' API keys. You
can see a list of all the bots that have been configured for
your realm on the `/#organization` page, and use that list to
make sure you migrate them all.
The proper procedure for using this to export a realm is as follows:
* Use `./manage.py deactivate_realm` to deactivate the realm, so
nothing happens in the realm being exported during the export
process.
* Use `./manage.py export` to export the realm, producing a data
tarball.
* Transfer the tarball to the new server and unpack it.
* Use `./manage.py import` to import the realm
* Use `./manage.py reactivate_realm` to reactivate the realm, so
users can log in again.
* Inform the users about the things broken above.
We recommend testing by exporting without having deactivated the
realm first, to make sure you have the procedure right and
minimize downtime.
Performance: In one test, the tool exported a realm with hundreds
of users and ~1M messages of history with --threads=1 in about 3
hours of serial runtime (goes down to ~50m with --threads=6 on a
machine with 8 CPUs). Importing that same data set took about 30
minutes. But this will vary a lot depending on the average number
of recipients of messages in the realm, hardware, etc."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('--output',
dest='output_dir',
action="store",
default=None,
help='Directory to write exported data to.')
parser.add_argument('--threads',
dest='threads',
action="store",
default=settings.DEFAULT_DATA_EXPORT_IMPORT_PARALLELISM,
help='Threads to use in exporting UserMessage objects in parallel')
parser.add_argument('--public-only',
action="store_true",
help='Export only public stream messages and associated attachments')
parser.add_argument('--consent-message-id',
dest="consent_message_id",
action="store",
default=None,
type=int,
help='ID of the message advertising users to react with thumbs up')
parser.add_argument('--upload',
action="store_true",
help="Whether to upload resulting tarball to s3 or LOCAL_UPLOADS_DIR")
parser.add_argument('--delete-after-upload',
action="store_true",
help='Automatically delete the local tarball after a successful export')
self.add_realm_args(parser, True)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
output_dir = options["output_dir"]
public_only = options["public_only"]
consent_message_id = options["consent_message_id"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="zulip-export-")
else:
output_dir = os.path.realpath(os.path.expanduser(output_dir))
if os.path.exists(output_dir):
if os.listdir(output_dir):
raise CommandError(
f"Refusing to overwrite nonempty directory: {output_dir}. Aborting...",
)
else:
os.makedirs(output_dir)
tarball_path = output_dir.rstrip("/") + ".tar.gz"
try:
os.close(os.open(tarball_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o666))
except FileExistsError:
raise CommandError(f"Refusing to overwrite existing tarball: {tarball_path}. Aborting...")
print(f"\033[94mExporting realm\033[0m: {realm.string_id}")
num_threads = int(options['threads'])
if num_threads < 1:
raise CommandError('You must have at least one thread.')
if public_only and consent_message_id is not None:
raise CommandError('Please pass either --public-only or --consent-message-id')
if consent_message_id is not None:
try:
message = Message.objects.get(id=consent_message_id)
except Message.DoesNotExist:
raise CommandError("Message with given ID does not exist. Aborting...")
if message.last_edit_time is not None:
raise CommandError("Message was edited. Aborting...")
# Since the message might have been sent by
# Notification Bot, we can't trivially check the realm of
# the message through message.sender.realm. So instead we
# check the realm of the people who reacted to the message
# (who must all be in the message's realm).
reactions = Reaction.objects.filter(message=message,
# outbox = 1f4e4
emoji_code="1f4e4",
reaction_type="unicode_emoji")
for reaction in reactions:
if reaction.user_profile.realm != realm:
raise CommandError("Users from a different realm reacted to message. Aborting...")
print(f"\n\033[94mMessage content:\033[0m\n{message.content}\n")
print(f"\033[94mNumber of users that reacted outbox:\033[0m {len(reactions)}\n")
def percent_callback(bytes_transferred: Any) -> None:
sys.stdout.write('.')
sys.stdout.flush()
# Allows us to trigger exports separately from command line argument parsing
export_realm_wrapper(realm=realm, output_dir=output_dir,
threads=num_threads, upload=options['upload'],
public_only=public_only,
delete_after_upload=options["delete_after_upload"],
percent_callback=percent_callback,
consent_message_id=consent_message_id)
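# Example invocation (illustrative; the realm string_id and output path are
# made up):
#
#   ./manage.py export -r zulip --output /tmp/zulip-export --threads 6
#
# The realm option is added by self.add_realm_args(parser, True) above; see the
# help text at the top of this class for the full deactivate -> export ->
# import -> reactivate procedure.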
|
{
"content_hash": "54894efb40c6c49aab9a5bf28f000888",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 102,
"avg_line_length": 46.675824175824175,
"alnum_prop": 0.6136550912301354,
"repo_name": "brainwane/zulip",
"id": "0226b79e000a3a07f0c29e38cf066cbfdb75e3d0",
"size": "8495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/management/commands/export.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
from collections import OrderedDict
from doctest import DocTestCase
from io import StringIO
from math import ceil
import sys
import time
import traceback
from unittest.result import failfast
from unittest import TestCase
from green.output import Colors, debug
from green.version import pretty_version
# introduced in Python 3
try:
from shutil import get_terminal_size
except ImportError: # pragma: no cover
from backports.shutil_get_terminal_size import get_terminal_size
terminal_width, _ignored = get_terminal_size()
def proto_test(test):
"""
If test is a ProtoTest, I just return it. Otherwise I create a ProtoTest
out of test and return it.
"""
if isinstance(test, ProtoTest):
return test
else:
return ProtoTest(test)
def proto_error(err):
"""
If err is a ProtoError, I just return it. Otherwise I create a ProtoError
out of err and return it.
"""
if isinstance(err, ProtoError):
return err
else:
return ProtoError(err)
class ProtoTest:
"""
I take a full-fledged TestCase and preserve just the information we need
and can pass between processes.
"""
def __init__(self, test=None):
self.module = ""
self.class_name = ""
self.method_name = ""
self.docstr_part = ""
self.subtest_part = ""
self.test_time = "0.0"
# We need to know that this is a doctest, because doctests are very
# different than regular test cases in many ways, so they get special
# treatment inside and outside of this class.
self.is_doctest = False
# Teardown handling is a royal mess
self.is_class_or_module_teardown_error = False
# Is this a subtest?
if getattr(test, "_subDescription", None):
self.subtest_part = " " + test._subDescription()
test = test.test_case
# Is this a DocTest?
if isinstance(test, DocTestCase):
self.is_doctest = True
self.name = test._dt_test.name
# We had to inject this in green/loader.py -- this is the test
# module that specified that we should load doctests from some
# other module -- so that we'll group the doctests with the test
# module that specified that we should load them.
self.module = test.__module__
self.class_name = "DocTests via `doctest_modules = [...]`"
# I'm not sure this will be the correct way to get the method name
# in all cases.
self.method_name = self.name.split(".")[1]
self.filename = test._dt_test.filename
self.lineno = test._dt_test.lineno
# Is this a TestCase?
elif isinstance(test, TestCase):
self.module = test.__module__
self.class_name = test.__class__.__name__
self.method_name = str(test).split()[0]
# docstr_part strips initial whitespace, then combines all lines
# into one string until the first completely blank line in the
# docstring
doc_segments = []
if getattr(test, "_testMethodDoc", None):
for line in test._testMethodDoc.lstrip().split("\n"):
line = line.strip()
if not line:
break
doc_segments.append(line)
self.docstr_part = " ".join(doc_segments)
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def __hash__(self):
return hash(self.dotted_name)
def __str__(self):
return self.dotted_name
@property
def dotted_name(self, ignored=None):
if self.is_doctest or self.is_class_or_module_teardown_error:
return self.name
return (
self.module
+ "."
+ self.class_name
+ "."
+ self.method_name
+ self.subtest_part
)
def getDescription(self, verbose):
# Classes or module teardown errors
if self.is_class_or_module_teardown_error:
return self.name
# Doctests
if self.is_doctest:
if verbose == 2:
return self.name
elif verbose > 2:
return self.name + " -> " + self.filename + ":" + str(self.lineno)
return ""
# Regular tests
if verbose == 2:
return self.method_name + self.subtest_part
elif verbose == 3:
return (self.docstr_part + self.subtest_part) or self.method_name
elif verbose > 3:
if self.docstr_part + self.subtest_part:
return self.method_name + ": " + self.docstr_part + self.subtest_part
else:
return self.method_name
return ""
class ProtoError:
"""
I take a full-fledged test error and preserve just the information we need
and can pass between processes.
"""
def __init__(self, err=None):
self.traceback_lines = traceback.format_exception(*err)
def __str__(self):
return "\n".join(self.traceback_lines)
class BaseTestResult(object): # Breaks subclasses in 2.7 not inheriting object
"""
I am inherited by ProtoTestResult and GreenTestResult.
"""
def __init__(self, stream, colors):
self.stdout_output = OrderedDict()
self.stderr_errput = OrderedDict()
self.stream = stream
self.colors = colors
def recordStdout(self, test, output):
"""
Called with stdout that the suite decided to capture so we can report
the captured output somewhere.
"""
if output:
test = proto_test(test)
self.stdout_output[test] = output
def recordStderr(self, test, errput):
"""
Called with stderr that the suite decided to capture so we can report
the captured "errput" somewhere.
"""
if errput:
test = proto_test(test)
self.stderr_errput[test] = errput
def displayStdout(self, test):
"""
Displays AND REMOVES the output captured from a specific test. The
removal is done so that this method can be called multiple times
without duplicating results output.
"""
test = proto_test(test)
if test.dotted_name in self.stdout_output:
self.stream.write(
"\n{} for {}\n{}".format(
self.colors.yellow("Captured stdout"),
self.colors.bold(test.dotted_name),
self.stdout_output[test],
)
)
del self.stdout_output[test]
def displayStderr(self, test):
"""
Displays AND REMOVES the errput captured from a specific test. The
removal is done so that this method can be called multiple times
without duplicating results errput.
"""
test = proto_test(test)
if test.dotted_name in self.stderr_errput:
self.stream.write(
"\n{} for {}\n{}".format(
self.colors.yellow("Captured stderr"),
self.colors.bold(test.dotted_name),
self.stderr_errput[test],
)
)
del self.stderr_errput[test]
class ProtoTestResult(BaseTestResult):
"""
I'm the TestResult object for a single unit test run in a process.
"""
def __init__(self, start_callback=None, finalize_callback=None):
super(ProtoTestResult, self).__init__(None, None)
self.start_callback = start_callback
self.finalize_callback = finalize_callback
self.finalize_callback_called = False
self.pickle_attrs = [
"errors",
"expectedFailures",
"failures",
"passing",
"pickle_attrs",
"shouldStop",
"skipped",
"stderr_errput",
"stdout_output",
"unexpectedSuccesses",
"test_time",
]
self.failfast = False # Because unittest inspects the attribute
self.reinitialize()
def reinitialize(self):
self.shouldStop = False
self.errors = []
self.expectedFailures = []
self.failures = []
self.passing = []
self.skipped = []
self.unexpectedSuccesses = []
self.test_time = ""
def __repr__(self): # pragma: no cover
return (
"errors"
+ str(self.errors)
+ ", "
+ "expectedFailures"
+ str(self.expectedFailures)
+ ", "
+ "failures"
+ str(self.failures)
+ ", "
+ "passing"
+ str(self.passing)
+ ", "
+ "skipped"
+ str(self.skipped)
+ ", "
+ "unexpectedSuccesses"
+ str(self.unexpectedSuccesses)
+ ", "
+ "test_time"
+ str(self.test_time)
)
def __getstate__(self):
"""
Prevent the callback functions from getting pickled
"""
result_dict = {}
for pickle_attr in self.pickle_attrs:
result_dict[pickle_attr] = self.__dict__[pickle_attr]
return result_dict
def __setstate__(self, dict):
"""
Since the callback functions weren't pickled, we need to init them
"""
self.__dict__.update(dict)
self.start_callback = None
self.finalize_callback = None
def startTest(self, test):
"""
Called before each test runs
"""
test = proto_test(test)
self.start_time = time.time()
self.reinitialize()
if self.start_callback:
self.start_callback(test)
def stopTest(self, test):
"""
Called after each test runs
"""
self.test_time = str(time.time() - self.start_time)
def finalize(self):
"""
I am here so that after the GreenTestSuite has had a chance to inject
the captured stdout/stderr back into me, I can relay that through to
the worker process's poolRunner who will send me back up to the parent
process.
"""
if self.finalize_callback:
self.finalize_callback(self)
self.finalize_callback_called = True
def addSuccess(self, test):
"""
Called when a test passed
"""
self.passing.append(proto_test(test))
def addError(self, test, err):
"""
Called when a test raises an exception
"""
self.errors.append((proto_test(test), proto_error(err)))
def addFailure(self, test, err):
"""
Called when a test fails a unittest assertion
"""
self.failures.append((proto_test(test), proto_error(err)))
def addSkip(self, test, reason):
"""
Called when a test is skipped
"""
self.skipped.append((proto_test(test), reason))
def addExpectedFailure(self, test, err):
"""
Called when a test fails, and we expected the failure
"""
self.expectedFailures.append((proto_test(test), proto_error(err)))
def addUnexpectedSuccess(self, test):
"""
Called when a test passed, but we expected a failure
"""
self.unexpectedSuccesses.append(proto_test(test))
def addSubTest(self, test, subtest, err):
"""
Called at the end of a subtest no matter its result.
The test that runs the subtests calls the other test methods to
record its own result. We use this method to record each subtest as a
separate test result. It's very meta.
"""
if err is not None:
if issubclass(err[0], test.failureException):
self.addFailure(subtest, err)
else:
self.addError(subtest, err)
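# Illustrative sketch (comments only; not part of the original module): a
# ProtoTestResult survives a pickle round trip with its callback functions
# stripped, which is what lets the worker process's poolRunner ship results
# back to the parent process:
#
#     import pickle
#     r = ProtoTestResult()
#     r2 = pickle.loads(pickle.dumps(r))
#     assert r2.start_callback is None and r2.finalize_callback is None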
class GreenTestResult(BaseTestResult):
"""
Aggregates test results and outputs them to a stream.
"""
def __init__(self, args, stream):
super(GreenTestResult, self).__init__(stream, Colors(args.termcolor))
self.args = args
self.showAll = args.verbose > 1
self.dots = args.verbose == 1
self.verbose = args.verbose
self.last_module = ""
self.last_class = ""
self.first_text_output = ""
self.failfast = args.failfast
self.shouldStop = False
self.testsRun = 0
# Individual lists
self.errors = []
self.expectedFailures = []
self.failures = []
self.passing = []
self.skipped = []
self.unexpectedSuccesses = []
# Combination of all errors and failures
self.all_errors = []
# For exiting non-zero if we don't reach a certain level of coverage
self.coverage_percent = None
def __str__(self): # pragma: no cover
return (
"tests run: {}".format(self.testsRun)
+ ", "
+ "errors"
+ str(self.errors)
+ ", "
+ "expectedFailures"
+ str(self.expectedFailures)
+ ", "
+ "failures"
+ str(self.failures)
+ ", "
+ "passing"
+ str(self.passing)
+ ", "
+ "skipped"
+ str(self.skipped)
+ ", "
+ "unexpectedSuccesses"
+ str(self.unexpectedSuccesses)
)
def stop(self):
self.shouldStop = True
def tryRecordingStdoutStderr(self, test, proto_test_result, err=None):
if proto_test_result.stdout_output.get(test, False):
self.recordStdout(test, proto_test_result.stdout_output[test])
if proto_test_result.stderr_errput.get(test, False):
self.recordStderr(test, proto_test_result.stderr_errput[test])
# SubTest errors/failures (but not successes) generate a different err object, so we have to
# do some inspection to figure out which object has the output/errput
if (test.class_name == "SubTest") and err:
for t in proto_test_result.stdout_output.keys():
if test.dotted_name.startswith(t.dotted_name):
self.recordStdout(test, proto_test_result.stdout_output[t])
break
for t in proto_test_result.stderr_errput.keys():
if test.dotted_name.startswith(t.dotted_name):
self.recordStderr(test, proto_test_result.stderr_errput[t])
break
def addProtoTestResult(self, proto_test_result):
for test, err in proto_test_result.errors:
self.addError(test, err, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result, err)
for test, err in proto_test_result.expectedFailures:
self.addExpectedFailure(test, err, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result, err)
for test, err in proto_test_result.failures:
self.addFailure(test, err, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result, err)
for test in proto_test_result.passing:
self.addSuccess(test, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result)
for test, reason in proto_test_result.skipped:
self.addSkip(test, reason, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result)
for test in proto_test_result.unexpectedSuccesses:
self.addUnexpectedSuccess(test, proto_test_result.test_time)
self.tryRecordingStdoutStderr(test, proto_test_result)
def startTestRun(self):
"""
Called once before any tests run
"""
self.startTime = time.time()
# Really verbose information
if self.verbose > 2:
self.stream.writeln(self.colors.bold(pretty_version() + "\n"))
def stopTestRun(self):
"""
Called once after all tests have run
"""
self.stopTime = time.time()
self.timeTaken = self.stopTime - self.startTime
self.printErrors()
if self.args.run_coverage or self.args.quiet_coverage:
from coverage.misc import CoverageException
try:
self.stream.writeln()
self.args.cov.stop()
self.args.cov.save()
self.args.cov.combine()
self.args.cov.save()
if not self.args.quiet_coverage:
self.stream.coverage_percent = None
self.args.cov.report(
file=self.stream,
omit=self.args.omit_patterns,
show_missing=True,
)
self.coverage_percent = self.stream.coverage_percent
except CoverageException as ce:
if (len(ce.args) == 1) and ("No data to report" not in ce.args[0]):
raise ce
if self.testsRun and not self.shouldStop:
self.stream.writeln()
if self.shouldStop:
self.stream.writeln()
self.stream.writeln(
self.colors.yellow("Warning: Some tests may not have been run.")
)
self.stream.writeln()
self.stream.writeln(
"Ran %s test%s in %ss using %s process%s"
% (
self.colors.bold(str(self.testsRun)),
self.testsRun != 1 and "s" or "",
self.colors.bold("%.3f" % self.timeTaken),
self.colors.bold("%d" % self.args.processes),
self.args.processes != 1 and "es" or "",
)
)
self.stream.writeln()
results = [
(self.errors, "errors", self.colors.error),
(self.expectedFailures, "expected_failures", self.colors.expectedFailure),
(self.failures, "failures", self.colors.failing),
(self.passing, "passes", self.colors.passing),
(self.skipped, "skips", self.colors.skipped),
(
self.unexpectedSuccesses,
"unexpected_successes",
self.colors.unexpectedSuccess,
),
]
stats = []
for obj_list, name, color_func in results:
if obj_list:
stats.append("{}={}".format(name, color_func(str(len(obj_list)))))
if not stats:
self.stream.writeln(self.colors.failing("No Tests Found"))
else:
grade = self.colors.passing("OK")
if not self.wasSuccessful():
grade = self.colors.failing("FAILED")
self.stream.writeln("{} ({})".format(grade, ", ".join(stats)))
def startTest(self, test):
"""
Called before the start of each test
"""
# Get our bearings
test = proto_test(test)
current_module = test.module
current_class = test.class_name
# Output
if self.showAll:
# Module...if it changed.
if current_module != self.last_module:
self.stream.writeln(self.colors.moduleName(current_module))
# Class...if it changed.
if current_class != self.last_class:
self.stream.writeln(
self.colors.className(
self.stream.formatText(current_class, indent=1)
)
)
if self.stream.isatty():
# In the terminal, we will write a placeholder, and then
# modify the first character and rewrite it in color after
# the test has run.
self.first_text_output = self.stream.formatLine(
test.getDescription(self.verbose), indent=2
)
self.stream.write(self.colors.bold(self.first_text_output))
self.stream.flush()
# Set state for next time
if current_module != self.last_module:
self.last_module = current_module
if current_class != self.last_class:
self.last_class = current_class
def stopTest(self, test):
"""
Supposed to be called after each test, but as far as I can tell that's a
lie and this is simply never called.
"""
def _reportOutcome(self, test, outcome_char, color_func, err=None, reason=""):
self.testsRun += 1
test = proto_test(test)
if self.showAll:
if self.stream.isatty():
self.stream.write(self.colors.start_of_line())
# Can end up being different from the first time due to subtest
# information only being available after a test result comes in.
second_text_output = self.stream.formatLine(
test.getDescription(self.verbose), indent=2, outcome_char=outcome_char
)
if self.stream.isatty() and terminal_width: # pragma: no cover
cursor_rewind = (
int(ceil(float(len(self.first_text_output)) / terminal_width)) - 1
)
if cursor_rewind:
self.stream.write(self.colors.up(cursor_rewind))
self.stream.write(color_func(second_text_output))
if reason:
self.stream.write(color_func(" -- " + reason))
self.stream.writeln()
self.stream.flush()
elif self.dots:
self.stream.write(color_func(outcome_char))
self.stream.flush()
def addSuccess(self, test, test_time=None):
"""
Called when a test passed
"""
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
self.passing.append(test)
self._reportOutcome(test, ".", self.colors.passing)
@failfast
def addError(self, test, err, test_time=None):
"""
Called when a test raises an exception
"""
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
err = proto_error(err)
self.errors.append((test, err))
self.all_errors.append((test, self.colors.error, "Error", err))
self._reportOutcome(test, "E", self.colors.error, err)
@failfast
def addFailure(self, test, err, test_time=None):
"""
Called when a test fails a unittest assertion
"""
        # Special case: Catch Twisted's skips that come through as failures
# and treat them as skips instead
if len(err.traceback_lines) == 1:
if err.traceback_lines[0].startswith("UnsupportedTrialFeature"):
reason = eval(err.traceback_lines[0][25:])[1]
self.addSkip(test, reason)
return
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
err = proto_error(err)
self.failures.append((test, err))
self.all_errors.append((test, self.colors.error, "Failure", err))
self._reportOutcome(test, "F", self.colors.failing, err)
def addSkip(self, test, reason, test_time=None):
"""
Called when a test is skipped
"""
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
self.skipped.append((test, reason))
self._reportOutcome(test, "s", self.colors.skipped, reason=reason)
def addExpectedFailure(self, test, err, test_time=None):
"""
Called when a test fails, and we expected the failure
"""
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
err = proto_error(err)
self.expectedFailures.append((test, err))
self._reportOutcome(test, "x", self.colors.expectedFailure, err)
def addUnexpectedSuccess(self, test, test_time=None):
"""
Called when a test passed, but we expected a failure
"""
test = proto_test(test)
if test_time:
test.test_time = str(test_time)
self.unexpectedSuccesses.append(test)
self._reportOutcome(test, "u", self.colors.unexpectedSuccess)
def printErrors(self):
"""
Print a list of all tracebacks from errors and failures, as well as
captured stdout (even if the test passed, except with quiet_stdout
option).
"""
if self.dots:
self.stream.writeln()
# Skipped Test Report
if not self.args.no_skip_report:
for test, reason in self.skipped:
self.stream.writeln(
"\n{} {} - {}".format(
self.colors.blue("Skipped"),
self.colors.bold(test.dotted_name),
reason,
)
)
# Captured output for non-failing tests
if not self.args.quiet_stdout:
failing_tests = set([x[0] for x in self.all_errors])
for test in list(self.stdout_output) + list(self.stderr_errput):
if test not in failing_tests:
self.displayStdout(test)
self.displayStderr(test)
# Actual tracebacks and captured output for failing tests
for (test, color_func, outcome, err) in self.all_errors:
# Header Line
self.stream.writeln(
"\n" + color_func(outcome) + " in " + self.colors.bold(test.dotted_name)
)
# Traceback
if not self.args.no_tracebacks:
relevant_frames = []
for i, frame in enumerate(err.traceback_lines):
# Python2 tracebacks containing unicode need some special handling
# This doesn't always make it readable, but at least it doesn't
# crash
if sys.version_info[0] == 2: # pragma: no cover
try:
"".join([frame]) # intentionally trigger exceptions
except UnicodeDecodeError:
frame = frame.decode("utf-8")
debug(
"\n"
+ "*" * 30
+ "Frame {}:".format(i)
+ "*" * 30
+ "\n{}".format(self.colors.yellow(frame)),
level=3,
)
# Ignore useless frames
if self.verbose < 4:
if frame.strip() == "Traceback (most recent call last):":
continue
# Done with this frame, capture it.
relevant_frames.append(frame)
self.stream.write("".join(relevant_frames))
# Captured output for failing tests
self.displayStdout(test)
self.displayStderr(test)
def wasSuccessful(self):
"""
Tells whether or not the overall run was successful
"""
        if self.args.minimum_coverage is not None:
if self.coverage_percent < self.args.minimum_coverage:
self.stream.writeln(
self.colors.red(
"Coverage of {}% is below minimum level of {}%".format(
self.coverage_percent, self.args.minimum_coverage
)
)
)
return False
# fail if no tests are run.
if (
sum(
len(x)
for x in [
self.errors,
self.expectedFailures,
self.failures,
self.passing,
self.skipped,
self.unexpectedSuccesses,
]
)
== 0
):
return False
else:
return len(self.all_errors) + len(self.unexpectedSuccesses) == 0
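if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch (not part of the original module): wrap a plain
    # unittest TestCase in a ProtoTest to see the reduced, picklable
    # representation that green passes between processes.
    class _DemoCase(TestCase):
        def test_example(self):
            """A docstring that shows up at higher verbosity levels."""
            pass

    demo = proto_test(_DemoCase("test_example"))
    print(demo.dotted_name)        # "__main__._DemoCase.test_example" when run directly
    print(demo.getDescription(3))  # the first line of the docstring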
|
{
"content_hash": "94e626d34e989baf3f2f8b0efefe7554",
"timestamp": "",
"source": "github",
"line_count": 801,
"max_line_length": 100,
"avg_line_length": 35.449438202247194,
"alnum_prop": 0.5440394435640078,
"repo_name": "CleanCut/green",
"id": "457504c5295924e2d88f532a72766b2f7bf0cbe7",
"size": "28395",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "green/result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3377"
},
{
"name": "Python",
"bytes": "303401"
},
{
"name": "Shell",
"bytes": "4347"
}
],
"symlink_target": ""
}
|
from flask_wtf.csrf import CsrfProtect
csrf = CsrfProtect()
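# Illustrative sketch (assumption: an application-factory layout elsewhere in
# the project). The shared instance above is typically bound to the Flask app
# when the app is created:
#
#     from app.csrf import csrf
#
#     def create_app():
#         app = Flask(__name__)
#         csrf.init_app(app)
#         return app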
|
{
"content_hash": "5bce2de9822eb54c86694432391d9775",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7868852459016393,
"repo_name": "bitmotive/flask-boilerplate",
"id": "7a1b09ba24d295c821a0b4c6621c5ef93dcf3fc5",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/csrf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "96"
},
{
"name": "Python",
"bytes": "12173"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
}
|
import pushmanager.core.util
import tornado.gen
import tornado.web
from pushmanager.core.requesthandler import RequestHandler
class RequestServlet(RequestHandler):
@tornado.web.asynchronous
@tornado.web.authenticated
@tornado.gen.engine
def get(self):
request_id = pushmanager.core.util.get_int_arg(self.request, 'id')
if not request_id:
self.send_error(404)
response = yield tornado.gen.Task(
self.async_api_call,
"request",
{'id': request_id}
)
req = self.get_api_results(response)
if not req:
self.send_error()
self.render("request.html", page_title="Request #%d" % request_id, req=req)
|
{
"content_hash": "6ca7a8131464e2fd9cbbebc010b3195b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 83,
"avg_line_length": 28.74074074074074,
"alnum_prop": 0.5889175257731959,
"repo_name": "YelpArchive/pushmanager",
"id": "29c4d212e869f33e17a236e424bcdf15c979fc50",
"size": "776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pushmanager/servlets/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15672"
},
{
"name": "HTML",
"bytes": "33606"
},
{
"name": "JavaScript",
"bytes": "33757"
},
{
"name": "Makefile",
"bytes": "372"
},
{
"name": "PLpgSQL",
"bytes": "3157"
},
{
"name": "Python",
"bytes": "388878"
},
{
"name": "Shell",
"bytes": "1646"
}
],
"symlink_target": ""
}
|
from __future__ import division
import sys
import numpy as np
import scipy as sp
import scipy.optimize as opt
import time as time
from matplotlib import pyplot as plt
#DEMOGRAPHICS FUNCTIONS
def getDemographics(params, PrintAges, DiffDemog):
"""
Description:
-Imports data from csv files for initial populations, fertility rates, mortality rates, and net migrants.
        -Stores these data sets in their respective matrices, and calculates population distributions through year T.
-NOTE: FOR NOW THIS FUNCTION ONLY USES DATA FOR THE USA. NEEDS TO EVENTUALLY ADD MORE COUNTRIES
Inputs:
-None, but uses the global variables T, T_1, StartFertilityAge, EndFertilityAge, StartDyingAge, and MaxImmigrantAge
Objects in Function:
-USAPopdata: (S+1) vector that has the initial population of the U.S straight from the csv
-USAFertdata: (T_1,EndFertilityAge+2-StartFertilityAge) vector that has U.S. fertility straight from the csv
-USAMortdata: (T_1,S+1-StartDyingAge) vector that has U.S. mortality straight from the csv
-USAMigdata: (MaxImmigrantAge) vector that contains the number of net U.S. migrants straight from the csv
-g_N: (T) vector that contains the exogenous population growth rates
-g_A: Constant that represents the technical growth rate
-l_endowment: (T) vector labor endowment per household
-f_bar: (I) vector that represents the fertility rate after period T_1
-p_bar: (I) vector that represents the mortality rate after period T_1
-m_bar: (I) vector that represents the immigration rate after period T_1
Output:
-FertilityRates: Numpy array that contains fertilty rates for all countries, ages, and years
-MortalityRates: Numpy array that contains mortality rates for all countries, ages, and years
-Migrants: Numpy array that contains net migration for all countries and ages
-N_matrix: Numpy array that contains population numbers for all countries, ages, and years
        -Nhat_matrix: Numpy array that contains the population percentage for all countries, ages, and years
"""
I, S, T, T_1, StartFertilityAge, EndFertilityAge, StartDyingAge, MaxImmigrantAge, g_A = params
#Initializes demographics matrices
N_matrix = np.zeros((I, S+1, T+S+1))
Nhat_matrix = np.zeros((I, S+1, T+S+1))
#N_temp = np.zeros((I, S+1, T+S+1))
FertilityRates = np.zeros((I, S+1, T+S+1))
MortalityRates = np.zeros((I, S+1, T+S+1))
Migrants = np.zeros((I, S+1, T+S+1))
g_N = np.zeros(T+S+1)
if PrintAges:
print "T =", T
print "T_1", T_1
print "StartFertilityAge", StartFertilityAge
print "EndFertilityAge", EndFertilityAge
print "StartDyingAge", StartDyingAge
print "MaxImmigrantAge", MaxImmigrantAge
if DiffDemog:
if I > 7:
sys.exit("ERROR!!! We can't have more than 7 Countries without unique data. Change either parameter I so it is less than 8 or change DiffDemog to False")
countrynames = ["usa", "eu", "japan", "china", "india", "russia", "korea"]
for i in range(I):
#print "Got demographics for", countrynames[i]
N_matrix[i,:,0] = np.loadtxt(("Data_Files/population.csv"),delimiter=',',skiprows=1, usecols=[i+1])[:S+1]*1000
FertilityRates[i,StartFertilityAge:EndFertilityAge+1,:T_1] = np.transpose(np.loadtxt(str("Data_Files/" + countrynames[i] + "_fertility.csv"),delimiter=',',skiprows=1, usecols=range(1,EndFertilityAge+2-StartFertilityAge))[48:48+T_1,:])
MortalityRates[i,StartDyingAge:-1,:T_1] = np.transpose(np.loadtxt(str("Data_Files/" + countrynames[i] + "_mortality.csv"),delimiter=',',skiprows=1, usecols=range(1,S+1-StartDyingAge))[:T_1,:])
Migrants[i,:MaxImmigrantAge,:T_1] = np.einsum("s,t->st",np.loadtxt(("Data_Files/net_migration.csv"),delimiter=',',skiprows=1, usecols=[i+1])[:MaxImmigrantAge]*100, np.ones(T_1))
else:
#Imports and scales data for the USA. Imports a certain number of generations according to the value of S
USAPopdata = np.loadtxt(("Data_Files/population.csv"),delimiter=',',skiprows=1, usecols=[1])[:S+1]*1000
USAFertdata = np.loadtxt(("Data_Files/usa_fertility.csv"),delimiter=',',skiprows=1, usecols=range(1,EndFertilityAge+2-StartFertilityAge))[48:48+T_1,:]
USAMortdata = np.loadtxt(("Data_Files/usa_mortality.csv"),delimiter=',',skiprows=1, usecols=range(1,S+1-StartDyingAge))[:T_1,:]
USAMigdata = np.loadtxt(("Data_Files/net_migration.csv"),delimiter=',',skiprows=1, usecols=[1])[:MaxImmigrantAge]*100
#NOTE: For now we set fertility, mortality, number of migrants, and initial population the same for all countries.
#Sets initial total population
N_matrix[:,:,0] = np.tile(USAPopdata, (I, 1))
#Fertility Will be equal to 0 for all ages that don't bear children
FertilityRates[:,StartFertilityAge:EndFertilityAge+1,:T_1] = np.einsum("ts,i->ist", USAFertdata, np.ones(I))
#Mortality will be equal to 0 for all young people who aren't old enough to die
MortalityRates[:,StartDyingAge:-1,:T_1] = np.einsum("ts,it->ist", USAMortdata, np.ones((I,T_1)))
#The number of migrants is the same for each year
Migrants[:,:MaxImmigrantAge,:T_1] = np.einsum("s,it->ist", USAMigdata, np.ones((I,T_1)))
Nhat_matrix[:,:,0] = N_matrix[:,:,0]/np.sum(N_matrix[:,:,0])
N_temp = np.ones((I, S+1))/(I*S)
#The last generation dies with probability 1
MortalityRates[:,-1,:] = np.ones((I, T+S+1))
#Gets steady-state values
f_bar = FertilityRates[:,:,T_1-1]
p_bar = MortalityRates[:,:,T_1-1]
m_bar = Migrants[:,:,T_1-1]
#Set to the steady state for every year beyond year T_1
FertilityRates[:,:,T_1:] = np.tile(np.expand_dims(f_bar, axis=2), (1,1,T-T_1+S+1))
MortalityRates[:,:,T_1:] = np.tile(np.expand_dims(p_bar, axis=2), (1,1,T-T_1+S+1))
Migrants[:,:,T_1:] = np.tile(np.expand_dims(m_bar, axis=2), (1,1,T-T_1+S+1))
#Gets the initial immigration rate
ImmigrationRate = Migrants[:,:,0]/N_matrix[:,:,0]
#Gets initial world population growth rate
g_N[0] = 0.
#Calculates population numbers for each country
for t in range(1,T+S+1):
#Gets the total number of children and percentage of children and stores them in generation 0 of their respective matrices
#See equations 2.1 and 2.10
N_matrix[:,0,t] = np.sum((N_matrix[:,:,t-1]*FertilityRates[:,:,t-1]),axis=1)
N_temp[:,0] = np.sum((Nhat_matrix[:,:,t-1]*FertilityRates[:,:,t-1]),axis=1)
#Finds the immigration rate for each year
ImmigrationRate = Migrants[:,:,t-1]/N_matrix[:,:,t-1]
#Gets the population distribution and percentage of population distribution for the next year, taking into account immigration and mortality
#See equations 2.2 and 2.11
N_matrix[:,1:,t] = N_matrix[:,:-1,t-1]*(1+ImmigrationRate[:,:-1]-MortalityRates[:,:-1,t-1])
Nhat_matrix[:,:,t] = N_matrix[:,:,t]/np.sum(N_matrix[:,:,t])
N_temp[:,1:] = Nhat_matrix[:,:-1,t-1]*(1+ImmigrationRate[:,:-1]-MortalityRates[:,:-1,t-1])
#Gets the growth rate for the next year
g_N[t] = np.sum(N_temp[:,:])-1
#Gets labor endowment per household. For now it grows at a constant rate g_A
l_endowment = np.cumsum(np.ones(T)*g_A)
return FertilityRates, MortalityRates, Migrants, N_matrix, Nhat_matrix
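# --- Illustrative addition ---
# A minimal, hedged usage sketch for getDemographics. The parameter values
# below are placeholders (not the model's calibration) and the call assumes
# the Data_Files/*.csv inputs from the repository are present on disk.
def _demo_getDemographics():
    I, S, T, T_1 = 7, 80, 100, 20
    StartFertilityAge, EndFertilityAge = 23, 45
    StartDyingAge, MaxImmigrantAge = 65, 65
    g_A = 0.015
    params = (I, S, T, T_1, StartFertilityAge, EndFertilityAge,
              StartDyingAge, MaxImmigrantAge, g_A)
    return getDemographics(params, PrintAges=False, DiffDemog=True)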
def plotDemographics(params, index, years, name, N_matrix):
"""
Description:
Plots the population distribution of a given country for any number of specified years
Inputs:
index: Integer that indicates which country to plot
years: List that contains each year to plot
name: String of the country's name. Used in the legend of the plot
Outputs:
None
"""
S, T = params
for y in range(len(years)):
yeartograph = years[y]
#Checks to make sure we haven't requested to plot a year past the max year
if yeartograph <= T:
plt.plot(range(S+1), N_matrix[index,:,yeartograph])
else:
print "\nERROR: WE HAVE ONLY SIMULATED UP TO THE YEAR", T
time.sleep(15)
plt.title(str(name + " Population Distribution"))
plt.legend(years)
plt.show()
plt.clf()
def getBequests(params, assets_old):
"""
Description:
-Gets the value of the bequests given to each generation
Inputs:
-assets: Assets for each generation in a given year
-current_t: Integer that indicates the current year. Used to pull information from demographics global matrices like FertilityRates
Objects in Function:
-BQ[I,]: Total assets of those who died this year in each country
-num_bequest_receivers[I,]: Number of bequest-eligible (working-age) people in each country
-bq_Distribution[I,]: Per-person bequest in each country
Output:
-bq: Numpy array that contains the number of bequests for each generation in each country.
"""
I, S, T, StartFertilityAge, StartDyingAge, pop_old, pop_working, current_mort = params
#Initializes bequests
bq = np.zeros((I, S+1))
#Gets the total assets of the people who died this year
BQ = np.sum(assets_old*current_mort*pop_old, axis=1)
#Distributes the total assets equally among the eligible population for each country
#NOTE: This will likely change as we get a more complex function for distributing the bequests
num_bequest_receivers = np.sum(pop_working, axis=1)
bq_Distribution = BQ/num_bequest_receivers
bq[:,StartFertilityAge:StartDyingAge+1] = np.einsum("i,s->is", bq_Distribution, np.ones(StartDyingAge+1-StartFertilityAge))
return bq
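# --- Illustrative addition ---
# Tiny numeric sketch of the bequest rule above; all numbers are made up.
# With 1.0 of assets per old person, 4 old cohorts and 10% mortality, each
# country's pooled bequests are BQ = 0.4, split evenly over 6 working-age
# cohorts, so every eligible person receives 0.4/6.
def _demo_getBequests():
    I, S, T = 2, 10, 20
    StartFertilityAge, StartDyingAge = 2, 7
    pop_old = np.ones((I, S + 1 - StartDyingAge))
    pop_working = np.ones((I, StartDyingAge + 1 - StartFertilityAge))
    current_mort = np.ones((I, S + 1 - StartDyingAge)) * 0.1
    assets_old = np.ones((I, S + 1 - StartDyingAge))
    params = (I, S, T, StartFertilityAge, StartDyingAge,
              pop_old, pop_working, current_mort)
    return getBequests(params, assets_old)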
def hatvariables(Kpathreal, kfpathreal, Nhat_matrix):
#THIS FUNCTION HAS EQUATIONS 2.13-2.16 AND 2.19-2.20, BUT STILL NEEDS TO BE INCORPORATED INTO THE REST OF THE MODEL TO COMPLETELY TEST
#We are only using up until T periods rather than T+S+1 since Nhat only goes out to T
Kpath = Kpathreal[:,:T]
kfpath = kfpathreal[:,:T]
temp_e = np.ones((I, S+1, T))#THIS SHOULD ONLY BE UNTIL WE GET S GENERATIONS RATHER THAN S-1
n = np.sum(temp_e[:,:,:T]*Nhat_matrix, axis=1)
Ypath = (Kpath**alpha) * (np.einsum("i,it->it", A, n)**(1-alpha))
rpath = alpha * Ypath / Kpath
wpath = (1-alpha) * Ypath / n
"""
#NOTE:This goes in the get_householdchoices_path function
c_path = np.zeros((I, S))
asset_path = np.zeros((I, S+1))
c_path[:,0] = c_1
asset_path[:,0] = starting_assets
for s in range(1,S):
c_path[:,s] = ((beta * (1 + rpath_chunk[:,s] - delta))**(1/sigma) * c_path[:,s-1])/np.exp(g_A)
asset_path[:,s] = (wpath_chunk[:,s]*e[:,0,s-1] + (1 + rpath_chunk[:,s-1] - delta)*asset_path[:,s-1] + bq_chunk - c_path[:,s-1])/np.exp(g_A)
asset_path[:,s+1] = wpath_chunk[:,s]*e_chunk[:,s] + (1 + rpath_chunk[:,s] - delta)*asset_path[:,s] - c_path[:,s]
"""
#STEADY STATE FUNCTIONS
def get_kd(assets, kf):
"""
Description: Calculates the amount of domestic capital that remains in the domestic country
Inputs:
-assets[I,S,T+S+1]: Matrix of assets
-kf[I,T+S+1]: Domestic capital held by foreigners.
Objects in Function:
NONE
Outputs:
-kd[I,T+S+1]: Capital that is used in the domestic country
"""
kd = np.sum(assets[:,1:-1], axis=1) - kf
return kd
def get_n(e):
"""
Description: Calculates the total labor productivity for each country
Inputs:
-e[I,S,T]:Matrix of labor productivities
Objects in Function:
-NONE
Outputs:
-n[I,S+T+1]: Total labor productivity
"""
n = np.sum(e, axis=1)
return n
def get_Y(params, kd, n):
"""
Description: Calculates the output timepath
Inputs:
-params (2) tuple: Contains the necessary parameters used
-kd[I,T+S+1]: Domestic held capital stock
-n[I,S+T+1]: Summed labor productivity
Objects in Function:
-A[I]: Technology for each country
-alpha: Production share of capital
Outputs:
-Y[I,S+T+1]: Timepath of output
"""
alpha, A = params
if kd.ndim == 1:
Y = (kd**alpha) * ((A*n)**(1-alpha))
elif kd.ndim == 2:
Y = (kd**alpha) * (np.einsum("i,is->is", A, n)**(1-alpha))
return Y
def get_r(alpha, Y, kd):
"""
Description: Calculates the rental rates.
Inputs:
-alpha (scalar): Production share of capital
-Y[I,T+S+1]: Timepath of output
-kd[I,T+S+1]: Timepath of domestically owned capital
Objects in Function:
-NONE
Outputs:
-r[I,T+S+1]: Timepath of rental rates
"""
r = alpha * Y / kd
return r
def get_w(alpha, Y, n):
"""
Description: Calculates the wage timepath.
Inputs:
-alpha (scalar): Production share of output
-Y[I,T+S+1]: Output timepath
-n[I,T+S+1]: Total labor productivity timepath
Objects in Function:
-NONE
Outputs:
-w[I,T+S+1]: Wage timepath
"""
w = (1-alpha) * Y / n
return w
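# --- Illustrative addition ---
# A small sketch tying the production block together with made-up numbers.
# With Cobb-Douglas output Y = kd**alpha * (A*n)**(1-alpha), the prices below
# satisfy r*kd + w*n = Y (constant returns to scale exhausts output).
def _demo_production_block():
    alpha = 0.3
    A = np.array([1.0, 1.2])
    kd = np.array([4.0, 5.0])
    n = np.array([2.0, 2.5])
    Y = get_Y((alpha, A), kd, n)
    r = get_r(alpha, Y, kd)
    w = get_w(alpha, Y, n)
    return Y, r, w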
def get_cvecss(params, w, r, assets):
"""
Description: Calculates the consumption vector
Inputs:
-params (tuple 2): Tuple that contains the necessary parameters
-w[I,T+S+1]: Wage timepath
-r[I,T+S+1]: Rental Rate timepath
-assets[I,S,T+S+1]: Assets timepath
Objects in Function:
-e[I,S,T+S+1]: Matrix of labor productivities
-delta (parameter): Depreciation rate
Outputs:
-c_vec[I,T+S+1]:Vector of consumption.
"""
e, delta = params
c_vec = np.einsum("i, is -> is", w, e[:,:,0])\
+ np.einsum("i, is -> is",(1 + r - delta) , assets[:,:-1])\
- assets[:,1:]
return c_vec
def check_feasible(K, Y, w, r, c):
"""
Description: Checks the feasibility of the inputs.
Inputs:
-K[I,T+S+1]: Capital stock timepath
-y[I,T+S+1]: Output timepath
-w[I,T+S+1]: Wage timepath
-r[I,T+S+1]: Rental rate timepath
-c[I,T+S+1]: consumption timepath
Objects in Function:
NONE
Outputs:
-Feasible (Boolean): Whether or not the inputs are feasible.
"""
Feasible = True
if np.any(K<0):
Feasible=False
print "WARNING! INFEASABLE VALUE ENCOUNTERED IN K!"
print "The following coordinates have infeasible values:"
print np.argwhere(K<0)
if np.any(Y<0):
Feasible=False
print "WARNING! INFEASABLE VALUE ENCOUNTERED IN Y!"
print "The following coordinates have infeasible values:"
print np.argwhere(Y<0)
if np.any(r<0):
Feasible=False
print "WARNING! INFEASABLE VALUE ENCOUNTERED IN r!"
print "The following coordinates have infeasible values:"
print np.argwhere(r<0)
if np.any(w<0):
Feasible=False
print "WARNING! INFEASABLE VALUE ENCOUNTERED IN w!"
print "The following coordinates have infeasible values:"
print np.argwhere(w<0)
if np.any(c<0):
Feasible=False
print "WARNING! INFEASABLE VALUE ENCOUNTERED IN c_vec!"
print "The following coordinates have infeasible values:"
print np.argwhere(c<0)
return Feasible
def SteadyStateSolution(guess, I, S, beta, sigma, delta, alpha, e, A):
"""
Description:
-This is the function that will be optimized by fsolve.
Inputs:
-guess[I,S+1]: vector that pieced together from assets and kf.
Objects in Function:
-kf[I,]:Foreign capital held by foreigners in each country
-assets[I,S]: Asset path for each country
-k[I,]:Capital for each country
-n[I,]:Labor for each country
-Y[I,]:Output for each country
-r[I,]:Rental Rate for each country
-w[I,]:Wage for each country
-c_vec[I, S]: Consumption by cohort in each country
-Euler_c[I, S-1]: Corresponds to (1.16)
-Euler_r[I,]: Corresponds to (1.17)
-Euler_kf(Scalar): Corresponds to (1.18)
Output:
-all_Euler[I*S,]: Similar to guess, it's a vector that has both assets and kf.
"""
#Takes a 1D guess of length I*S and reshapes it to match what the original input into the fsolve looked like since fsolve flattens numpy arrays
guess = np.reshape(guess[:,np.newaxis], (I, S))
#Appends a I-length vector of zeros on ends of assets to represent no assets when born and no assets when dead
assets = np.column_stack((np.zeros(I), guess[:,:-1], np.zeros(I)))
#Sets kf as the last element of the guess vector for each country
kf = guess[:,-1]
#Getting the other variables
kd = get_kd(assets, kf)
n = get_n(e[:,:,0])
Yparams = (alpha, A)
Y = get_Y(Yparams, kd, n)
r = get_r(alpha, Y, kd)
w = get_w(alpha, Y, n)
cparams = (e, delta)
c_vec = get_cvecss(cparams, w, r, assets)
K = kd+kf
Feasible = check_feasible(K, Y, w, r, c_vec)
if np.any(c_vec<0): #Punishes the poor choice of negative values in the fsolve
all_Euler=np.ones((I*S-1))*9999.
else:
#Gets Euler equations
Euler_c = c_vec[:,:-1] ** (-sigma) - beta * c_vec[:,1:] ** (-sigma) * (1 + r[0] - delta)
Euler_r = r[1:] - r[0]
Euler_kf = np.sum(kf)
#Makes a new 1D vector of length I*S that contains all the Euler equations
all_Euler = np.append(np.append(np.ravel(Euler_c), np.ravel(Euler_r)), Euler_kf)
return all_Euler
def getSteadyState(params, assets_init, kf_init):
"""
Description:
This takes the initial guess for assets and kf. Since the function
returns a matrix, this unpacks the individual parts.
Inputs:
-assets_init[I,S-1]:Intial guess for asset path
-kf_init[I]:Initial guess on foreigner held capital
Objects in Function:
-guess[I,S]: A combined matrix that has both assets_init and kf_init
-ss[S*I,]: The result from optimization.
Outputs:
-assets_ss[I,S-1]:Calculated assets steady state
-kf_ss[I,]:Calculated domestic capital owned by foreigners steady state
-k_ss[I]: Calculated total capital stock steady state
-n_ss[I]: Summed labor productivities steady state
-y_ss[I]: Calculated output steady state
-r_ss[I]: calculated steady state rental rate
-w_ss[I]: calculated steady state wage rate
-c_vec_ss[I, S]: Calculated steady state consumption
"""
I, S, beta, sigma, delta, alpha, e, A = params
#Merges the assets and kf together into one matrix that can be inputted into the fsolve function
guess = np.column_stack((assets_init, kf_init))
#Solves for the steady state
solver_params = (I, S, beta, sigma, delta, alpha, e, A)
ss = opt.fsolve(SteadyStateSolution, guess, args=solver_params)
#Reshapes the ss code
ss = np.array(np.split(ss, I))
#Breaks down the steady state matrix into the two separate assets and kf matrices.
assets_ss = np.column_stack((np.zeros(I), ss[:,:-1], np.zeros(I)))
kf_ss = ss[:,-1]
#Gets the other steady-state values using assets and kf
kd_ss = get_kd(assets_ss, kf_ss)
n_ss = get_n(e[:,:,0])
Yparams = (alpha, A)
Y_ss = get_Y(Yparams, kd_ss, n_ss)
r_ss = get_r(alpha, Y_ss, kd_ss)
w_ss = get_w(alpha, Y_ss, n_ss)
cparams = (e, delta)
c_vec_ss = get_cvecss(cparams, w_ss, r_ss, assets_ss)
print "\nSteady State Found!\n"
return assets_ss, kf_ss, kd_ss, n_ss, Y_ss, r_ss[0], w_ss, c_vec_ss
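# --- Illustrative addition ---
# Hedged usage sketch for the steady-state solver. Every number below is a
# placeholder, the labor productivity matrix is flat, and convergence of
# fsolve is not guaranteed for arbitrary parameter choices.
def _demo_getSteadyState():
    I, S = 2, 10
    beta, sigma, delta, alpha = 0.95, 3.0, 0.1, 0.3
    e = np.ones((I, S, 1))
    A = np.ones(I)
    assets_init = np.ones((I, S - 1)) * 0.1
    kf_init = np.zeros(I)
    params = (I, S, beta, sigma, delta, alpha, e, A)
    return getSteadyState(params, assets_init, kf_init)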
#TIMEPATH FUNCTIONS
def get_initialguesses(params, assets_ss, kf_ss, w_ss, r_ss):
"""
Description:
With the parameters and steady state values, this function creates
initial guesses in a linear path.
Inputs:
-Params (Tuple): Tuple of parameters from Main.py
-Assets_ss[I,S,T+S+1]: Steady state assets value
-kf_ss[I,]: Steady State value of foreign owned domestic capital
-w_ss[I,]: Steady state value of wages
-r_ss[I,]: Steady state value of rental rate
Objects in Function:
-othervariable_params (Tuple): A tuple specifically made for GetOtherVariables
Outputs:
-assets_init[I,]: Initial Asset path
-kf_init[I,]: New initial foreign held capital
-w_initguess[I,T+S+1]: Initial guess wage timepath
-r_initguess[I,T+S+1]: Initial guess rental rate timepath
-k_init[I,]: total capital stock initial guess
-n_init[I,]: total labor initial guess
-y_init[I,]: output labor initial guess
-c_init[I,]: consumption initial guess
"""
I, S, T, delta, alpha, e, A = params
#Sets initial assets and kf, start with something close to the steady state
assets_init = assets_ss*.9
kf_init = kf_ss*0
w_initguess = np.zeros((I, T+S+1))
r_initguess = np.ones((T+S+1))*.5
#Gets initial kd, n, y, r, w, and K
kd_init = get_kd(assets_init, kf_init)
n_init = get_n(e[:,:,0])
Yparams = (alpha, A)
Y_init = get_Y(Yparams, kd_init, n_init)
r_init = get_r(alpha, Y_init, kd_init)
w_init = get_w(alpha, Y_init, n_init)
cparams = (e, delta)
c_init = get_cvecss(cparams, w_init, r_init, assets_init)
#Gets initial guess for rental rate path. This is set up to be linear.
r_initguess[:T+1] = np.linspace(r_init[0], r_ss, T+1)
r_initguess[T+1:] = r_initguess[T]
#Gets initial guess for wage path. This is set up to be linear.
for i in range(I):
w_initguess[i, :T+1] = np.linspace(w_init[i], w_ss[i], T+1)
w_initguess[i,T+1:] = w_initguess[i,T]
return assets_init, kf_init, w_initguess, r_initguess, kd_init, n_init, Y_init, c_init
def get_foreignK_path(params, Kpath, rpath, kf_ss):
"""
Description:
This calculates the timepath of the foreign capital stock. This is based on equations (1.12) and (1.13).
Inputs:
apath: Asset path, from our calculations
rpath: Rental Rate path, also from our calculation
Objects in Function:
kdpath[I,S+T+1]: Path of domestic owned capital
n[I,S+T+1]: Path of total labor
kf_ss[I,]: Calculated from the steady state.
A[I,]: Parameters from above
Outputs:
kfPath[I,S+T+1]: Path of domestic capital held by foreigners.
"""
I, S, T, alpha, e, A = params
#Sums the labor productivities across cohorts
n = np.sum(e, axis=1)
#Declares the array that will later be used.
kfPath=np.zeros((I,S+T+1))
kdPath=np.zeros((I,S+T+1))
#Gets the domestic-owned capital stock for each country except for the first country
kdPath[1:,:]=(rpath[:]/alpha)**(1/(alpha-1))*np.einsum("i,is->is", A[1:], n[1:,:])
#This is using equation 1.13 solved for the foreign capital stock to calculate the foreign capital stock
#For everyone except the first country
kfPath[1:,:]=Kpath[1:,:]-kdPath[1:,:]
#To satisfy 1.18, the first country's assets is the negative of the sum of all the other countries' assets
kfPath[0,:]= -np.sum(kfPath[1:,:],axis=0)
#Making every year beyond t equal to the steady-state
kfPath[:,T:] = np.einsum("i,s->is", kf_ss, np.ones(S+1))
return kfPath
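# --- Illustrative addition ---
# Quick numeric check of the inversion used above: kd = (r/alpha)**(1/(alpha-1)) * A*n
# is the domestic capital stock that delivers rental rate r under the
# Cobb-Douglas technology. The numbers are arbitrary.
def _demo_kd_from_r():
    alpha, A, n, r = 0.3, 1.0, 2.0, 0.12
    kd = (r / alpha) ** (1.0 / (alpha - 1.0)) * A * n
    Y = (kd ** alpha) * ((A * n) ** (1 - alpha))
    return alpha * Y / kd  # recovers r up to floating-point error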
def get_lifetime_decisions(params, c_1, wpath_chunk, rpath_chunk, e_chunk, starting_assets, current_s):
"""
Description:
This solves for equations 1.15 and 1.16 in the StepbyStep pdf for a certain generation
Inputs:
-c_1: Initial consumption (not necessarily for the year they were born)
-wpath_chunk: Wages of an agent's lifetime, a section of the timepath
-rpath_chunk: Rental rate of an agent's lifetime, a section of the timepath
-e_chunk: Worker productivities of an agent's lifetime, a section of the global matrix
-starting_assets: Initial assets of the agent. Will be 0s if we are beginning in the year the agent was born
-current_s: Current age of the agent
Objects in Function:
-NONE
Outputs:
-c_path[I, S]: Path of consumption until the agent dies
-asset_path[I, S+1]: Path of assets until the agent dies
"""
I, S, beta, sigma, delta = params
#Initializes the cpath and asset path vectors
c_path = np.zeros((I, S))
asset_path = np.zeros((I, S+1))
#For each country, the cpath and asset path vectors' are the initial values provided.
c_path[:,0] = c_1
asset_path[:,0] = starting_assets
#Based on the individual chunks, these are the households choices
for s in range(1,S):
c_path[:,s] = (beta * (1 + rpath_chunk[s] - delta))**(1/sigma) * c_path[:,s-1]
asset_path[:,s] = wpath_chunk[:,s]*e_chunk[:,s-1] + (1 + rpath_chunk[s-1] - delta)*asset_path[:,s-1] - c_path[:,s-1]
asset_path[:,s+1] = wpath_chunk[:,s]*e_chunk[:,s] + (1 + rpath_chunk[s] - delta)*asset_path[:,s] - c_path[:,s]
#Returns the relevant part of c_path and asset_path for all countries
return c_path[:,0:S-current_s], asset_path[:,0:S+1-current_s]
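# --- Illustrative addition ---
# Minimal numeric check of the household Euler step used above: consumption
# grows by the factor (beta*(1+r-delta))**(1/sigma) each period. The
# parameter values are placeholders.
def _demo_euler_growth_factor():
    beta, r, delta, sigma = 0.95, 0.15, 0.1, 3.0
    return (beta * (1.0 + r - delta)) ** (1.0 / sigma)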
def find_optimal_starting_consumptions(c_1, wpath_chunk, rpath_chunk, epath_chunk, starting_assets, current_s, params):
"""
Description:
Takes the assets path from the get_lifetime_decisions function and creates Euler errors
Inputs:
Dimension varies
-c_1: Initial consumption (not necessarily for the year they were born)
-wpath_chunk: Wages of an agent's lifetime, a part of the timepath
-rpath_chunk: Rental rate of an agent's lifetime, another part of the timepath.
-epath_chunk: Worker productivities of an agent's lifetime, another part.
-starting_assets: Initial assets of the agent. It's 0 at the beginning of life.
-current_s: Current age of the agent
Objects in Function:
-cpath: Path of consumption based on chunk given.
-assets_path: Path of assets based on the chunks given
Outputs:
-Euler: A flattened version of the assets_path matrix
"""
#Executes the get_lifetime_decisions function. See above.
c_path, assets_path = get_lifetime_decisions(params, c_1, wpath_chunk, rpath_chunk, epath_chunk, starting_assets, current_s)
if np.any(c_path<0):
c_path=np.ones(I)*9999.
Euler = np.ravel(assets_path[:,-1])
return Euler
def get_cons_assets_matrix(params, wpath, rpath, starting_assets):
I, S, T, T_1, beta, sigma, delta, e, StartFertilityAge, StartDyingAge, N_matrix, MortalityRates = params
#Initializes timepath variables
c_timepath = np.zeros((I,S,S+T+1))
a_timepath = np.zeros((I, S+1, S+T+1)) #I,S+1,S+T+1
a_timepath[:,:,0]=starting_assets
bq_timepath = np.zeros((I, S+1, S+T+1)) #Is this too big?
c_timepath[:,S-1,0] = wpath[:,0]*e[:,S-1,0] + (1 + rpath[0] - delta)*a_timepath[:,S-1,0]
#Fills the upper triangle
for s in range(S-2,-1, -1):
agent_assets = starting_assets[:,s]
#We are only doing this for all generations alive in time t=0
t = 0
#We are iterating through each generation in time t=0
current_s = s
#Uses the previous generation's consumption at age s to get the value for our guess
c_guess = c_timepath[:,s+1,t]/((beta*(1+rpath[t]-delta))**(1/sigma))
#Gets optimal initial consumption beginning in the current age of the agent using chunks of w and r that span the lifetime of the given generation
household_params = (I, S, beta, sigma, delta)
opt_consump = opt.fsolve(find_optimal_starting_consumptions, c_guess, args = \
(wpath[:,t:t+S], rpath[t:t+S], e[:,0,t:t+S],agent_assets, current_s, household_params))
#Gets optimal timepaths beginning initial consumption and starting assets
cpath_indiv, apath_indiv = get_lifetime_decisions\
(household_params, opt_consump, wpath[:,t:t+S], rpath[t:t+S], e[:,0,t:t+S], agent_assets, current_s)
for i in xrange(I):
np.fill_diagonal(c_timepath[i,s:,:], cpath_indiv[i,:])
np.fill_diagonal(a_timepath[i,s:,:], apath_indiv[i,:])
bq_params = (I, S, T, StartFertilityAge, StartDyingAge, N_matrix[:,StartDyingAge:,s], N_matrix[:,StartFertilityAge:StartDyingAge+1,s], MortalityRates[:,StartDyingAge:,s])
bq_timepath[:,:,S-s-2] = getBequests(bq_params, a_timepath[:,StartDyingAge:,S-s-2])
#print np.round(cpath_indiv[0,:], decimals=3), opt_consump[0]
#print np.round(np.transpose(c_timepath[0,:,:T_1-s+3]), decimals=3)
#print np.round(starting_assets[0,:], decimals=3)
#print np.round(assetpath_indiv[0,:], decimals=3), agent_assets[0]
#print np.round(np.transpose(a_timepath[0,:,:T_1]), decimals=3)
#Fills everything except for the upper triangle
for t in xrange(1,T):
current_s = 0 #This is always zero because this section deals with people who haven't been born yet in time T=0
agent_assets = np.zeros((I))
#Uses the previous generation's consumption at age s to get the value for our guess
c_guess = c_timepath[:,s+1,t]/((beta*(1+rpath[t+1]-delta))**(1/sigma))
optimalconsumption = opt.fsolve(find_optimal_starting_consumptions, c_guess, args = \
(wpath[:,t:t+S], rpath[t:t+S], e[:,0,t:t+S], agent_assets, current_s, household_params))
cpath_indiv, assetpath_indiv = get_lifetime_decisions\
(household_params, optimalconsumption, wpath[:,t:t+S], rpath[t:t+S], e[:,0,t:t+S], agent_assets, current_s)
for i in range(I):
np.fill_diagonal(c_timepath[i,:,t:], cpath_indiv[i,:])
np.fill_diagonal(a_timepath[i,:,t:], assetpath_indiv[i,:])
if t >= T_1:
temp_t = T_1
else:
temp_t = t
bq_params = (I, S, T, StartFertilityAge, StartDyingAge, N_matrix[:,StartDyingAge:,temp_t+S-2], N_matrix[:,StartFertilityAge:StartDyingAge+1,temp_t+S-2], MortalityRates[:,StartDyingAge:,temp_t+S-2])
bq_timepath[:,:,t+S-2] = getBequests(bq_params, a_timepath[:,StartDyingAge:,temp_t+S-2])
#bq_timepath[:,:,t+S-2] = getBequests(a_timepath[:,:,t+S-2], t+S-2)
return c_timepath, a_timepath
def get_wpathnew_rpathnew(params, wpath, rpath, starting_assets, kd_ss, kf_ss, w_ss, r_ss):
"""
Description:
Takes initial paths of wages and rental rates, gives the consumption path and the wage and rental paths that are implied by that consumption path.
Inputs:
-w_path0[I, S+T+1]: initial w path
-r_path0[I, S+T+1]: initial r path
Objects in Function:
Note that these vary in dimension depending on the loop.
-current_s: The age of the cohort at time 0
-opt_consump: Solved for consumption
-starting_assets: Initial assets for the cohorts.
-cpath_indiv: The small chunk of cpath.
-assetpath_indiv: The small chunk of assetpath_indiv
-optimalconsumption: Solved from the chunks
-c_timepath: Overall consumption path
-a_timepath: Overall assets timepath
-kfpath: Foreign held domestic capital
-agent assets: Assets held by individuals.
Outputs:
-w_path1[I,S+T+1]: calculated w path
-r_path1[I,S+T+1]: calculated r path
-CPath[I,S+T+1]: Calculated aggregate consumption path for each country
-Kpath[I,S+T+1]: Calculated capital stock path.
-Ypath1[I, S+T+1]: Timepath of output implied by the initial guess
"""
I, S, T, T_1, beta, sigma, delta, alpha, e, A, StartFertilityAge, StartDyingAge, N_matrix, MortalityRates = params
ca_params = (I, S, T, T_1, beta, sigma, delta, e, StartFertilityAge, StartDyingAge, N_matrix, MortalityRates)
c_timepath, a_timepath = get_cons_assets_matrix(ca_params, wpath, rpath, starting_assets)
#Calculates the total amount of capital in each country
Kpath=np.sum(a_timepath,axis=1)
#Calculates Aggregate Consumption
Cpath=np.sum(c_timepath,axis=1)
#After time period T, the total capital stock and total consumption is forced to be the steady state
Kpath[:,T:] = np.einsum("i,t->it", kd_ss+kf_ss, np.ones(S+1))
Cpath[:,T:] = np.einsum("i,t->it", Cpath[:,T-1], np.ones(S+1))
#Gets the foreign-owned capital
kf_params = (I, S, T, alpha, e, A)
kfpath = get_foreignK_path(kf_params, Kpath, rpath, kf_ss)
#Based on the overall capital path and the foreign owned capital path, we get new w and r paths.
kdpath = Kpath - kfpath
npath = get_n(e)
Yparams = (alpha, A)
Ypath = get_Y(Yparams, kdpath, npath)
rpath_new = get_r(alpha, Ypath[0], kdpath[0])
wpath_new = get_w(alpha, Ypath, npath)
#Checks to see if any of the timepaths have negative values
check_feasible(Kpath, Ypath, wpath, rpath, c_timepath)
return wpath_new, rpath_new, Cpath, Kpath, Ypath, a_timepath
def get_Timepath(params, wstart, rstart, assets_init, kd_ss, kf_ss, w_ss, r_ss):
I, S, T, T_1, beta, sigma, delta, alpha, e, A, StartFertilityAge, StartDyingAge, N_matrix, MortalityRates, distance, diff, xi, MaxIters = params
Iter=1 #Serves as the iteration counter
wr_params = (I, S, T, T_1, beta, sigma, delta, alpha, e, A, StartFertilityAge, StartDyingAge, N_matrix, MortalityRates)
while distance>diff and Iter<MaxIters: #The timepath iteration runs until the distance gets below a threshold or the iterations hit the maximum
wpath_new, rpath_new, Cpath_new, Kpath_new, Ypath_new, apath_new = \
get_wpathnew_rpathnew(wr_params, wstart, rstart, assets_init, kd_ss, kf_ss, w_ss, r_ss)
dist1=sp.linalg.norm(wstart-wpath_new,2) #Norm of the wage path
dist2=sp.linalg.norm(rstart-rpath_new,2) #Norm of the interest rate path
distance=max([dist1,dist2]) #We take the maximum of the two norms to get the distance
print "Iteration:",Iter,", Norm Distance: ", distance#, "Euler Error, ", EError
Iter+=1 #Updates the iteration counter
if distance<diff or Iter==MaxIters: #When the distance gets below the tolerance or the maximum of iterations is hit, then the TPI finishes.
wend=wpath_new
rend=rpath_new
Cend=Cpath_new
Kend=Kpath_new
Yend=Ypath_new
aend=apath_new
#if Iter==MaxIters: #In case it never gets below the tolerance, it will throw this warning and give the last timepath.
#print "Doesn't converge within the maximum number of iterations"
#print "Providing the last iteration"
wstart=wstart*xi+(1-xi)*wpath_new #Convex combination (dampened update) of the old and new wage paths
rstart=rstart*xi+(1-xi)*rpath_new #Convex combination (dampened update) of the old and new interest rate paths
return wend, rend, Cend, Kend, Yend, aend
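# --- Illustrative addition ---
# The TPI loop above updates prices with a dampened (convex-combination)
# step; a tiny standalone version of that update, with xi as the weight on
# the old guess:
def _demo_damped_update(old, new, xi=0.8):
    return old * xi + (1.0 - xi) * new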
def CountryLabel(Country): #Activated by line 28
'''
Description:
Converts the generic country label given for the graphs and converts it to a proper name
Inputs:
-Country (String): This is simply the generic country label
Objects in Function:
-NONE
Outputs:
-Name (String): The proper name of the country which you decide. Make sure the number of country names lines
up with the number of countries, otherwise, the function will not proceed.
'''
#Each country is given a number
if Country=="Country 0":
Name="United States"
if Country=="Country 1":
Name="Europe"
if Country=="Country 2":
Name="Japan"
if Country=="Country 3":
Name="China"
if Country=="Country 4":
Name="India"
if Country=="Country 5":
Name="Russia"
if Country=="Country 6":
Name="Korea"
#Add More Country labels here
return Name
def plotTimepaths(I, S, T, wpath, rpath, cpath, kpath, Ypath, CountryNamesON):
for i in xrange(I): #Wages
label1='Country '+str(i)
if CountryNamesON==True:
label1=CountryLabel(label1)
plt.plot(np.arange(0,T),wpath[i,:T], label=label1)
plt.title("Time path for Wages")
plt.ylabel("Wages")
plt.xlabel("Time Period")
plt.legend(loc="upper right")
plt.show()
#Rental Rates
label1='Global Interest Rate'
plt.plot(np.arange(0,T),rpath[:T], label=label1)
plt.title("Time path for Rental Rates")
plt.ylabel("Rental Rates")
plt.xlabel("Time Period")
plt.legend(loc="upper right")
plt.show()
for i in xrange(I): #Aggregate Consumption
label1='Country '+str(i)
if CountryNamesON==True:
label1=CountryLabel(label1)
plt.plot(np.arange(0,S+T+1),cpath[i,:],label=label1)
plt.title("Time Path for Aggregate Consumption")
plt.ylabel("Consumption Level")
plt.xlabel("Time Period")
plt.legend(loc="upper right")
plt.show()
for i in xrange(I): #Aggregate Capital Stock
label1='Country '+str(i)
if CountryNamesON==True:
label1=CountryLabel(label1)
plt.plot(np.arange(0,T),kpath[i,:T],label=label1)
plt.title("Time path for Capital Path")
plt.ylabel("Capital Stock level")
plt.xlabel("Time Period")
plt.legend(loc="upper right")
plt.show()
for i in xrange(I):
label1='Country '+str(i)
if CountryNamesON==True:
label1=CountryLabel(label1)
plt.plot(np.arange(0,T),Ypath[i,:T],label=label1)
plt.title("Time path for Output")
plt.ylabel("Output Stock level")
plt.xlabel("Time Period")
plt.legend(loc="upper right")
plt.show()
|
{
"content_hash": "61a78efe0d67e0c369e531c35481ed46",
"timestamp": "",
"source": "github",
"line_count": 973,
"max_line_length": 237,
"avg_line_length": 36.77081192189106,
"alnum_prop": 0.6653250600927945,
"repo_name": "kerkphil/multi-country",
"id": "9e2d13a4ac6e4646e7fcfaa6668f56869d4c8b51",
"size": "35778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/Archive/Stage2/Unincorporated Demographics/StepbyStepv1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "172"
},
{
"name": "Python",
"bytes": "719635"
},
{
"name": "TeX",
"bytes": "57744"
}
],
"symlink_target": ""
}
|
import datetime as dt
import itertools
import logging
import re
import urlparse
import bson
import pytz
import itsdangerous
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationError, ValidationValueError, QueryException
from modularodm.validators import URLValidator
import framework
from framework.addons import AddonModelMixin
from framework import analytics
from framework.auth import signals, utils
from framework.auth.exceptions import (ChangePasswordError, ExpiredTokenError, InvalidTokenError,
MergeConfirmedRequiredError, MergeConflictError)
from framework.bcrypt import generate_password_hash, check_password_hash
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.mongo.validators import string_required
from framework.sentry import log_exception
from framework.sessions import session
from framework.sessions.model import Session
from framework.sessions.utils import remove_sessions_for_user
from website import mails, settings, filters, security
name_formatters = {
'long': lambda user: user.fullname,
'surname': lambda user: user.family_name if user.family_name else user.fullname,
'initials': lambda user: u'{surname}, {initial}.'.format(
surname=user.family_name,
initial=user.given_name_initial,
),
}
logger = logging.getLogger(__name__)
# Hide implementation of token generation
def generate_confirm_token():
return security.random_string(30)
def generate_claim_token():
return security.random_string(30)
def validate_history_item(item):
string_required(item.get('institution'))
startMonth = item.get('startMonth')
startYear = item.get('startYear')
endMonth = item.get('endMonth')
endYear = item.get('endYear')
validate_year(startYear)
validate_year(endYear)
if startYear and endYear:
if endYear < startYear:
raise ValidationValueError('End date must be later than start date.')
elif endYear == startYear:
if endMonth and startMonth and endMonth < startMonth:
raise ValidationValueError('End date must be later than start date.')
def validate_year(item):
if item:
try:
int(item)
except ValueError:
raise ValidationValueError('Please enter a valid year.')
else:
if len(item) != 4:
raise ValidationValueError('Please enter a valid year.')
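# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of the history validators above: an item whose end year
# precedes its start year is rejected. The institution name is a placeholder.
def _example_validate_history():
    item = {'institution': 'Example University',
            'startMonth': 1, 'startYear': '2010',
            'endMonth': 1, 'endYear': '2008'}
    try:
        validate_history_item(item)
    except ValidationValueError:
        return True  # invalid date range was caught, as expected
    return False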
validate_url = URLValidator()
def validate_profile_websites(profile_websites):
for value in profile_websites or []:
try:
validate_url(value)
except ValidationError:
# Reraise with a better message
raise ValidationError('Invalid personal URL.')
def validate_social(value):
validate_profile_websites(value.get('profileWebsites'))
# TODO - rename to _get_current_user_from_session /HRYBACKI
def _get_current_user():
uid = session._get_current_object() and session.data.get('auth_user_id')
return User.load(uid)
# TODO: This should be a class method of User?
def get_user(email=None, password=None, verification_key=None):
"""Get an instance of User matching the provided params.
:return: The instance of User requested
:rtype: User or None
"""
# tag: database
if password and not email:
raise AssertionError("If a password is provided, an email must also "
"be provided.")
query_list = []
if email:
email = email.strip().lower()
query_list.append(Q('emails', 'eq', email) | Q('username', 'eq', email))
if password:
password = password.strip()
try:
query = query_list[0]
for query_part in query_list[1:]:
query = query & query_part
user = User.find_one(query)
except Exception as err:
logger.error(err)
user = None
if user and not user.check_password(password):
return False
return user
if verification_key:
query_list.append(Q('verification_key', 'eq', verification_key))
try:
query = query_list[0]
for query_part in query_list[1:]:
query = query & query_part
user = User.find_one(query)
return user
except Exception as err:
logger.error(err)
return None
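# --- Illustrative addition (not part of the original module) ---
# Hedged usage sketch for get_user: look a user up by email alone, or check
# credentials by passing a password as well. The address and password are
# placeholders and a configured database is assumed.
def _example_get_user_lookup():
    by_email = get_user(email='user@example.com')
    by_credentials = get_user(email='user@example.com', password='hunter2')
    return by_email, by_credentials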
class Auth(object):
def __init__(self, user=None, api_node=None,
private_key=None):
self.user = user
self.api_node = api_node
self.private_key = private_key
def __repr__(self):
return ('<Auth(user="{self.user}", '
'private_key={self.private_key})>').format(self=self)
@property
def logged_in(self):
return self.user is not None
@property
def private_link(self):
if not self.private_key:
return None
try:
# Avoid circular import
from website.project.model import PrivateLink
private_link = PrivateLink.find_one(
Q('key', 'eq', self.private_key)
)
if private_link.is_deleted:
return None
except QueryException:
return None
return private_link
@classmethod
def from_kwargs(cls, request_args, kwargs):
user = request_args.get('user') or kwargs.get('user') or _get_current_user()
private_key = request_args.get('view_only')
return cls(
user=user,
private_key=private_key,
)
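# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of constructing an Auth object directly; in the web app it
# is normally built from the request via Auth.from_kwargs.
def _example_auth(user=None):
    auth = Auth(user=user, private_key=None)
    return auth.logged_in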
class User(GuidStoredObject, AddonModelMixin):
# Node fields that trigger an update to the search engine on save
SEARCH_UPDATE_FIELDS = {
'fullname',
'given_name',
'middle_names',
'family_name',
'suffix',
'merged_by',
'date_disabled',
'date_confirmed',
'jobs',
'schools',
'social',
}
# TODO: Add SEARCH_UPDATE_NODE_FIELDS, for fields that should trigger a
# search update for all nodes to which the user is a contributor.
SOCIAL_FIELDS = {
'orcid': u'http://orcid.org/{}',
'github': u'http://github.com/{}',
'scholar': u'http://scholar.google.com/citations?user={}',
'twitter': u'http://twitter.com/{}',
'profileWebsites': [],
'linkedIn': u'https://www.linkedin.com/{}',
'impactStory': u'https://impactstory.org/{}',
'researcherId': u'http://researcherid.com/rid/{}',
'researchGate': u'https://researchgate.net/profile/{}',
'academiaInstitution': u'https://{}',
'academiaProfileID': u'.academia.edu/{}',
'baiduScholar': u'http://xueshu.baidu.com/scholarID/{}'
}
# This is a GuidStoredObject, so this will be a GUID.
_id = fields.StringField(primary=True)
# The primary email address for the account.
# This value is unique, but multiple "None" records exist for:
# * unregistered contributors where an email address was not provided.
# TODO: Update mailchimp subscription on username change in user.save()
username = fields.StringField(required=False, unique=True, index=True)
# Hashed. Use `User.set_password` and `User.check_password`
password = fields.StringField()
fullname = fields.StringField(required=True, validate=string_required)
# user has taken action to register the account
is_registered = fields.BooleanField(index=True)
# user has claimed the account
# TODO: This should be retired - it always reflects is_registered.
# While a few entries exist where this is not the case, they appear to be
# the result of a bug, as they were all created over a small time span.
is_claimed = fields.BooleanField(default=False, index=True)
# a list of strings - for internal use
system_tags = fields.StringField(list=True)
# security emails that have been sent
# TODO: This should be removed and/or merged with system_tags
security_messages = fields.DictionaryField()
# Format: {
# <message label>: <datetime>
# ...
# }
# user was invited (as opposed to registered unprompted)
is_invited = fields.BooleanField(default=False, index=True)
# Per-project unclaimed user data:
# TODO: add validation
unclaimed_records = fields.DictionaryField(required=False)
# Format: {
# <project_id>: {
# 'name': <name that referrer provided>,
# 'referrer_id': <user ID of referrer>,
# 'token': <token used for verification urls>,
# 'email': <email the referrer provided or None>,
# 'claimer_email': <email the claimer entered or None>,
# 'last_sent': <timestamp of last email sent to referrer or None>
# }
# ...
# }
# Time of last sent notification email to newly added contributors
# Format : {
# <project_id>: {
# 'last_sent': time.time()
# }
# ...
# }
contributor_added_email_records = fields.DictionaryField(default=dict)
# The user into which this account was merged
merged_by = fields.ForeignField('user', default=None, index=True)
# verification key used for resetting password
verification_key = fields.StringField()
email_last_sent = fields.DateTimeField()
# confirmed emails
# emails should be stripped of whitespace and lower-cased before appending
# TODO: Add validator to ensure an email address only exists once across
# all User's email lists
emails = fields.StringField(list=True)
# email verification tokens
# see also ``unconfirmed_emails``
email_verifications = fields.DictionaryField(default=dict)
# Format: {
# <token> : {'email': <email address>,
# 'expiration': <datetime>}
# }
# TODO remove this field once migration (scripts/migration/migrate_mailing_lists_to_mailchimp_fields.py)
# has been run. This field is deprecated and replaced with mailchimp_mailing_lists
mailing_lists = fields.DictionaryField()
# email lists to which the user has chosen a subscription setting
mailchimp_mailing_lists = fields.DictionaryField()
# Format: {
# 'list1': True,
# 'list2: False,
# ...
# }
# email lists to which the user has chosen a subscription setting, being sent from osf, rather than mailchimp
osf_mailing_lists = fields.DictionaryField(default=lambda: {settings.OSF_HELP_LIST: True})
# Format: {
# 'list1': True,
# 'list2: False,
# ...
# }
# the date this user was registered
# TODO: consider removal - this can be derived from date_registered
date_registered = fields.DateTimeField(auto_now_add=dt.datetime.utcnow,
index=True)
# watched nodes are stored via a list of WatchConfigs
watched = fields.ForeignField("WatchConfig", list=True)
# list of collaborators that this user recently added to nodes as a contributor
recently_added = fields.ForeignField("user", list=True)
# Attached external accounts (OAuth)
external_accounts = fields.ForeignField("externalaccount", list=True)
# CSL names
given_name = fields.StringField()
middle_names = fields.StringField()
family_name = fields.StringField()
suffix = fields.StringField()
# Employment history
jobs = fields.DictionaryField(list=True, validate=validate_history_item)
# Format: {
# 'title': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Educational history
schools = fields.DictionaryField(list=True, validate=validate_history_item)
# Format: {
# 'degree': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Social links
social = fields.DictionaryField(validate=validate_social)
# Format: {
# 'profileWebsites': <list of profile websites>
# 'twitter': <twitter id>,
# }
# hashed password used to authenticate to Piwik
piwik_token = fields.StringField()
# date the user last sent a request
date_last_login = fields.DateTimeField()
# date the user first successfully confirmed an email address
date_confirmed = fields.DateTimeField(index=True)
# When the user was disabled.
date_disabled = fields.DateTimeField(index=True)
# when comments were last viewed
comments_viewed_timestamp = fields.DictionaryField()
# Format: {
# 'Comment.root_target._id': 'timestamp',
# ...
# }
# timezone for user's locale (e.g. 'America/New_York')
timezone = fields.StringField(default='Etc/UTC')
# user language and locale data (e.g. 'en_US')
locale = fields.StringField(default='en_US')
_meta = {'optimistic': True}
def __repr__(self):
return '<User({0!r}) with id {1!r}>'.format(self.username, self._id)
def __str__(self):
return self.fullname.encode('ascii', 'replace')
__unicode__ = __str__
# For compatibility with Django auth
@property
def pk(self):
return self._id
@property
def email(self):
return self.username
def is_authenticated(self): # Needed for django compat
return True
def is_anonymous(self):
return False
@property
def absolute_api_v2_url(self):
from website import util
return util.api_v2_url('users/{}/'.format(self.pk))
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@classmethod
def create_unregistered(cls, fullname, email=None):
"""Create a new unregistered user.
"""
user = cls(
username=email,
fullname=fullname,
is_invited=True,
is_registered=False,
)
user.update_guessed_names()
return user
@classmethod
def create(cls, username, password, fullname):
user = cls(
username=username,
fullname=fullname,
)
user.update_guessed_names()
user.set_password(password)
return user
@classmethod
def create_unconfirmed(cls, username, password, fullname, do_confirm=True,
campaign=None):
"""Create a new user who has begun registration but needs to verify
their primary email address (username).
"""
user = cls.create(username, password, fullname)
user.add_unconfirmed_email(username)
user.is_registered = False
if campaign:
# needed to prevent circular import
from framework.auth.campaigns import system_tag_for_campaign # skipci
user.system_tags.append(system_tag_for_campaign(campaign))
return user
@classmethod
def create_confirmed(cls, username, password, fullname):
user = cls.create(username, password, fullname)
user.is_registered = True
user.is_claimed = True
user.date_confirmed = user.date_registered
user.emails.append(username)
return user
@classmethod
def from_cookie(cls, cookie, secret=None):
"""Attempt to load a user from their signed cookie
:returns: None if a user cannot be loaded else User
"""
if not cookie:
return None
secret = secret or settings.SECRET_KEY
try:
token = itsdangerous.Signer(secret).unsign(cookie)
except itsdangerous.BadSignature:
return None
user_session = Session.load(token)
if user_session is None:
return None
return cls.load(user_session.data.get('auth_user_id'))
def get_or_create_cookie(self, secret=None):
"""Find the cookie for the given user
Create a new session if no cookie is found
:param str secret: The key to sign the cookie with
:returns: The signed cookie
"""
secret = secret or settings.SECRET_KEY
sessions = Session.find(
Q('data.auth_user_id', 'eq', self._id)
).sort(
'-date_modified'
).limit(1)
if sessions.count() > 0:
user_session = sessions[0]
else:
user_session = Session(data={
'auth_user_id': self._id,
'auth_user_username': self.username,
'auth_user_fullname': self.fullname,
})
user_session.save()
signer = itsdangerous.Signer(secret)
return signer.sign(user_session._id)
def update_guessed_names(self):
"""Updates the CSL name fields inferred from the the full name.
"""
parsed = utils.impute_names(self.fullname)
self.given_name = parsed['given']
self.middle_names = parsed['middle']
self.family_name = parsed['family']
self.suffix = parsed['suffix']
def register(self, username, password=None):
"""Registers the user.
"""
self.username = username
if password:
self.set_password(password)
if username not in self.emails:
self.emails.append(username)
self.is_registered = True
self.is_claimed = True
self.date_confirmed = dt.datetime.utcnow()
self.update_search()
self.update_search_nodes()
# Emit signal that a user has confirmed
signals.user_confirmed.send(self)
return self
def add_unclaimed_record(self, node, referrer, given_name, email=None):
"""Add a new project entry in the unclaimed records dictionary.
:param Node node: Node this unclaimed user was added to.
:param User referrer: User who referred this user.
:param str given_name: The full name that the referrer gave for this user.
:param str email: The given email address.
:returns: The added record
"""
if not node.can_edit(user=referrer):
raise PermissionsError('Referrer does not have permission to add a contributor '
'to project {0}'.format(node._primary_key))
project_id = node._primary_key
referrer_id = referrer._primary_key
if email:
clean_email = email.lower().strip()
else:
clean_email = None
record = {
'name': given_name,
'referrer_id': referrer_id,
'token': generate_confirm_token(),
'email': clean_email
}
self.unclaimed_records[project_id] = record
return record
def display_full_name(self, node=None):
"""Return the full name , as it would display in a contributor list for a
given node.
NOTE: Unclaimed users may have a different name for different nodes.
"""
if node:
unclaimed_data = self.unclaimed_records.get(node._primary_key, None)
if unclaimed_data:
return unclaimed_data['name']
return self.fullname
@property
def is_active(self):
"""Returns True if the user is active. The user must have activated
their account, must not be deleted, suspended, etc.
:return: bool
"""
return (self.is_registered and
self.password is not None and
not self.is_merged and
not self.is_disabled and
self.is_confirmed)
def get_unclaimed_record(self, project_id):
"""Get an unclaimed record for a given project_id.
:raises: ValueError if there is no record for the given project.
"""
try:
return self.unclaimed_records[project_id]
except KeyError: # reraise as ValueError
raise ValueError('No unclaimed record for user {self._id} on node {project_id}'
.format(**locals()))
def get_claim_url(self, project_id, external=False):
"""Return the URL that an unclaimed user should use to claim their
account. Return ``None`` if there is no unclaimed_record for the given
project ID.
:param project_id: The project ID for the unclaimed record
:raises: ValueError if a record doesn't exist for the given project ID
:rtype: dict
:returns: The unclaimed record for the project
"""
uid = self._primary_key
base_url = settings.DOMAIN if external else '/'
unclaimed_record = self.get_unclaimed_record(project_id)
token = unclaimed_record['token']
return '{base_url}user/{uid}/{project_id}/claim/?token={token}'\
.format(**locals())
def set_password(self, raw_password, notify=True):
"""Set the password for this user to the hash of ``raw_password``.
If this is a new user, we're done. If this is a password change,
then email the user about the change and clear all the old sessions
so that users will have to log in again with the new password.
:param raw_password: the plaintext value of the new password
:param notify: Only meant for unit tests to keep extra notifications from being sent
:rtype: list
:returns: Changed fields from the user save
"""
had_existing_password = bool(self.password)
self.password = generate_password_hash(raw_password)
if had_existing_password and notify:
mails.send_mail(
to_addr=self.username,
mail=mails.PASSWORD_RESET,
mimetype='plain',
user=self
)
remove_sessions_for_user(self)
def check_password(self, raw_password):
"""Return a boolean of whether ``raw_password`` was correct."""
if not self.password or not raw_password:
return False
return check_password_hash(self.password, raw_password)
@property
def csl_given_name(self):
parts = [self.given_name]
if self.middle_names:
parts.extend(each[0] for each in re.split(r'\s+', self.middle_names))
return ' '.join(parts)
@property
def csl_name(self):
return {
'family': self.family_name,
'given': self.csl_given_name,
}
@property
def created(self):
from website.project.model import Node
return Node.find(Q('creator', 'eq', self._id))
# TODO: This should not be on the User object.
def change_password(self, raw_old_password, raw_new_password, raw_confirm_password):
"""Change the password for this user to the hash of ``raw_new_password``."""
raw_old_password = (raw_old_password or '').strip()
raw_new_password = (raw_new_password or '').strip()
raw_confirm_password = (raw_confirm_password or '').strip()
issues = []
if not self.check_password(raw_old_password):
issues.append('Old password is invalid')
elif raw_old_password == raw_new_password:
issues.append('Password cannot be the same')
if not raw_old_password or not raw_new_password or not raw_confirm_password:
issues.append('Passwords cannot be blank')
elif len(raw_new_password) < 6:
issues.append('Password should be at least six characters')
elif len(raw_new_password) > 256:
issues.append('Password should not be longer than 256 characters')
if raw_new_password != raw_confirm_password:
issues.append('Password does not match the confirmation')
if issues:
raise ChangePasswordError(issues)
self.set_password(raw_new_password)
def _set_email_token_expiration(self, token, expiration=None):
"""Set the expiration date for given email token.
:param str token: The email token to set the expiration for.
:param datetime expiration: Datetime at which to expire the token. If ``None``, the
token will expire after ``settings.EMAIL_TOKEN_EXPIRATION`` hours. This is only
used for testing purposes.
"""
expiration = expiration or (dt.datetime.utcnow() + dt.timedelta(hours=settings.EMAIL_TOKEN_EXPIRATION))
self.email_verifications[token]['expiration'] = expiration
return expiration
def add_unconfirmed_email(self, email, expiration=None):
"""Add an email verification token for a given email."""
# TODO: This is technically not compliant with RFC 822, which requires
# that case be preserved in the "local-part" of an address. From
# a practical standpoint, the vast majority of email servers do
# not preserve case.
# ref: https://tools.ietf.org/html/rfc822#section-6
email = email.lower().strip()
if email in self.emails:
raise ValueError("Email already confirmed to this user.")
utils.validate_email(email)
# If the unconfirmed email is already present, refresh the token
if email in self.unconfirmed_emails:
self.remove_unconfirmed_email(email)
token = generate_confirm_token()
# handle when email_verifications is None
if not self.email_verifications:
self.email_verifications = {}
self.email_verifications[token] = {'email': email}
self._set_email_token_expiration(token, expiration=expiration)
return token
def remove_unconfirmed_email(self, email):
"""Remove an unconfirmed email addresses and their tokens."""
for token, value in self.email_verifications.iteritems():
if value.get('email') == email:
del self.email_verifications[token]
return True
return False
def remove_email(self, email):
"""Remove a confirmed email"""
if email == self.username:
raise PermissionsError("Can't remove primary email")
if email in self.emails:
self.emails.remove(email)
signals.user_email_removed.send(self, email=email)
@signals.user_email_removed.connect
def _send_email_removal_confirmations(self, email):
mails.send_mail(to_addr=self.username,
mail=mails.REMOVED_EMAIL,
user=self,
removed_email=email,
security_addr='alternate email address ({})'.format(email))
mails.send_mail(to_addr=email,
mail=mails.REMOVED_EMAIL,
user=self,
removed_email=email,
security_addr='primary email address ({})'.format(self.username))
def get_confirmation_token(self, email, force=False):
"""Return the confirmation token for a given email.
:param str email: Email to get the token for.
:param bool force: If an expired token exists for the given email, generate a new
token and return that token.
:raises: ExpiredTokenError if trying to access a token that is expired and force=False.
:raises: KeyError if there is no token for the email.
"""
# TODO: Refactor "force" flag into User.get_or_add_confirmation_token
for token, info in self.email_verifications.items():
if info['email'].lower() == email.lower():
# Old records will not have an expiration key. If it's missing,
# assume the token is expired
expiration = info.get('expiration')
if not expiration or (expiration and expiration < dt.datetime.utcnow()):
if not force:
raise ExpiredTokenError('Token for email "{0}" is expired'.format(email))
else:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
return token
raise KeyError('No confirmation token for email "{0}"'.format(email))
def get_confirmation_url(self, email, external=True, force=False):
"""Return the confirmation url for a given email.
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: KeyError if there is no token for the email.
"""
base = settings.DOMAIN if external else '/'
token = self.get_confirmation_token(email, force=force)
return "{0}confirm/{1}/{2}/".format(base, self._primary_key, token)
def _get_unconfirmed_email_for_token(self, token):
"""Return whether or not a confirmation token is valid for this user.
:rtype: bool
"""
if token not in self.email_verifications:
raise InvalidTokenError
verification = self.email_verifications[token]
# Not all tokens are guaranteed to have expiration dates
if (
'expiration' in verification and
verification['expiration'] < dt.datetime.utcnow()
):
raise ExpiredTokenError
return verification['email']
def verify_claim_token(self, token, project_id):
"""Return whether or not a claim token is valid for this user for
a given node to which they were added as an unregistered contributor.
"""
try:
record = self.get_unclaimed_record(project_id)
except ValueError: # No unclaimed record for given pid
return False
return record['token'] == token
def confirm_email(self, token, merge=False):
"""Confirm the email address associated with the token"""
email = self._get_unconfirmed_email_for_token(token)
# If this email is confirmed on another account, abort
try:
user_to_merge = User.find_one(Q('emails', 'iexact', email))
except NoResultsFound:
user_to_merge = None
if user_to_merge and merge:
self.merge_user(user_to_merge)
elif user_to_merge:
raise MergeConfirmedRequiredError(
'Merge requires confirmation',
user=self,
user_to_merge=user_to_merge,
)
# If another user has this email as its username, get it
try:
unregistered_user = User.find_one(Q('username', 'eq', email) &
Q('_id', 'ne', self._id))
except NoResultsFound:
unregistered_user = None
if unregistered_user:
self.merge_user(unregistered_user)
self.save()
unregistered_user.username = None
if email not in self.emails:
self.emails.append(email)
# Complete registration if primary email
if email.lower() == self.username.lower():
self.register(self.username)
self.date_confirmed = dt.datetime.utcnow()
# Revoke token
del self.email_verifications[token]
# TODO: We can't assume that all unclaimed records are now claimed.
# Clear unclaimed records, so user's name shows up correctly on
# all projects
self.unclaimed_records = {}
self.save()
self.update_search_nodes()
return True
@property
def unconfirmed_emails(self):
# Handle when email_verifications field is None
email_verifications = self.email_verifications or {}
return [
each['email']
for each
in email_verifications.values()
]
def update_search_nodes(self):
"""Call `update_search` on all nodes on which the user is a
contributor. Needed to add self to contributor lists in search upon
registration or claiming.
"""
for node in self.contributed:
node.update_search()
def update_search_nodes_contributors(self):
"""
Bulk update contributor name on all nodes on which the user is
a contributor.
:return:
"""
from website.search import search
search.update_contributors(self.visible_contributor_to)
@property
def is_confirmed(self):
return bool(self.date_confirmed)
@property
def social_links(self):
social_user_fields = {}
for key, val in self.social.items():
if val and key in self.SOCIAL_FIELDS:
if not isinstance(val, basestring):
social_user_fields[key] = val
else:
social_user_fields[key] = self.SOCIAL_FIELDS[key].format(val)
return social_user_fields
@property
def biblio_name(self):
given_names = self.given_name + ' ' + self.middle_names
surname = self.family_name
if surname != given_names:
initials = [
name[0].upper() + '.'
for name in given_names.split(' ')
if name and re.search(r'\w', name[0], re.I)
]
return u'{0}, {1}'.format(surname, ' '.join(initials))
return surname
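    # Illustrative example with hypothetical names: given_name "Robert", middle_names "H."
    # and family_name "Walker" yield initials ['R.', 'H.'] and a biblio_name of
    # u'Walker, R. H.'; if the surname equals the given names, the surname alone is returned.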
@property
def given_name_initial(self):
"""
The user's preferred initialization of their given name.
Some users with common names may choose to distinguish themselves from
their colleagues in this way. For instance, there could be two
well-known researchers in a single field named "Robert Walker".
"Walker, R" could then refer to either of them. "Walker, R.H." could
provide easy disambiguation.
NOTE: The internal representation for this should never end with a
period. "R" and "R.H" would be correct in the prior case, but
"R.H." would not.
"""
return self.given_name[0]
@property
def url(self):
return '/{}/'.format(self._primary_key)
@property
def api_url(self):
return '/api/v1/profile/{0}/'.format(self._primary_key)
@property
def absolute_url(self):
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def deep_url(self):
return '/profile/{}/'.format(self._primary_key)
def profile_image_url(self, size=None):
"""A generalized method for getting a user's profile picture urls.
We may choose to use some service other than gravatar in the future,
and should not commit ourselves to using a specific service (mostly
an API concern).
As long as we use gravatar, this is just a proxy to User.gravatar_url
"""
return self._gravatar_url(size)
def _gravatar_url(self, size):
return filters.gravatar(
self,
use_ssl=True,
size=size
)
def get_activity_points(self, db=None):
db = db or framework.mongo.database
return analytics.get_total_activity_count(self._primary_key, db=db)
def disable_account(self):
"""
        Disable the user account: set ``is_disabled`` to True and unsubscribe the user
        from MailChimp emails.
"""
from website import mailchimp_utils
try:
mailchimp_utils.unsubscribe_mailchimp(
list_name=settings.MAILCHIMP_GENERAL_LIST,
user_id=self._id,
username=self.username
)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
pass
except mailchimp_utils.mailchimp.InvalidApiKeyError:
if not settings.ENABLE_EMAIL_SUBSCRIPTIONS:
pass
else:
raise
self.is_disabled = True
@property
def is_disabled(self):
"""Whether or not this account has been disabled.
Abstracts ``User.date_disabled``.
:return: bool
"""
return self.date_disabled is not None
@is_disabled.setter
def is_disabled(self, val):
"""Set whether or not this account has been disabled."""
if val and not self.date_disabled:
self.date_disabled = dt.datetime.utcnow()
elif val is False:
self.date_disabled = None
@property
def is_merged(self):
'''Whether or not this account has been merged into another account.
'''
return self.merged_by is not None
@property
def profile_url(self):
return '/{}/'.format(self._id)
@property
def contributed(self):
from website.project.model import Node
return Node.find(Q('contributors', 'eq', self._id))
@property
def contributor_to(self):
from website.project.model import Node
return Node.find(
Q('contributors', 'eq', self._id) &
Q('is_deleted', 'ne', True) &
Q('is_collection', 'ne', True)
)
@property
def visible_contributor_to(self):
from website.project.model import Node
return Node.find(
Q('contributors', 'eq', self._id) &
Q('is_deleted', 'ne', True) &
Q('is_collection', 'ne', True) &
Q('visible_contributor_ids', 'eq', self._id)
)
def get_summary(self, formatter='long'):
return {
'user_fullname': self.fullname,
'user_profile_url': self.profile_url,
'user_display_name': name_formatters[formatter](self),
'user_is_claimed': self.is_claimed
}
def save(self, *args, **kwargs):
# TODO: Update mailchimp subscription on username change
# Avoid circular import
from framework.analytics import tasks as piwik_tasks
self.username = self.username.lower().strip() if self.username else None
ret = super(User, self).save(*args, **kwargs)
if self.SEARCH_UPDATE_FIELDS.intersection(ret) and self.is_confirmed:
self.update_search()
self.update_search_nodes_contributors()
if settings.PIWIK_HOST and not self.piwik_token:
piwik_tasks.update_user(self._id)
return ret
def update_search(self):
from website import search
try:
search.search.update_user(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
@classmethod
def find_by_email(cls, email):
try:
user = cls.find_one(
Q('emails', 'eq', email)
)
return [user]
except:
return []
def serialize(self, anonymous=False):
return {
'id': utils.privacy_info_handle(self._primary_key, anonymous),
'fullname': utils.privacy_info_handle(self.fullname, anonymous, name=True),
'registered': self.is_registered,
'url': utils.privacy_info_handle(self.url, anonymous),
'api_url': utils.privacy_info_handle(self.api_url, anonymous),
}
###### OSF-Specific methods ######
def watch(self, watch_config):
"""Watch a node by adding its WatchConfig to this user's ``watched``
list. Raises ``ValueError`` if the node is already watched.
:param watch_config: The WatchConfig to add.
:param save: Whether to save the user.
"""
watched_nodes = [each.node for each in self.watched]
if watch_config.node in watched_nodes:
raise ValueError('Node is already being watched.')
watch_config.save()
self.watched.append(watch_config)
return None
def unwatch(self, watch_config):
"""Unwatch a node by removing its WatchConfig from this user's ``watched``
list. Raises ``ValueError`` if the node is not already being watched.
:param watch_config: The WatchConfig to remove.
:param save: Whether to save the user.
"""
for each in self.watched:
if watch_config.node._id == each.node._id:
from framework.transactions.context import TokuTransaction # Avoid circular import
with TokuTransaction():
# Ensure that both sides of the relationship are removed
each.__class__.remove_one(each)
self.watched.remove(each)
self.save()
return None
raise ValueError('Node not being watched.')
def is_watching(self, node):
        '''Return whether or not a user is watching a Node.'''
watched_node_ids = set([config.node._id for config in self.watched])
return node._id in watched_node_ids
def get_recent_log_ids(self, since=None):
'''Return a generator of recent logs' ids.
:param since: A datetime specifying the oldest time to retrieve logs
from. If ``None``, defaults to 60 days before today. Must be a tz-aware
datetime because PyMongo's generation times are tz-aware.
:rtype: generator of log ids (strings)
'''
log_ids = []
# Default since to 60 days before today if since is None
# timezone aware utcnow
utcnow = dt.datetime.utcnow().replace(tzinfo=pytz.utc)
since_date = since or (utcnow - dt.timedelta(days=60))
for config in self.watched:
# Extract the timestamps for each log from the log_id (fast!)
# The first 4 bytes of Mongo's ObjectId encodes time
# This prevents having to load each Log Object and access their
# date fields
node_log_ids = [log.pk for log in config.node.logs
if bson.ObjectId(log.pk).generation_time > since_date and
log.pk not in log_ids]
# Log ids in reverse chronological order
log_ids = _merge_into_reversed(log_ids, node_log_ids)
return (l_id for l_id in log_ids)
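    # Note: the first four bytes of a bson.ObjectId encode its creation time, exposed as the
    # tz-aware ``generation_time`` attribute, e.g. (hypothetical id):
    #   bson.ObjectId('507f1f77bcf86cd799439011').generation_time  # datetime with tzinfo=utc
    # which is why the filter above can compare against ``since_date`` without loading Log objects.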
def get_daily_digest_log_ids(self):
'''Return a generator of log ids generated in the past day
(starting at UTC 00:00).
'''
utcnow = dt.datetime.utcnow()
midnight = dt.datetime(
utcnow.year, utcnow.month, utcnow.day,
0, 0, 0, tzinfo=pytz.utc
)
return self.get_recent_log_ids(since=midnight)
@property
def can_be_merged(self):
"""The ability of the `merge_user` method to fully merge the user"""
return all((addon.can_be_merged for addon in self.get_addons()))
def merge_user(self, user):
"""Merge a registered user into this account. This user will be
a contributor on any project. if the registered user and this account
are both contributors of the same project. Then it will remove the
registered user and set this account to the highest permission of the two
and set this account to be visible if either of the two are visible on
the project.
:param user: A User object to be merged.
"""
# Fail if the other user has conflicts.
if not user.can_be_merged:
raise MergeConflictError("Users cannot be merged")
# Move over the other user's attributes
# TODO: confirm
for system_tag in user.system_tags:
if system_tag not in self.system_tags:
self.system_tags.append(system_tag)
self.is_claimed = self.is_claimed or user.is_claimed
self.is_invited = self.is_invited or user.is_invited
# copy over profile only if this user has no profile info
if user.jobs and not self.jobs:
self.jobs = user.jobs
if user.schools and not self.schools:
self.schools = user.schools
if user.social and not self.social:
self.social = user.social
unclaimed = user.unclaimed_records.copy()
unclaimed.update(self.unclaimed_records)
self.unclaimed_records = unclaimed
# - unclaimed records should be connected to only one user
user.unclaimed_records = {}
security_messages = user.security_messages.copy()
security_messages.update(self.security_messages)
self.security_messages = security_messages
for key, value in user.mailchimp_mailing_lists.iteritems():
# subscribe to each list if either user was subscribed
subscription = value or self.mailchimp_mailing_lists.get(key)
signals.user_merged.send(self, list_name=key, subscription=subscription)
# clear subscriptions for merged user
signals.user_merged.send(user, list_name=key, subscription=False, send_goodbye=False)
for target_id, timestamp in user.comments_viewed_timestamp.iteritems():
if not self.comments_viewed_timestamp.get(target_id):
self.comments_viewed_timestamp[target_id] = timestamp
elif timestamp > self.comments_viewed_timestamp[target_id]:
self.comments_viewed_timestamp[target_id] = timestamp
self.emails.extend(user.emails)
user.emails = []
for k, v in user.email_verifications.iteritems():
email_to_confirm = v['email']
if k not in self.email_verifications and email_to_confirm != user.username:
self.email_verifications[k] = v
user.email_verifications = {}
for institution in user.affiliated_institutions:
self.affiliated_institutions.append(institution)
user._affiliated_institutions = []
# FOREIGN FIELDS
for watched in user.watched:
if watched not in self.watched:
self.watched.append(watched)
user.watched = []
for account in user.external_accounts:
if account not in self.external_accounts:
self.external_accounts.append(account)
user.external_accounts = []
# - addons
# Note: This must occur before the merged user is removed as a
# contributor on the nodes, as an event hook is otherwise fired
# which removes the credentials.
for addon in user.get_addons():
user_settings = self.get_or_add_addon(addon.config.short_name)
user_settings.merge(addon)
user_settings.save()
# Disconnect signal to prevent emails being sent about being a new contributor when merging users
# be sure to reconnect it at the end of this code block. Import done here to prevent circular import error.
from website.addons.osfstorage.listeners import checkin_files_by_user
from website.project.signals import contributor_added, contributor_removed
from website.project.views.contributor import notify_added_contributor
from website.util import disconnected_from
# - projects where the user was a contributor
with disconnected_from(signal=contributor_added, listener=notify_added_contributor):
for node in user.contributed:
# Skip bookmark collection node
if node.is_bookmark_collection:
continue
# if both accounts are contributor of the same project
if node.is_contributor(self) and node.is_contributor(user):
if node.permissions[user._id] > node.permissions[self._id]:
permissions = node.permissions[user._id]
else:
permissions = node.permissions[self._id]
node.set_permissions(user=self, permissions=permissions)
visible1 = self._id in node.visible_contributor_ids
visible2 = user._id in node.visible_contributor_ids
if visible1 != visible2:
node.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
else:
node.add_contributor(
contributor=self,
permissions=node.get_permissions(user),
visible=node.get_visible(user),
log=False,
)
with disconnected_from(signal=contributor_removed, listener=checkin_files_by_user):
try:
node.remove_contributor(
contributor=user,
auth=Auth(user=self),
log=False,
)
except ValueError:
logger.error('Contributor {0} not in list on node {1}'.format(
user._id, node._id
))
node.save()
# - projects where the user was the creator
for node in user.created:
node.creator = self
node.save()
# - file that the user has checked_out, import done here to prevent import error
from website.files.models.base import FileNode
for file_node in FileNode.files_checked_out(user=user):
file_node.checkout = self
file_node.save()
# finalize the merge
remove_sessions_for_user(user)
# - username is set to None so the resultant user can set it primary
# in the future.
user.username = None
user.password = None
user.verification_key = None
user.osf_mailing_lists = {}
user.merged_by = self
user.save()
def get_projects_in_common(self, other_user, primary_keys=True):
"""Returns either a collection of "shared projects" (projects that both users are contributors for)
or just their primary keys
"""
if primary_keys:
projects_contributed_to = set(self.contributed.get_keys())
other_projects_primary_keys = set(other_user.contributed.get_keys())
return projects_contributed_to.intersection(other_projects_primary_keys)
else:
projects_contributed_to = set(self.contributed)
return projects_contributed_to.intersection(other_user.contributed)
def n_projects_in_common(self, other_user):
"""Returns number of "shared projects" (projects that both users are contributors for)"""
return len(self.get_projects_in_common(other_user, primary_keys=True))
def is_affiliated_with_institution(self, inst):
return inst in self.affiliated_institutions
def remove_institution(self, inst_id):
removed = False
for inst in self.affiliated_institutions:
if inst._id == inst_id:
self.affiliated_institutions.remove(inst)
removed = True
return removed
_affiliated_institutions = fields.ForeignField('node', list=True)
@property
def affiliated_institutions(self):
from website.institutions.model import Institution, AffiliatedInstitutionsList
return AffiliatedInstitutionsList([Institution(inst) for inst in self._affiliated_institutions], obj=self, private_target='_affiliated_institutions')
def get_node_comment_timestamps(self, target_id):
""" Returns the timestamp for when comments were last viewed on a node, file or wiki.
"""
default_timestamp = dt.datetime(1970, 1, 1, 12, 0, 0)
return self.comments_viewed_timestamp.get(target_id, default_timestamp)
def _merge_into_reversed(*iterables):
'''Merge multiple sorted inputs into a single output in reverse order.
'''
return sorted(itertools.chain(*iterables), reverse=True)
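# A minimal usage sketch of the helper above, with hypothetical log id values:
#   _merge_into_reversed([9, 5, 1], [8, 3])  # -> [9, 8, 5, 3, 1]
# The output is a single list in descending order, matching the reverse-chronological
# ordering that get_recent_log_ids expects.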
|
{
"content_hash": "747ff337de857f33c41a0f3b9dc880da",
"timestamp": "",
"source": "github",
"line_count": 1432,
"max_line_length": 157,
"avg_line_length": 36.50768156424581,
"alnum_prop": 0.6083130893857954,
"repo_name": "TomHeatwole/osf.io",
"id": "6e4e476596e73923e063b576b86dd2b75376af7e",
"size": "52303",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "framework/auth/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "140360"
},
{
"name": "HTML",
"bytes": "94857"
},
{
"name": "JavaScript",
"bytes": "1561313"
},
{
"name": "Mako",
"bytes": "659751"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5250038"
}
],
"symlink_target": ""
}
|
import mysql.connector
def connect():
return mysql.connector.connect(user="root",password="wujixiaoo",database="suiyue")
|
{
"content_hash": "558549102c67c045f3e6baa03cd7ed12",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 83,
"avg_line_length": 30.75,
"alnum_prop": 0.7804878048780488,
"repo_name": "oujiaqi/suiyue",
"id": "c17558ce1fab7b5d00fa5df8bfd75d973e1c785f",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30121"
},
{
"name": "HTML",
"bytes": "65914"
},
{
"name": "JavaScript",
"bytes": "13738"
},
{
"name": "Python",
"bytes": "60397"
}
],
"symlink_target": ""
}
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "georef.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "b22a2a7aeb65705be1fb2aefc3b8238b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 33,
"alnum_prop": 0.8,
"repo_name": "LKajan/georef",
"id": "6f7199894f842265d1b95d135c834352a2d25874",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoProject/djangoProject/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3984"
},
{
"name": "JavaScript",
"bytes": "10958"
},
{
"name": "Python",
"bytes": "21524"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Find Fruit',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': ('Searches for low-hanging web applications.'),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['CIDR Parser credits to http://bibing.us.es/proyectos/abreproy/12106/fichero/ARCHIVOS%252Fservidor_xmlrpc%252Fcidr.py']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Target' : {
                # IP address or CIDR block that the scan will target
'Description' : 'IP Address or CIDR to scan.',
'Required' : True,
'Value' : ''
},
'Port' : {
                # Port that the scan will probe on each host
'Description' : 'The port to scan on.',
'Required' : True,
'Value' : '8080'
},
'SSL' : {
                # Whether to force HTTPS when building the URLs to check
'Description' : 'True/False to force SSL',
'Required' : False,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
target = self.options['Target']['Value']
port = self.options['Port']['Value']
ssl = self.options['SSL']['Value']
script = """
import urllib2
import sys
import re
import subprocess
iplist = []
def ip2bin(ip):
b = ""
inQuads = ip.split(".")
outQuads = 4
for q in inQuads:
if q != "":
b += dec2bin(int(q),8)
outQuads -= 1
while outQuads > 0:
b += "00000000"
outQuads -= 1
return b
def dec2bin(n,d=None):
s = ""
while n>0:
if n&1:
s = "1"+s
else:
s = "0"+s
n >>= 1
if d is not None:
while len(s)<d:
s = "0"+s
if s == "": s = "0"
return s
def bin2ip(b):
ip = ""
for i in range(0,len(b),8):
ip += str(int(b[i:i+8],2))+"."
return ip[:-1]
def printCIDR(c):
parts = c.split("/")
baseIP = ip2bin(parts[0])
subnet = int(parts[1])
if subnet == 32:
print bin2ip(baseIP)
else:
ipPrefix = baseIP[:-(32-subnet)]
for i in range(2**(32-subnet)):
iplist.append(bin2ip(ipPrefix+dec2bin(i, (32-subnet))))
return
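# Example with a hypothetical range: printCIDR("10.0.0.0/30") appends the four addresses
# 10.0.0.0 through 10.0.0.3 to iplist, while a /32 is printed directly instead of appended.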
def validateCIDRBlock(b):
p = re.compile("^([0-9]{1,3}\.){0,3}[0-9]{1,3}(/[0-9]{1,2}){1}$")
if not p.match(b):
print "Error: Invalid CIDR format!"
return False
prefix, subnet = b.split("/")
quads = prefix.split(".")
for q in quads:
if (int(q) < 0) or (int(q) > 255):
print "Error: quad "+str(q)+" wrong size."
return False
if (int(subnet) < 1) or (int(subnet) > 32):
print "Error: subnet "+str(subnet)+" wrong size."
return False
return True
def http_get(url):
req = urllib2.Request(url)
resp = urllib2.urlopen(req, timeout = 1)
code = resp.getcode()
if code == 200:
print url + " returned 200!"
return
def main(ip, port, ssl):
if ssl == True:
http = "https"
elif ssl == False:
http = "http"
VulnLinks = []
if '/' in ip:
printCIDR(ip)
for ip in iplist:
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "jmx-console/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "web-console/ServerInfo.jsp")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "invoker/JMXInvokerServlet")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "lc/system/console")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "axis2/axis2-admin/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "manager/html/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "tomcat/manager/html/")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "wp-admin")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "workorder/FileDownload.jsp")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "ibm/console/logon.jsp?action=OK")
VulnLinks.append(http + '://' + ip + ':' + port + '/' + "data/login")
else:
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'jmx-console/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'web-console/ServerInfo.jsp')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'invoker/JMXInvokerServlet')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'lc/system/console')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'axis2/axis2-admin/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'manager/html/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'tomcat/manager/html/')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'wp-admin')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'workorder/FileDownload.jsp')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'ibm/console/logon.jsp?action=OK')
VulnLinks.append(http + '://' + ip + ':' + port + '/' + 'data/login')
for link in VulnLinks:
while True:
try:
req = urllib2.Request(link)
resp = urllib2.urlopen(req, timeout = 1)
code = resp.getcode()
if code == 200:
print link + " returned 200!"
break
except urllib2.URLError:
break
ip = "%s"
port = str("%s")
ssl = %s
main(ip, port, ssl)
""" %(target, port, ssl)
return script
|
{
"content_hash": "3b6e6f004af47a753515495f22154c31",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 144,
"avg_line_length": 31.86234817813765,
"alnum_prop": 0.4889453621346887,
"repo_name": "adaptivethreat/Empire",
"id": "57efe3079ab7c138ea03cdbd8fa38d768a1481d3",
"size": "7870",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "lib/modules/python/situational_awareness/network/find_fruit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "PowerShell",
"bytes": "12091585"
},
{
"name": "Python",
"bytes": "1207008"
},
{
"name": "Shell",
"bytes": "2279"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('stories', '0005_auto_20141221_1953'),
]
operations = [
migrations.AlterField(
model_name='story',
name='date_of_visit',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=True,
),
]
|
{
"content_hash": "0d3cec1dd3c1aadbec45502fa5f772d1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 23.4,
"alnum_prop": 0.6175213675213675,
"repo_name": "klpdotorg/dubdubdub",
"id": "a8f12183bf840b5921d034b4d647928151515879",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/stories/migrations/0006_auto_20141222_2030.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "478"
},
{
"name": "CSS",
"bytes": "335110"
},
{
"name": "HTML",
"bytes": "655218"
},
{
"name": "JavaScript",
"bytes": "1941014"
},
{
"name": "PLpgSQL",
"bytes": "156345"
},
{
"name": "Python",
"bytes": "920256"
},
{
"name": "Shell",
"bytes": "10544"
}
],
"symlink_target": ""
}
|
"""Wraps a Multigrid multiagent environment to be used as a dm_env."""
from typing import Any, Dict, List, Optional
import warnings
from acme import specs
from acme import types
from acme import wrappers
from acme.multiagent import types as ma_types
from acme.wrappers import multiagent_dict_key_wrapper
import dm_env
import gym
from gym import spaces
import jax
import numpy as np
import tree
try:
# The following import registers multigrid environments in gym. Do not remove.
# pylint: disable=unused-import, disable=g-import-not-at-top
# pytype: disable=import-error
from social_rl.gym_multigrid import multigrid
# pytype: enable=import-error
# pylint: enable=unused-import, enable=g-import-not-at-top
except ModuleNotFoundError as err:
raise ModuleNotFoundError(
'The multiagent multigrid environment module could not be found. '
'Ensure you have downloaded it from '
'https://github.com/google-research/google-research/tree/master/social_rl/gym_multigrid'
' before running this example.') from err
# Disables verbose np.bool warnings that occur in multigrid.
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
message='`np.bool` is a deprecated alias')
class MultigridWrapper(dm_env.Environment):
"""Environment wrapper for Multigrid environments.
Note: the main difference with vanilla GymWrapper is that reward_spec() is
overridden and rewards are cast to np.arrays in step()
"""
def __init__(self, environment: multigrid.MultiGridEnv):
"""Initializes environment.
Args:
environment: the environment.
"""
self._environment = environment
self._reset_next_step = True
self._last_info = None
self.num_agents = environment.n_agents # pytype: disable=attribute-error
# Convert action and observation specs.
obs_space = self._environment.observation_space
act_space = self._environment.action_space
self._observation_spec = _convert_to_spec(
obs_space, self.num_agents, name='observation')
self._action_spec = _convert_to_spec(
act_space, self.num_agents, name='action')
def process_obs(self, observation: types.NestedArray) -> types.NestedArray:
# Convert observations to agent-index-first format
observation = dict_obs_to_list_obs(observation)
# Assign dtypes to multigrid observations (some of which are lists by
# default, so do not have a precise dtype that matches their observation
# spec. This ensures no replay signature mismatch issues occur).
observation = tree.map_structure(lambda x, t: np.asarray(x, dtype=t.dtype),
observation, self.observation_spec())
return observation
def reset(self) -> dm_env.TimeStep:
"""Resets the episode."""
self._reset_next_step = False
observation = self.process_obs(self._environment.reset())
# Reset the diagnostic information.
self._last_info = None
return dm_env.restart(observation)
def step(self, action: types.NestedArray) -> dm_env.TimeStep:
"""Steps the environment."""
if self._reset_next_step:
return self.reset()
observation, reward, done, info = self._environment.step(action)
observation = self.process_obs(observation)
self._reset_next_step = done
self._last_info = info
def _map_reward_spec(x, t):
if np.isscalar(x):
return t.dtype.type(x)
return np.asarray(x, dtype=t.dtype)
reward = tree.map_structure(
_map_reward_spec,
reward,
self.reward_spec())
if done:
truncated = info.get('TimeLimit.truncated', False)
if truncated:
return dm_env.truncation(reward, observation)
return dm_env.termination(reward, observation)
return dm_env.transition(reward, observation)
def observation_spec(self) -> types.NestedSpec: # Internal pytype check.
return self._observation_spec
def action_spec(self) -> types.NestedSpec: # Internal pytype check.
return self._action_spec
def reward_spec(self) -> types.NestedSpec: # Internal pytype check.
return [specs.Array(shape=(), dtype=float, name='rewards')
] * self._environment.n_agents
def get_info(self) -> Optional[Dict[str, Any]]:
"""Returns the last info returned from env.step(action).
Returns:
info: dictionary of diagnostic information from the last environment step
"""
return self._last_info
@property
def environment(self) -> gym.Env:
"""Returns the wrapped environment."""
return self._environment
def __getattr__(self, name: str) -> Any:
"""Returns any other attributes of the underlying environment."""
return getattr(self._environment, name)
def close(self):
self._environment.close()
def _get_single_agent_spec(spec):
"""Returns a single-agent spec from multiagent multigrid spec.
Primarily used for converting multigrid specs to multiagent Acme specs,
wherein actions and observations specs are expected to be lists (each entry
corresponding to the spec of that particular agent). Note that this function
assumes homogeneous observation / action specs across all agents, which is the
case in multigrid.
Args:
spec: multigrid environment spec.
"""
def make_single_agent_spec(spec):
if not spec.shape: # Rewards & discounts
shape = ()
elif len(spec.shape) == 1: # Actions
shape = ()
else: # Observations
shape = spec.shape[1:]
if isinstance(spec, specs.BoundedArray):
      # Bounded rewards and discounts often have no dimensions as they are
      # shared amongst the agents, whereas observations are of shape [num_agents, ...].
# The following pair of if statements handle both cases accordingly.
minimum = spec.minimum if spec.minimum.ndim == 0 else spec.minimum[0]
maximum = spec.maximum if spec.maximum.ndim == 0 else spec.maximum[0]
return specs.BoundedArray(
shape=shape,
name=spec.name,
minimum=minimum,
maximum=maximum,
dtype=spec.dtype)
elif isinstance(spec, specs.DiscreteArray):
return specs.DiscreteArray(
num_values=spec.num_values, dtype=spec.dtype, name=spec.name)
elif isinstance(spec, specs.Array):
return specs.Array(shape=shape, dtype=spec.dtype, name=spec.name)
else:
raise ValueError(f'Unexpected spec type {type(spec)}.')
single_agent_spec = jax.tree_map(make_single_agent_spec, spec)
return single_agent_spec
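# For illustration (hypothetical shapes): a multigrid observation spec of shape
# (num_agents, 5, 5, 3) maps to a per-agent spec of shape (5, 5, 3), while reward,
# discount and action specs of shape () or (num_agents,) map to scalar () specs.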
def _gym_to_spec(space: gym.Space,
name: Optional[str] = None) -> types.NestedSpec:
"""Converts an OpenAI Gym space to a dm_env spec or nested structure of specs.
Box, MultiBinary and MultiDiscrete Gym spaces are converted to BoundedArray
specs. Discrete OpenAI spaces are converted to DiscreteArray specs. Tuple and
Dict spaces are recursively converted to tuples and dictionaries of specs.
Args:
space: The Gym space to convert.
name: Optional name to apply to all return spec(s).
Returns:
A dm_env spec or nested structure of specs, corresponding to the input
space.
"""
if isinstance(space, spaces.Discrete):
return specs.DiscreteArray(num_values=space.n, dtype=space.dtype, name=name)
elif isinstance(space, spaces.Box):
return specs.BoundedArray(
shape=space.shape,
dtype=space.dtype,
minimum=space.low,
maximum=space.high,
name=name)
elif isinstance(space, spaces.MultiBinary):
return specs.BoundedArray(
shape=space.shape,
dtype=space.dtype,
minimum=0.0,
maximum=1.0,
name=name)
elif isinstance(space, spaces.MultiDiscrete):
return specs.BoundedArray(
shape=space.shape,
dtype=space.dtype,
minimum=np.zeros(space.shape),
maximum=space.nvec - 1,
name=name)
elif isinstance(space, spaces.Tuple):
return tuple(_gym_to_spec(s, name) for s in space.spaces)
elif isinstance(space, spaces.Dict):
return {
key: _gym_to_spec(value, key) for key, value in space.spaces.items()
}
else:
raise ValueError('Unexpected gym space: {}'.format(space))
def _convert_to_spec(space: gym.Space,
num_agents: int,
name: Optional[str] = None) -> types.NestedSpec:
"""Converts multigrid Gym space to an Acme multiagent spec.
Args:
space: The Gym space to convert.
num_agents: the number of agents.
name: Optional name to apply to all return spec(s).
Returns:
A dm_env spec or nested structure of specs, corresponding to the input
space.
"""
# Convert gym specs to acme specs
spec = _gym_to_spec(space, name)
# Then change spec indexing from observation-key-first to agent-index-first
return [_get_single_agent_spec(spec)] * num_agents
def dict_obs_to_list_obs(
observation: types.NestedArray
) -> List[Dict[ma_types.AgentID, types.NestedArray]]:
"""Returns multigrid observations converted to agent-index-first format.
By default, multigrid observations are structured as:
observation['image'][agent_index]
observation['direction'][agent_index]
...
However, multiagent Acme expects observations with agent indices first:
observation[agent_index]['image']
observation[agent_index]['direction']
This function simply converts multigrid observations to the latter format.
Args:
    observation: the observation-key-first multigrid observation dict to convert.
"""
return [dict(zip(observation, v)) for v in zip(*observation.values())]
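# A small sketch of the conversion above, with hypothetical per-agent values:
#   dict_obs_to_list_obs({'image': [img0, img1], 'direction': [0, 1]})
#   # -> [{'image': img0, 'direction': 0}, {'image': img1, 'direction': 1}]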
def make_multigrid_environment(
env_name: str = 'MultiGrid-Empty-5x5-v0') -> dm_env.Environment:
"""Returns Multigrid Multiagent Gym environment.
Args:
env_name: name of multigrid task. See social_rl.gym_multigrid.envs for the
available environments.
"""
# Load the gym environment.
env = gym.make(env_name)
# Make sure the environment obeys the dm_env.Environment interface.
env = MultigridWrapper(env)
env = wrappers.SinglePrecisionWrapper(env)
env = multiagent_dict_key_wrapper.MultiagentDictKeyWrapper(env)
return env
|
{
"content_hash": "78297e11e99baa8c433101cbfaf00a49",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 94,
"avg_line_length": 33.52,
"alnum_prop": 0.6938146380270486,
"repo_name": "deepmind/acme",
"id": "9c4b2287bbdb7c0512033dd175b7bc32a660e512",
"size": "10672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/wrappers/multigrid_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2182865"
},
{
"name": "Shell",
"bytes": "2668"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
import time
import requests
from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy import dataset_collections as collections
from bioblend import ConnectionError
from pyaccessories.TimeLog import Timer
import zipfile
class AutoSNVPhylError(ValueError):
"""Raise when a specific subset of values in context of app is wrong"""
def __init__(self, message, *args):
self.message = message # without this you may get DeprecationWarning
# allow users initialize misc. arguments as any other builtin Error
super(AutoSNVPhylError, self).__init__(message, *args)
class AutoSNVPhyl(object):
def run(self):
try:
self.load() # Load from config
self.gi = GalaxyInstance(self.IP, key=self.API_KEY)
if not self.manual and self.reference is None:
# No reference and it isn't using files in upload folder
self.t.time_print("No reference file specified with -r, please input one or use the --manual"
" flag to use a reference file that you put in the upload folder.")
exit(1)
if self.noextract and not self.manual:
self.t.time_print("[Warning] Using manual flag since noextract was specified without manual.")
self.manual = True
return self.main() # Return the path to the results zip
except:
import traceback
# Print error to file
self.t.time_print("[Error Dump]\n" + traceback.format_exc())
raise
def main(self):
if self.inputs is not None:
if len(self.inputs['rename']) > 0:
self.rename = True
print(self.rename)
print(self.inputs['rename'])
# Create history in Galaxy
self.t.time_print("Creating history " + self.NAME)
while True:
try:
self.history_id = self.gi.histories.create_history(self.NAME)['id']
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
self.t.time_print(self.history_id)
# Begin uploading files to Galaxy
self.t.time_print("Uploading files to galaxy...")
# Upload files from the NAS based on the SEQ-ID list given
if not self.noextract:
self.t.time_print("Finding files on the NAS...")
# Get list of files to retrieve
to_upload = self.extract_files()
# Upload to galaxy
self.t.time_print("Uploading files from the NAS...")
n = 1
nfiles = len(to_upload)
for file in to_upload:
self.t.time_print("%d of %d: Uploading %s" % (n, nfiles, file))
self.upload_file(file)
n += 1
# Upload files from the upload folder if the manual flag is used
if self.manual:
self.t.time_print("Using files in upload folder since -m was used")
n = 1
upload_folder = os.path.join(self.script_dir, "upload")
files = os.listdir(upload_folder)
nfiles = len(files)
for file in files:
self.t.time_print("%d of %d: Uploading %s from %s directory." % (n, nfiles, file, upload_folder))
self.upload_file(os.path.join(upload_folder, file))
n += 1
self.t.time_print("Waiting for files to finish uploading...")
while True:
try:
while self.gi.histories.show_history(self.history_id)["state"] != "ok":
time.sleep(10)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
# Check if all the files are on galaxy and that there are no duplicate/extra files there
# Create list that stores all the files on galaxy
on_galaxy = []
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
on_galaxy.append(dataset['name'])
# Check for duplicate files
count = {}
for file in on_galaxy:
try:
self.t.time_print(count[file]) # If this succeeds then the file is already on galaxy so duplicate
self.t.time_print("[Error] Duplicate file %s on galaxy!" % file)
raise AutoSNVPhylError("Duplicate file on galaxy!")
except KeyError:
# If it isn't already in the dictionary add it to the dictionary
count[file] = True
# Print all the files that weren't successfully uploaded.
for file in self.uploaded:
if file not in on_galaxy:
# It wasn't decompressed
if file + ".gz" in on_galaxy:
if 'R1' in file:
n = 'R1'
else:
n = 'R2'
# Re-upload the file
self.t.time_print("[Warning] File %s wasn't automatically decompressed by galaxy,"
" re-uploading..." % file + '.gz')
self.upload_file(self.extractor.retrieve_file(file.split('_')[0], filetype="fastq_" + n,
getpathonly=True))
if not self.upload_check(file):
errmsg = "[Error] File %s wasn't automatically decompressed by galaxy again, something is " \
"wrong with the file?" % file + '.gz'
self.t.time_print(errmsg)
raise AutoSNVPhylError(errmsg)
else:
n = ""
if 'R1' in file:
n = 'R1'
                    elif 'R2' in file:
n = 'R2'
self.t.time_print("[Warning] File %s wasn't uploaded to galaxy! Attempting to re-upload" % file)
if n == "":
if file.endswith('.fasta'):
self.upload_file(self.extractor.retrieve_file(file.split('.')[0], filetype="fasta",
getpathonly=True))
else:
self.upload_file(self.extractor.retrieve_file(file.split('_')[0], filetype="fastq_" + n,
getpathonly=True))
if not self.upload_check(file):
errmsg = "[Error] File %s couldn't be uploaded to galaxy!" % file
self.t.time_print(errmsg)
raise AutoSNVPhylError(errmsg)
self.t.time_print("Finished uploading.")
self.t.time_print("Building list of dataset pairs...")
self.build_list()
self.t.time_print("Starting workflow...")
self.run_workflow()
time.sleep(10) # Give it a bit of time to start the workflow
# Wait for workflow to finish
self.t.time_print("Waiting for workflow to finish.")
wait = 0
longwait = 24
while True:
try:
history_state = self.gi.histories.show_history(self.history_id)["state"]
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
while history_state != "ok":
wait += 1
if wait > 60: # 10 minutes
self.t.time_print("Still waiting for workflow to finish.")
wait = 0
longwait += 1
if longwait > 23:
raise AutoSNVPhylError("SNVPhyl took to long, please check galaxy history called %s" %
str(self.NAME))
time.sleep(10)
while True:
try:
history_state = self.gi.histories.show_history(self.history_id)["state"]
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
if history_state == "error":
self.t.time_print("Something went wrong with your SNVPhyl! Check the galaxy history called %s" % self.NAME)
raise AutoSNVPhylError("Something went wrong with your SNVPhyl! "
"Check the galaxy history called %s" % self.NAME)
self.t.time_print("Workflow finished, downloading files...")
to_download = [
"snvMatrix.tsv",
"phylogeneticTreeStats.txt",
"phylogeneticTree.newick",
"filterStats.txt",
"snvAlignment.phy",
"vcf2core.tsv",
"snvTable.tsv"
]
self.t.time_print("Creating directory %s." % self.NAME)
folder = os.path.join(self.script_dir, 'results', self.NAME)
if not os.path.exists(folder):
os.makedirs(folder)
self.t.time_print("Downloading files:")
        not_downloaded = list(to_download)
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
# Renames and downloads
if dataset["name"] in to_download:
self.t.time_print(" Downloading %s to %s" % (dataset["name"], os.path.join(folder, dataset["name"])))
while True:
try:
self.gi.datasets.download_dataset(dataset["id"], os.path.join(folder, dataset["name"]),
wait_for_completion=True, use_default_filename=False)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
not_downloaded.remove(dataset["name"])
if len(not_downloaded) > 0:
self.t.time_print("[Warning] Can't find some results files on Galaxy!,"
" these will not be included in the zip file: ")
            for missing in not_downloaded:
self.t.time_print(" %s" % missing)
self.zip_results(folder)
self.t.time_print("Completed")
return os.path.join(self.script_dir, folder, self.NAME + '.zip')
def upload_check(self, filename):
self.t.time_print("Waiting for upload to finish...")
while True:
try:
while self.gi.histories.show_history(self.history_id)["state"] != "ok":
time.sleep(10)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
# Check if the file is on galaxy
on_galaxy = []
while True:
try:
datasets = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
for dataset in datasets:
on_galaxy.append(dataset['name'])
if filename in on_galaxy:
return True
else:
return False
def zip_results(self, r_folder):
f_list = [
"snvMatrix.tsv",
"phylogeneticTreeStats.txt",
"phylogeneticTree.newick",
"filterStats.txt",
"snvAlignment.phy",
"vcf2core.tsv",
"snvTable.tsv"
]
# Zip all the files
results_zip = os.path.join(self.script_dir, r_folder, self.NAME + '.zip')
self.t.time_print("Creating zip file %s" % results_zip)
try:
os.remove(results_zip)
except OSError:
pass
zipf = zipfile.ZipFile(results_zip, 'w', zipfile.ZIP_DEFLATED)
for to_zip in f_list:
try:
zipf.write(os.path.join(r_folder, to_zip), arcname=to_zip)
self.t.time_print("Zipped %s" % to_zip)
except FileNotFoundError:
self.t.time_print("[Warning] Can't find %s, will leave it out of .zip." % to_zip)
raise
zipf.close()
def upload_file(self, path):
        # TODO: I removed some stuff; needs testing before being run permanently.
from bioblend import ConnectionError as bioblendConnectionError
import time
attempts = 0
while True:
try:
if self.rename:
if path.endswith('.fasta'):
ending = '.fasta'
seqid = os.path.split(path)[-1].split('.')[0]
else:
if 'r2' in os.path.split(path)[-1].lower():
ending = '_R2.fastq'
elif 'r1' in os.path.split(path)[-1].lower():
ending = '_R1.fastq'
else:
ending = '.fastq'
seqid = os.path.split(path)[-1].split('_')[0]
nfilename = self.inputs['rename'][seqid] + ending
self.t.time_print("Uploading as %s..." % nfilename)
else:
self.t.time_print('Uploading...')
if self.inputs is not None: # Automated
if self.rename:
try:
self.uploaded.append(nfilename)
self.gi.tools.upload_file(path, self.history_id,
file_name=nfilename)
except KeyError:
self.gi.tools.upload_file(path, self.history_id)
else:
self.gi.tools.upload_file(path, self.history_id)
else:
self.gi.tools.upload_file(path, self.history_id)
break
except bioblendConnectionError:
if attempts < self.max_attempts:
attempts += 1
self.t.time_print("[Warning] Failed to upload %s, retrying (attempt %d of %d)" %
(path, attempts, self.max_attempts))
time.sleep(5)
continue
else:
self.t.time_print("[Error] Failed to upload %s, after %d attempts." %
(path, self.max_attempts))
raise
except requests.exceptions.ConnectionError:
if attempts < self.max_attempts:
attempts += 1
self.t.time_print("Galaxy isn't responding...")
self.wait_for_problem()
self.t.time_print("[Warning] Failed to upload %s, retrying (attempt %d of %d)" %
(path, attempts, self.max_attempts))
continue
else:
self.t.time_print("[Error] Failed to upload %s, after %d attempts." %
(path, self.max_attempts))
raise
def extract_files(self):
from sequence_getter import SequenceGetter
from sequence_getter import ExtractionError
self.extractor = SequenceGetter(nasmnt=self.NASMNT, output=False)
if self.inputs is None:
path_to_list = os.path.join(self.script_dir, "retrieve.txt")
try:
f = open(path_to_list, "r")
# Get all of the ids in the file
ids = re.findall(r"(2\d{3}-\w{2,10}-\d{3,4})", f.read())
f.close()
except FileNotFoundError:
# create blank file
open(path_to_list, "w").close()
print("Please enter SEQids in the retrieve.txt file")
exit(1)
# Finds the invalid lines and output them
for line in open("retrieve.txt", "r"):
if line.rstrip("\n") not in ids and len(line.rstrip("\n")) > 2:
self.t.time_print("Invalid seqid: \"%s\"" % line.rstrip("\n"))
else:
ids = self.inputs['fastqs']
# Get paths of fastq's
path_list = []
err = ""
for seqid in ids:
for i in [1, 2]:
try:
path_list.append(self.extractor.retrieve_file(seqid.rstrip("\n"), filetype="fastq_R" + str(i),
getpathonly=True))
except ExtractionError as e:
err += e.message + '\n'
if self.reference is not None:
# Get fasta
try:
refpath = self.extractor.retrieve_file(self.reference, "fasta", getpathonly=True)
except ExtractionError as e:
err += e.message + '\n'
if len(err) > 0:
raise AutoSNVPhylError(err)
path_list.append(refpath)
else:
# Since there is no reference specified, check for one in the upload directory
self.t.time_print("No reference file specified, using the one in the upload directory")
found_ref = False
for file in os.listdir(os.path.join(self.script_dir, 'upload')):
if file.endswith(".fasta"):
if not found_ref:
self.t.time_print("Found " + file + ", using it as a reference...")
found_ref = True
else:
self.t.time_print("[Error] Found another reference file in upload folder, please only use one.")
exit(1)
if not found_ref:
self.t.time_print("[Error] No reference file(fasta) found. Cannot run.")
exit(1)
return path_list
def run_workflow(self):
while True:
try:
contents = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
datamap = dict()
found_ref = False
        found_collection = False
# Find the reference file
for item in contents:
if item["history_content_type"] == "dataset" and item["extension"] == "fasta":
datamap['1'] = {
'src': 'hda',
'id': item['id']
}
found_ref = True
if item["name"] == "pair_list":
datamap['0'] = {
'src': 'hdca',
'id': item['id']
}
found_collection = True
if not found_ref:
self.t.time_print("[Error] Can't find a reference on Galaxy.")
raise AutoSNVPhylError("Can't find a reference on Galaxy.")
if not found_collection:
self.t.time_print("[Error] Can't find list of dataset pairs on Galaxy.")
raise AutoSNVPhylError("Can't find list of dataset pairs on Galaxy.")
min_coverage = "10"
min_mean_mapping = "30"
alternative_allele_proportion = "0.75"
params = { # Don't change this, it works
'5': {
'mindepth': min_coverage
},
'11': {
'coverage': min_coverage,
'mean_mapping': min_mean_mapping,
'ao': alternative_allele_proportion
},
}
while True:
try:
self.gi.workflows.invoke_workflow(self.WORKFLOW_ID, inputs=datamap,
params=params, history_id=self.history_id)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
def build_list(self):
while True:
try:
contents = self.gi.histories.show_history(self.history_id, contents=True)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
fastqs = []
# get fastq files
for item in contents:
if item["history_content_type"] == "dataset" and item["extension"] == "fastq":
fastqs.append(item)
# pair fastq files
r1s = []
r2s = []
for fastq in fastqs:
result1 = re.findall(r"(.+)_[Rr]1", fastq["name"], flags=0)
result2 = re.findall(r"(.+)_[Rr]2", fastq["name"], flags=0)
if len(result1) >= 1:
fastq["name"] = result1[0]
r1s.append(fastq)
if len(result2) >= 1:
fastq["name"] = result2[0]
r2s.append(fastq)
if len(r1s) != len(r2s):
self.t.time_print("[WARNING] There are different amounts of R1 and R2 files,"
" will only use ones that can be paired.")
pairs = []
done = []
for sequence in r1s:
for compare in r2s:
if sequence["name"] == compare["name"] and sequence["name"] not in done:
# Pair them
elements = [
collections.HistoryDatasetElement(name="forward", id=sequence["id"]),
collections.HistoryDatasetElement(name="reverse", id=compare["id"])
]
done.append(sequence["name"])
pairs.append(collections.CollectionElement(sequence["name"], type="paired", elements=elements))
collection_description = collections.CollectionDescription("pair_list", type="list:paired", elements=pairs)
while True:
try:
self.gi.histories.create_dataset_collection(self.history_id, collection_description)
break
except (ConnectionError, requests.exceptions.ConnectionError):
self.wait_for_problem()
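    # Pairing sketch with hypothetical dataset names: "2017-SEQ-0123_R1" and "2017-SEQ-0123_R2"
    # match on the shared prefix "2017-SEQ-0123" and are combined into a single paired
    # collection element with "forward" and "reverse" roles before the workflow is invoked.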
def load(self):
from pyaccessories.SaveLoad import SaveLoad as SaveLoad
config = SaveLoad(os.path.join(self.script_dir, "config.json"), create=True)
self.API_KEY = config.get('api_key')
if not re.match(r"^\w{32}$", self.API_KEY):
self.t.time_print("Invalid Galaxy API key.")
exit(1)
self.WORKFLOW_ID = config.get('workflow_id', default='f2db41e1fa331b3e') # SNVPhyl paired end
if not re.match(r"^\w{16}$", self.WORKFLOW_ID):
self.t.time_print("Invalid workflow ID format.")
exit(1)
self.IP = config.get('ip', default="http://192.168.1.3:48888/")
self.NASMNT = os.path.normpath(config.get('nasmnt', default="/mnt/nas/"))
def wait_for_problem(self):
import time
short_wait = 5
time_until_giveup = 36
problem = True
while problem:
problem = False
try:
self.gi.histories.get_histories()
return
except (ConnectionError, requests.exceptions.ConnectionError) as e:
if e.status_code == 403: # Invalid API key
self.t.time_print("Invalid Galaxy API Key!")
exit(1)
elif 'Max retries exceeded' in str(e.args[0]):
self.t.time_print("Error: Galaxy isn't running/connection error.")
problem = True
if short_wait > 1:
self.t.time_print("Waiting 30 seconds...")
time.sleep(30)
short_wait -= 1
else:
self.t.time_print("Waiting 1 hour...")
time_until_giveup -= 1
if time_until_giveup < 1:
raise
time.sleep(3600)
else:
raise
def __init__(self, args_in, inputs=None):
self.max_attempts = 10
self.uploaded = [] # A list of all uploaded files
# constants sort of
self.IP = None
self.API_KEY = None
self.WORKFLOW_ID = None
self.NASMNT = None
self.inputs = inputs
self.gi = None
# Add arguments
self.reference = args_in.reference
self.noextract = args_in.noextract
self.NAME = args_in.history_name if args_in.history_name is not None else "AutoSNVPhyl_%s"\
% time.strftime("%d-%m-%Y")
self.manual = args_in.manual
self.script_dir = sys.path[0]
if not os.path.exists(os.path.join(self.script_dir, 'galaxy_logs')):
os.makedirs(os.path.join(self.script_dir, 'galaxy_logs'))
import datetime
self.t = Timer(log_file=os.path.join(self.script_dir, 'galaxy_logs',
datetime.datetime.now().strftime("%d-%m-%Y_%S:%M:%H")
+ "_%s.txt" % self.NAME))
self.t.set_colour(32)
self.rename = False
self.extractor = None
self.history_id = None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--reference",
help="Input the seqid of the reference file. "
"Also tells the program to extract the fastqs in your retrieve.txt. "
"If this parameter is not given then it will use the files in your "
"upload folder, it will autodetect the reference file as long as it's "
"a fasta. ", type=str)
parser.add_argument("-e", "--noextract", action="store_true",
help="Use if you don't want any fastq files to be extracted from the nas.")
parser.add_argument("-n", "--history_name", type=str,
help="Name of the history to create")
parser.add_argument("-m", "--manual", action="store_true",
help="Use the files in your upload directory (can use this in addition to the files extracted)."
" If this flag is not used then it will clear the files in your upload directory.")
# If no arguments
if len(sys.argv) == 1:
parser.print_help()
exit(1)
args = parser.parse_args()
runner = AutoSNVPhyl(args)
runner.run()
|
{
"content_hash": "54d64589535bdfa5d82a4b621a3ed766",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 123,
"avg_line_length": 41.11933534743203,
"alnum_prop": 0.5055288196612909,
"repo_name": "devonpmack/auto-SNVPhyl",
"id": "13557173fb32c4591ecc7e774b0235d324da03c0",
"size": "27809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50242"
}
],
"symlink_target": ""
}
|
"""
This code is based on https://github.com/nwojke/deep_sort/blob/master/deep_sort/kalman_filter.py
"""
import numpy as np
import scipy.linalg
__all__ = ['KalmanFilter']
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919
}
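# Gating sketch with hypothetical values: for 4-dimensional (x, y, a, h) measurements,
# an association whose squared Mahalanobis distance exceeds chi2inv95[4] (about 9.4877)
# falls outside the 0.95 gate and would typically be rejected by the tracker.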
class KalmanFilter(object):
"""
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, a, h, vx, vy, va, vh
contains the bounding box center position (x, y), aspect ratio a, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, a, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""
Create track from unassociated measurement.
Args:
measurement (ndarray): Bounding box coordinates (x, y, a, h) with
center position (x, y), aspect ratio a, and height h.
Returns:
The mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are
initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[3],
2 * self._std_weight_position * measurement[3], 1e-2,
2 * self._std_weight_position * measurement[3],
10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[3], 1e-5,
10 * self._std_weight_velocity * measurement[3]
]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""
Run Kalman filter prediction step.
Args:
mean (ndarray): The 8 dimensional mean vector of the object state
at the previous time step.
covariance (ndarray): The 8x8 dimensional covariance matrix of the
object state at the previous time step.
Returns:
The mean vector and covariance matrix of the predicted state.
Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[3], self._std_weight_position *
mean[3], 1e-2, self._std_weight_position * mean[3]
]
std_vel = [
self._std_weight_velocity * mean[3], self._std_weight_velocity *
mean[3], 1e-5, self._std_weight_velocity * mean[3]
]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
#mean = np.dot(self._motion_mat, mean)
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot(
(self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""
Project state distribution to measurement space.
        Args:
mean (ndarray): The state's mean vector (8 dimensional array).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
Returns:
The projected mean and covariance matrix of the given state estimate.
"""
std = [
self._std_weight_position * mean[3], self._std_weight_position *
mean[3], 1e-1, self._std_weight_position * mean[3]
]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance,
self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean, covariance):
"""
Run Kalman filter prediction step (Vectorized version).
Args:
mean (ndarray): The Nx8 dimensional mean matrix of the object states
at the previous time step.
            covariance (ndarray): The Nx8x8 dimensional covariance matrices of the
object states at the previous time step.
Returns:
The mean vector and covariance matrix of the predicted state.
Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 3], self._std_weight_position *
mean[:, 3], 1e-2 * np.ones_like(mean[:, 3]),
self._std_weight_position * mean[:, 3]
]
std_vel = [
self._std_weight_velocity * mean[:, 3], self._std_weight_velocity *
mean[:, 3], 1e-5 * np.ones_like(mean[:, 3]),
self._std_weight_velocity * mean[:, 3]
]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = []
for i in range(len(mean)):
motion_cov.append(np.diag(sqr[i]))
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean, covariance, measurement):
"""
Run Kalman filter correction step.
Args:
mean (ndarray): The predicted state's mean vector (8 dimensional).
covariance (ndarray): The state's covariance matrix (8x8 dimensional).
measurement (ndarray): The 4 dimensional measurement vector
(x, y, a, h), where (x, y) is the center position, a the aspect
ratio, and h the height of the bounding box.
Returns:
The measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower),
np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot(
(kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(self,
mean,
covariance,
measurements,
only_position=False,
metric='maha'):
"""
Compute gating distance between state distribution and measurements.
A suitable distance threshold can be obtained from `chi2inv95`. If
`only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Args:
mean (ndarray): Mean vector over the state distribution (8
dimensional).
covariance (ndarray): Covariance of the state distribution (8x8
dimensional).
measurements (ndarray): An Nx4 dimensional matrix of N measurements,
each in format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
only_position (Optional[bool]): If True, distance computation is
done with respect to the bounding box center position only.
metric (str): Metric type, 'gaussian' or 'maha'.
        Returns:
An array of length N, where the i-th element contains the squared
Mahalanobis distance between (mean, covariance) and `measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
d = measurements - mean
if metric == 'gaussian':
return np.sum(d * d, axis=1)
elif metric == 'maha':
cholesky_factor = np.linalg.cholesky(covariance)
z = scipy.linalg.solve_triangular(
cholesky_factor,
d.T,
lower=True,
check_finite=False,
overwrite_b=True)
squared_maha = np.sum(z * z, axis=0)
return squared_maha
else:
raise ValueError('invalid distance metric')
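# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The detection values below are made-up numbers in (x, y, a, h)
# format, chosen only to show how initiate/predict/update/gating fit together.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    kf = KalmanFilter()
    # Start a track from a hypothetical first detection.
    mean, covariance = kf.initiate(np.array([320.0, 240.0, 0.5, 80.0]))
    # Propagate the state one frame ahead, then correct it with a new
    # (equally hypothetical) measurement.
    mean, covariance = kf.predict(mean, covariance)
    mean, covariance = kf.update(mean, covariance,
                                 np.array([322.0, 243.0, 0.5, 82.0]))
    # Gate two candidate detections against the 0.95 chi-square threshold
    # for 4 degrees of freedom (full x, y, a, h observation).
    candidates = np.array([[322.0, 243.0, 0.5, 82.0],
                           [600.0, 50.0, 0.4, 40.0]])
    print(kf.gating_distance(mean, covariance, candidates) <= chi2inv95[4])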
|
{
"content_hash": "0dbf6ae1fce1b876f2c8195f17356039",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 96,
"avg_line_length": 38.09842519685039,
"alnum_prop": 0.5612276531983053,
"repo_name": "PaddlePaddle/models",
"id": "84c41ffd40960fc82a9611bc425857982f61ebaa",
"size": "10300",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.3",
"path": "modelcenter/PP-Vehicle/APP/pptracking/python/mot/motion/kalman_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import CommandError
from zerver.lib.actions import do_update_message_flags
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Message
class Command(ZulipBaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
for email in options['emails']:
try:
user_profile = self.get_user(email, realm)
except CommandError:
print("e-mail %s doesn't exist in the realm %s, skipping" % (email, realm))
continue
do_update_message_flags(user_profile, "add", "read", None, True, None, None)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
|
{
"content_hash": "63d699a716ef0acc4c74083397c6183a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 91,
"avg_line_length": 39.69047619047619,
"alnum_prop": 0.5950809838032394,
"repo_name": "jrowan/zulip",
"id": "8b2215e1ca2bc41be8a8b8c3327af4a360a58cb1",
"size": "1667",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/bankrupt_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400886"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "470981"
},
{
"name": "JavaScript",
"bytes": "2070164"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87465"
},
{
"name": "Python",
"bytes": "3500902"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "38344"
}
],
"symlink_target": ""
}
|
""" Create a bin directory that symlinks the various scripts and
tools from my code repository.
"""
import sys, os
if __name__ == "__main__":
usage = "createBin.py [code-repo] [new-bin]"
symlinks = { # filename: linkname,
'alarm': 'alarm',
'centurion': 'centurion',
'detach': 'detach',
'm3u.sh': 'm3u',
'msgOn': 'msgOn',
'nisDiscover.py': 'nisDiscover',
'pidwatch.py': 'pidwatch',
'processSnitch.py': 'processSnitch',
'pygrep': 'pygrep',
'random': 'random',
'rd-back.sh': 'backup',
'rmpyc': 'rmpyc',
'sshfs.py': 'sfs',
'timer': 'timer',
'tree': 'tree',
'tsrename.sh': 'tsrename',
'wikiLookup.sh': 'wikiLookup'
}
args = sys.argv[:]
args.pop(0)
print args
if ("-h" in args) or ('--help' in args) or (len(args) != 2):
print usage
sys.exit(0)
repo = args[0]
if not os.path.exists(repo):
print usage
sys.exit(0)
bin = args[1]
if not os.path.exists(bin):
print "Creating new bin directory..."
os.mkdir(bin)
skipped = []
for fname, link in symlinks.iteritems():
src = os.path.join(repo, fname)
dest = os.path.join(bin, link)
        if not os.path.exists(src):
            skipped.append(fname)
            continue  # skip missing sources instead of creating dangling symlinks
        print "linking %s -> %s" % (src, dest)
        os.symlink(src, dest)
print "The following links are not available: %s" % skipped
|
{
"content_hash": "1538a16337afacc8992d97ae7bf0cf57",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 64,
"avg_line_length": 29.836363636363636,
"alnum_prop": 0.47958561852528947,
"repo_name": "fretboardfreak/code",
"id": "0debaffe3edca1230cda11341c9fa47112d92564",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abandoned/createBin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "649"
},
{
"name": "C++",
"bytes": "1599"
},
{
"name": "CSS",
"bytes": "83144"
},
{
"name": "HTML",
"bytes": "11830"
},
{
"name": "Java",
"bytes": "379"
},
{
"name": "JavaScript",
"bytes": "19508"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "PHP",
"bytes": "3691"
},
{
"name": "Perl",
"bytes": "1063"
},
{
"name": "Python",
"bytes": "273951"
},
{
"name": "Shell",
"bytes": "81945"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name="multipool",
version="0.10.1",
description="Efficient multi-locus genetic mapping with pooled sequencing.",
author="Matt Edwards",
author_email="matted@mit.edu",
license="MIT",
url="https://github.com/matted/multipool",
packages=[],
scripts=["mp_inference.py"],
zip_safe=True,
install_requires=["scipy", "numpy"], # pylab is optional; leaving it out for now
)
|
{
"content_hash": "d67a4edb32d12d3e051d0777ada0f3eb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 33.07142857142857,
"alnum_prop": 0.6414686825053996,
"repo_name": "matted/multipool",
"id": "cd1f450d52087458752a3e27d5ae04ff23e348b6",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18233"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import tempfile
from target_test import FileSystemTargetTestMixin
from helpers import with_config, unittest, skipOnTravis
from boto.exception import S3ResponseError
from boto.s3 import key
from moto import mock_s3
from moto import mock_sts
from luigi import configuration
from luigi.contrib.s3 import FileNotFoundException, InvalidDeleteException, S3Client, S3Target
from luigi.target import MissingParentDirectory
if (3, 4, 0) <= sys.version_info[:3] < (3, 4, 3):
# spulec/moto#308
raise unittest.SkipTest('moto mock doesn\'t work with python3.4')
AWS_ACCESS_KEY = "XXXXXXXXXXXXXXXXXXXX"
AWS_SECRET_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
class TestS3Target(unittest.TestCase, FileSystemTargetTestMixin):
def setUp(self):
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
self.tempFileContents = (
b"I'm a temporary file for testing\nAnd this is the second line\n"
b"This is the third.")
self.tempFilePath = f.name
f.write(self.tempFileContents)
f.close()
self.addCleanup(os.remove, self.tempFilePath)
self.mock_s3 = mock_s3()
self.mock_s3.start()
self.addCleanup(self.mock_s3.stop)
def create_target(self, format=None, **kwargs):
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
return S3Target('s3://mybucket/test_file', client=client, format=format, **kwargs)
def test_read(self):
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
client.put(self.tempFilePath, 's3://mybucket/tempfile')
t = S3Target('s3://mybucket/tempfile', client=client)
read_file = t.open()
file_str = read_file.read()
self.assertEqual(self.tempFileContents, file_str.encode('utf-8'))
def test_read_no_file(self):
t = self.create_target()
self.assertRaises(FileNotFoundException, t.open)
def test_read_no_file_sse(self):
t = self.create_target(encrypt_key=True)
self.assertRaises(FileNotFoundException, t.open)
def test_read_iterator_long(self):
# write a file that is 5X the boto buffersize
# to test line buffering
old_buffer = key.Key.BufferSize
key.Key.BufferSize = 2
try:
tempf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
temppath = tempf.name
firstline = ''.zfill(key.Key.BufferSize * 5) + os.linesep
contents = firstline + 'line two' + os.linesep + 'line three'
tempf.write(contents.encode('utf-8'))
tempf.close()
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
client.put(temppath, 's3://mybucket/largetempfile')
t = S3Target('s3://mybucket/largetempfile', client=client)
with t.open() as read_file:
lines = [line for line in read_file]
finally:
key.Key.BufferSize = old_buffer
self.assertEqual(3, len(lines))
self.assertEqual(firstline, lines[0])
self.assertEqual("line two" + os.linesep, lines[1])
self.assertEqual("line three", lines[2])
def test_get_path(self):
t = self.create_target()
path = t.path
self.assertEqual('s3://mybucket/test_file', path)
def test_get_path_sse(self):
t = self.create_target(encrypt_key=True)
path = t.path
self.assertEqual('s3://mybucket/test_file', path)
class TestS3Client(unittest.TestCase):
def setUp(self):
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
self.tempFilePath = f.name
self.tempFileContents = b"I'm a temporary file for testing\n"
f.write(self.tempFileContents)
f.close()
self.addCleanup(os.remove, self.tempFilePath)
self.mock_s3 = mock_s3()
self.mock_s3.start()
self.mock_sts = mock_sts()
self.mock_sts.start()
self.addCleanup(self.mock_s3.stop)
self.addCleanup(self.mock_sts.stop)
def test_init_with_environment_variables(self):
os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'bar'
        # Don't read any existing config
old_config_paths = configuration.LuigiConfigParser._config_paths
configuration.LuigiConfigParser._config_paths = [tempfile.mktemp()]
s3_client = S3Client()
configuration.LuigiConfigParser._config_paths = old_config_paths
self.assertEqual(s3_client.s3.gs_access_key_id, 'foo')
self.assertEqual(s3_client.s3.gs_secret_access_key, 'bar')
@with_config({'s3': {'aws_access_key_id': 'foo', 'aws_secret_access_key': 'bar'}})
def test_init_with_config(self):
s3_client = S3Client()
self.assertEqual(s3_client.s3.access_key, 'foo')
self.assertEqual(s3_client.s3.secret_key, 'bar')
@with_config({'s3': {'aws_role_arn': 'role', 'aws_role_session_name': 'name'}})
def test_init_with_config_and_roles(self):
s3_client = S3Client()
self.assertEqual(s3_client.s3.access_key, 'AKIAIOSFODNN7EXAMPLE')
self.assertEqual(s3_client.s3.secret_key, 'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
def test_put(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/putMe')
self.assertTrue(s3_client.exists('s3://mybucket/putMe'))
def test_put_sse(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/putMe', encrypt_key=True)
self.assertTrue(s3_client.exists('s3://mybucket/putMe'))
def test_put_string(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("SOMESTRING", 's3://mybucket/putString')
self.assertTrue(s3_client.exists('s3://mybucket/putString'))
def test_put_string_sse(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("SOMESTRING", 's3://mybucket/putString', encrypt_key=True)
self.assertTrue(s3_client.exists('s3://mybucket/putString'))
def test_put_multipart_multiple_parts_non_exact_fit(self):
"""
Test a multipart put with two parts, where the parts are not exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = (part_size * 2) - 5000
self._run_multipart_test(part_size, file_size)
def test_put_multipart_multiple_parts_non_exact_fit_with_sse(self):
"""
Test a multipart put with two parts, where the parts are not exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = (part_size * 2) - 5000
self._run_multipart_test(part_size, file_size, encrypt_key=True)
def test_put_multipart_multiple_parts_exact_fit(self):
"""
Test a multipart put with multiple parts, where the parts are exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = part_size * 2
self._run_multipart_test(part_size, file_size)
    def test_put_multipart_multiple_parts_exact_fit_with_sse(self):
"""
Test a multipart put with multiple parts, where the parts are exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = part_size * 2
self._run_multipart_test(part_size, file_size, encrypt_key=True)
def test_put_multipart_less_than_split_size(self):
"""
Test a multipart put with a file smaller than split size; should revert to regular put.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 5000
self._run_multipart_test(part_size, file_size)
def test_put_multipart_less_than_split_size_with_sse(self):
"""
Test a multipart put with a file smaller than split size; should revert to regular put.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 5000
self._run_multipart_test(part_size, file_size, encrypt_key=True)
def test_put_multipart_empty_file(self):
"""
Test a multipart put with an empty file.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 0
self._run_multipart_test(part_size, file_size)
def test_put_multipart_empty_file_with_sse(self):
"""
Test a multipart put with an empty file.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 0
self._run_multipart_test(part_size, file_size, encrypt_key=True)
def test_exists(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertTrue(s3_client.exists('s3://mybucket/'))
self.assertTrue(s3_client.exists('s3://mybucket'))
self.assertFalse(s3_client.exists('s3://mybucket/nope'))
self.assertFalse(s3_client.exists('s3://mybucket/nope/'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempfile')
self.assertTrue(s3_client.exists('s3://mybucket/tempfile'))
self.assertFalse(s3_client.exists('s3://mybucket/temp'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir0_$folder$')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir0'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir1/')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir1'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir2/subdir')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir2'))
self.assertFalse(s3_client.exists('s3://mybucket/tempdir'))
def test_get(self):
# put a file on s3 first
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/putMe')
tmp_file = tempfile.NamedTemporaryFile(delete=True)
tmp_file_path = tmp_file.name
s3_client.get('s3://mybucket/putMe', tmp_file_path)
self.assertEqual(tmp_file.read(), self.tempFileContents)
tmp_file.close()
def test_get_as_string(self):
# put a file on s3 first
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/putMe')
contents = s3_client.get_as_string('s3://mybucket/putMe')
self.assertEqual(contents, self.tempFileContents)
def test_get_key(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/key_to_find')
self.assertTrue(s3_client.get_key('s3://mybucket/key_to_find'))
self.assertFalse(s3_client.get_key('s3://mybucket/does_not_exist'))
def test_isdir(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertTrue(s3_client.isdir('s3://mybucket'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir0_$folder$')
self.assertTrue(s3_client.isdir('s3://mybucket/tempdir0'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir1/')
self.assertTrue(s3_client.isdir('s3://mybucket/tempdir1'))
s3_client.put(self.tempFilePath, 's3://mybucket/key')
self.assertFalse(s3_client.isdir('s3://mybucket/key'))
def test_mkdir(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertTrue(s3_client.isdir('s3://mybucket'))
s3_client.mkdir('s3://mybucket')
s3_client.mkdir('s3://mybucket/dir')
self.assertTrue(s3_client.isdir('s3://mybucket/dir'))
self.assertRaises(MissingParentDirectory,
s3_client.mkdir, 's3://mybucket/dir/foo/bar', parents=False)
self.assertFalse(s3_client.isdir('s3://mybucket/dir/foo/bar'))
def test_listdir(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("", 's3://mybucket/hello/frank')
s3_client.put_string("", 's3://mybucket/hello/world')
self.assertEqual(['s3://mybucket/hello/frank', 's3://mybucket/hello/world'],
list(s3_client.listdir('s3://mybucket/hello')))
def test_list(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("", 's3://mybucket/hello/frank')
s3_client.put_string("", 's3://mybucket/hello/world')
self.assertEqual(['frank', 'world'],
list(s3_client.list('s3://mybucket/hello')))
def test_listdir_key(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("", 's3://mybucket/hello/frank')
s3_client.put_string("", 's3://mybucket/hello/world')
self.assertEqual([True, True],
[x.exists() for x in s3_client.listdir('s3://mybucket/hello', return_key=True)])
def test_list_key(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("", 's3://mybucket/hello/frank')
s3_client.put_string("", 's3://mybucket/hello/world')
self.assertEqual([True, True],
[x.exists() for x in s3_client.list('s3://mybucket/hello', return_key=True)])
def test_remove(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertRaises(
S3ResponseError,
lambda: s3_client.remove('s3://bucketdoesnotexist/file')
)
self.assertFalse(s3_client.remove('s3://mybucket/doesNotExist'))
s3_client.put(self.tempFilePath, 's3://mybucket/existingFile0')
self.assertTrue(s3_client.remove('s3://mybucket/existingFile0'))
self.assertFalse(s3_client.exists('s3://mybucket/existingFile0'))
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket/')
)
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket')
)
s3_client.put(self.tempFilePath, 's3://mybucket/removemedir/file')
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket/removemedir', recursive=False)
)
# test that the marker file created by Hadoop S3 Native FileSystem is removed
s3_client.put(self.tempFilePath, 's3://mybucket/removemedir/file')
s3_client.put_string("", 's3://mybucket/removemedir_$folder$')
self.assertTrue(s3_client.remove('s3://mybucket/removemedir'))
self.assertFalse(s3_client.exists('s3://mybucket/removemedir_$folder$'))
def test_copy_multiple_parts_non_exact_fit(self):
"""
Test a multipart put with two parts, where the parts are not exactly the split size.
"""
# First, put a file into S3
self._run_copy_test(self.test_put_multipart_multiple_parts_non_exact_fit)
def test_copy_multiple_parts_exact_fit(self):
"""
Test a copy multiple parts, where the parts are exactly the split size.
"""
self._run_copy_test(self.test_put_multipart_multiple_parts_exact_fit)
def test_copy_less_than_split_size(self):
"""
Test a copy with a file smaller than split size; should revert to regular put.
"""
self._run_copy_test(self.test_put_multipart_less_than_split_size)
def test_copy_empty_file(self):
"""
Test a copy with an empty file.
"""
self._run_copy_test(self.test_put_multipart_empty_file)
def test_copy_multipart_multiple_parts_non_exact_fit(self):
"""
Test a multipart copy with two parts, where the parts are not exactly the split size.
"""
# First, put a file into S3
self._run_multipart_copy_test(self.test_put_multipart_multiple_parts_non_exact_fit)
def test_copy_multipart_multiple_parts_exact_fit(self):
"""
Test a multipart copy with multiple parts, where the parts are exactly the split size.
"""
self._run_multipart_copy_test(self.test_put_multipart_multiple_parts_exact_fit)
def test_copy_multipart_less_than_split_size(self):
"""
Test a multipart copy with a file smaller than split size; should revert to regular put.
"""
self._run_multipart_copy_test(self.test_put_multipart_less_than_split_size)
def test_copy_multipart_empty_file(self):
"""
Test a multipart copy with an empty file.
"""
self._run_multipart_copy_test(self.test_put_multipart_empty_file)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/145895385')
def test_copy_dir(self):
"""
Test copying 20 files from one folder to another
"""
n = 20
copy_part_size = (1024 ** 2) * 5
# Note we can't test the multipart copy due to moto issue #526
# so here I have to keep the file size smaller than the copy_part_size
file_size = 5000
s3_dir = 's3://mybucket/copydir/'
file_contents = b"a" * file_size
tmp_file = tempfile.NamedTemporaryFile(mode='wb', delete=True)
tmp_file_path = tmp_file.name
tmp_file.write(file_contents)
tmp_file.flush()
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
for i in range(n):
file_path = s3_dir + str(i)
s3_client.put_multipart(tmp_file_path, file_path)
self.assertTrue(s3_client.exists(file_path))
s3_dest = 's3://mybucket/copydir_new/'
s3_client.copy(s3_dir, s3_dest, threads=10, part_size=copy_part_size)
for i in range(n):
original_size = s3_client.get_key(s3_dir + str(i)).size
copy_size = s3_client.get_key(s3_dest + str(i)).size
self.assertEqual(original_size, copy_size)
def _run_multipart_copy_test(self, put_method):
# Run the method to put the file into s3 into the first place
put_method()
# As all the multipart put methods use `self._run_multipart_test`
# we can just use this key
original = 's3://mybucket/putMe'
copy = 's3://mybucket/putMe_copy'
# 5MB is minimum part size, use it here so we don't have to generate huge files to test
# the multipart upload in moto
part_size = (1024 ** 2) * 5
# Copy the file from old location to new
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.copy(original, copy, part_size=part_size, threads=4)
# We can't use etags to compare between multipart and normal keys,
# so we fall back to using the size instead
original_size = s3_client.get_key(original).size
copy_size = s3_client.get_key(copy).size
self.assertEqual(original_size, copy_size)
def _run_copy_test(self, put_method):
# Run the method to put the file into s3 into the first place
put_method()
# As all the multipart put methods use `self._run_multipart_test`
# we can just use this key
original = 's3://mybucket/putMe'
copy = 's3://mybucket/putMe_copy'
# Copy the file from old location to new
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.copy(original, copy, threads=4)
# We can't use etags to compare between multipart and normal keys,
# so we fall back to using the file size
original_size = s3_client.get_key(original).size
copy_size = s3_client.get_key(copy).size
self.assertEqual(original_size, copy_size)
def _run_multipart_test(self, part_size, file_size, **kwargs):
file_contents = b"a" * file_size
s3_path = 's3://mybucket/putMe'
tmp_file = tempfile.NamedTemporaryFile(mode='wb', delete=True)
tmp_file_path = tmp_file.name
tmp_file.write(file_contents)
tmp_file.flush()
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_multipart(tmp_file_path, s3_path, part_size=part_size, **kwargs)
self.assertTrue(s3_client.exists(s3_path))
file_size = os.path.getsize(tmp_file.name)
key_size = s3_client.get_key(s3_path).size
self.assertEqual(file_size, key_size)
tmp_file.close()
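# Illustrative way to run this module directly (added for clarity; assumes the
# luigi test dependencies, including moto and boto, are installed and that the
# test/ directory is on the import path):
#   python -m pytest test/contrib/s3_test.py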
|
{
"content_hash": "d04f9812f02e7b4aa947a05ea2467bdb",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 105,
"avg_line_length": 39.345588235294116,
"alnum_prop": 0.6276864137544385,
"repo_name": "republic-analytics/luigi",
"id": "96ae90646c553ff9786c3a24d5687bec6b69bd9f",
"size": "22007",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/contrib/s3_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "34521"
},
{
"name": "JavaScript",
"bytes": "82786"
},
{
"name": "Python",
"bytes": "1358575"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import datetime
import re
import json
import logging
from scrapy import Selector
from cameo.utility import Utility
from cameo.localdb import LocalDbForCRUNCHBASE
from crawlermaster.cmparser import CmParser
from cameo.cmConverter.converterForCRUNCHBASE import ConverterForCRUNCHBASE
"""
從 source_html 的 HTML 檔案解析資料
結果放置於 parsed_result 下
"""
class ParserForCRUNCHBASE:
    # Constructor
def __init__(self):
self.utility = Utility()
self.db = LocalDbForCRUNCHBASE()
self.dicSubCommandHandler = {
"search_funding_rounds":[self.parseSearchFundingRoundsPage],
"search_investors":[self.parseSearchInvestorsPage],
"organization":[self.parseOrganizationPage],
"cb_companies.csv":[self.parseCompaniesCsv]
}
self.SOURCE_HTML_BASE_FOLDER_PATH = u"cameo_res\\source_html"
self.PARSED_RESULT_BASE_FOLDER_PATH = u"cameo_res\\parsed_result"
        self.dicParsedResultOfStartup = {} # startup.json data
    # Get the parser usage message
    def getUseageMessage(self):
        return ("- CRUNCHBASE -\n"
                "usage:\n"
"search_funding_rounds - parse funding_rounds.html then insert organization url to localdb \n"
"search_investors - parse investors.html then insert organization url to localdb \n"
"organization - parse organization.html then create .json \n"
"cb_companies.csv - parse cb_companies.csv then insert organization url to localdb \n"
)
    # Run the parser
def runParser(self, lstSubcommand=None):
strSubcommand = lstSubcommand[0]
strArg1 = None
if len(lstSubcommand) == 2:
strArg1 = lstSubcommand[1]
for handler in self.dicSubCommandHandler[strSubcommand]:
handler(strArg1)
#funding rounds #####################################################################################
    # Parse funding_rounds.html
def parseSearchFundingRoundsPage(self, uselessArg1=None):
strFundingRoundsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\CRUNCHBASE"
lstStrFundingRoundsHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strFundingRoundsHtmlFolderPath, strSuffixes="funding_rounds.html")
strFundingRoundsResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + u"\\CRUNCHBASE"
if not os.path.exists(strFundingRoundsResultFolderPath):
os.mkdir(strFundingRoundsResultFolderPath) #mkdir parsed_result/CRUNCHBASE/
for strFundingRoundsHtmlFilePath in lstStrFundingRoundsHtmlFilePath:
with open(strFundingRoundsHtmlFilePath, "r") as fundingRoundsHtmlFile:
strPageSource = fundingRoundsHtmlFile.read()
root = Selector(text=strPageSource)
lstStrOrganizationUrl = root.css("div.cbRow div.cbCell:nth-of-type(3) span.identifier a.cb-link::attr(href)").extract()
for strOrganizationUrl in lstStrOrganizationUrl:
self.db.insertOrganizationUrlIfNotExists(strOrganizationUrl=strOrganizationUrl)
#investors #####################################################################################
    # Parse investors.html
def parseSearchInvestorsPage(self, uselessArg1=None):
strInvestorsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\CRUNCHBASE"
lstStrInvestorsHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strInvestorsHtmlFolderPath, strSuffixes="investors.html")
strInvestorsResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + u"\\CRUNCHBASE"
if not os.path.exists(strInvestorsResultFolderPath):
os.mkdir(strInvestorsResultFolderPath) #mkdir parsed_result/CRUNCHBASE/
for strInvestorsHtmlFilePath in lstStrInvestorsHtmlFilePath:
with open(strInvestorsHtmlFilePath, "r") as investorsHtmlFile:
strPageSource = investorsHtmlFile.read()
root = Selector(text=strPageSource)
lstStrOrganizationUrl = root.css("TODO").extract()
for strOrganizationUrl in lstStrOrganizationUrl:
self.db.insertOrganizationUrlIfNotExists(strOrganizationUrl=strOrganizationUrl)
#organization #####################################################################################
    # Parse organization.html
def parseOrganizationPage(self, uselessArg1=None):
strOrganizationResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + u"\\CRUNCHBASE\\organization"
strOrganizationHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\CRUNCHBASE\\organization"
if not os.path.exists(strOrganizationResultFolderPath):
os.mkdir(strOrganizationResultFolderPath) #mkdir parsed_result/CRUNCHBASE/organization/
#organization.html
strCssJsonFilePath = "cameo_res\\selector_rule\\crunchbase_organization.json"
cmParser = CmParser(strCssJsonFilePath=strCssJsonFilePath)
rawDataConverter = ConverterForCRUNCHBASE()
lstDicOrganizationPageRawData = cmParser.localHtmlFileParse(
strBasedir=strOrganizationHtmlFolderPath,
strSuffixes="_organization.html",
isIterable=True,
isResetIteration=True
)
#convert
intStartupJsonIndex = 1
while len(lstDicOrganizationPageRawData)>0:
strStartupJsonFilePath = strOrganizationResultFolderPath + u"\\%d_startup.json"%(intStartupJsonIndex*1000)
rawDataConverter.convertStartup(lstLstDicRawData=[lstDicOrganizationPageRawData])
rawDataConverter.flushConvertedStartupDataToJsonFile(strJsonFilePath=strStartupJsonFilePath)
intStartupJsonIndex = intStartupJsonIndex+1
lstDicOrganizationPageRawData = cmParser.localHtmlFileParse(
strBasedir=strOrganizationHtmlFolderPath,
strSuffixes="_organization.html",
isIterable=True
)
#CB_companies.csv ##################################################################################
    # Parse CB_companies.csv
def parseCompaniesCsv(self, uselessArg1=None):
strCompaniesCsvFilePath = u"cameo_res\\CB_companies.csv"
with open(strCompaniesCsvFilePath, "r") as companiesCsvFile:
for strCompanyUrlLine in companiesCsvFile:
lstStrCompanyUrlLine = strCompanyUrlLine.split(",")
if lstStrCompanyUrlLine[0] == "Mark":
strOrganizationUrl = lstStrCompanyUrlLine[1]
self.db.insertOrganizationUrlIfNotExists(strOrganizationUrl=strOrganizationUrl)
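# Illustrative driver sketch (added for clarity; the real entry point lives
# elsewhere in the project, so this only shows how the class is meant to be
# called):
#   parser = ParserForCRUNCHBASE()
#   print(parser.getUseageMessage())
#   parser.runParser(["organization"])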
|
{
"content_hash": "ea9c4d20ffa7be08eb106f26e979ef1c",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 160,
"avg_line_length": 54.475806451612904,
"alnum_prop": 0.6712065136935603,
"repo_name": "muchu1983/104_cameo",
"id": "4a5c6e8b8f3f68894662f4112b03e2bcbc0996f5",
"size": "6849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cameo/parserForCRUNCHBASE.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4028"
},
{
"name": "HTML",
"bytes": "885957"
},
{
"name": "Python",
"bytes": "738810"
}
],
"symlink_target": ""
}
|
import importlib
import pickle as p
def process(task):
"""
    Some tasks need to be processed before being sent to the MQ.
    The preprocessor will add the missing information before the push.
"""
procesor = None
try:
procesor = importlib.import_module(
'jrunner.jobqueue.preprocesors.%s' % str(task.resource))
except Exception as err:
pass
if procesor is None:
return task
return procesor.process(task)
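# Illustrative sketch (added for clarity; the "network" resource name and the
# Task stand-in below are assumptions, not part of jrunner):
#   class Task(object):
#       resource = "network"
#   task = process(Task())  # returns the original task unchanged if no
#                           # jrunner.jobqueue.preprocesors.network module exists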
|
{
"content_hash": "19e79e78d352f51444d993165fb981af",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 24.5,
"alnum_prop": 0.6469387755102041,
"repo_name": "gabriel-samfira/jrunner",
"id": "56a20abed589eb8d2e6199e02bd544b9a90cc187",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jrunner/jobqueue/preprocesors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122690"
},
{
"name": "Shell",
"bytes": "2903"
}
],
"symlink_target": ""
}
|
import sys
import os
from setuptools import setup
from distutils.sysconfig import get_python_lib
import yass
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent YASS are
# still present in site-packages.
overlay_warning = False
existing_path = None
if 'install' in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "yass"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
def _(fname: str) -> str:
"""
Utility function to read the README file.
Used to fill the *long_description* field.
It's nice, because now
* we have a top level README file, and
* it's easier to type in the README file than to put a raw string in below
:param fname: README file name
:type fname: str
:return: README file content
:rtype: str
"""
with open(os.path.join(os.getcwd(), fname)) as readme:
content = readme.read() or '' # prevent ``content = None``
return content
setup(
name=yass.__lname__,
version=yass.__version__,
author=yass.__author__,
author_email=yass.__author_email__,
description='{name} is a plugin-powered search engine based subdomainer'.format(name=yass.__uname__),
long_description=_('README'),
license='Apache License, Version 2.0',
keywords='subdomain crawling information-gathering',
url=yass.__source_url__,
scripts=['bin/yass'],
packages=['yass'],
install_requires=['pyquery >=1.2.9', 'colorama'],
platforms=['OS Independent'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Pentesters',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Security',
]
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed {name} over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
{name}. This is known to cause a variety of problems.
You should manually remove the
{existing_path}
directory and re-install {name}.
""".format(name=yass.__uname__, existing_path=existing_path))
|
{
"content_hash": "92e911dad34bb51803934c8021c8d97a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 105,
"avg_line_length": 33.6551724137931,
"alnum_prop": 0.657103825136612,
"repo_name": "mrnfrancesco/yass",
"id": "912c973d86e9095db39234d334558c1d90421f86",
"size": "2952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "377"
},
{
"name": "Python",
"bytes": "28323"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, division
import sys
import os
import time
import datetime
import argparse
from Bio import SeqIO
###############################################################################
# FUNCTION DEFINITIONS
###############################################################################
def timestamp():
"""
Return time stamp.
"""
t = time.time()
fmt = '[%Y-%m-%d %H:%M:%S]'
return datetime.datetime.fromtimestamp(t).strftime(fmt)
def print_status(msg, end='\n'):
"""
Print status message.
"""
print('{} {}'.format(timestamp(), msg), file=sys.stderr, end=end)
sys.stderr.flush()
def reprint(msg, fh=sys.stderr):
"""
Print status message with carriage return
"""
print('\033[1A\033[K{}\n\033[1B'.format(msg), end='', file=fh)
fh.flush()
def check_file(file_path, directory_path=None):
"""
Check if file exists.
"""
if directory_path:
file_path = os.path.join(directory_path, file_path)
return os.path.isfile(file_path)
def exit_script(num=1):
"""
Exit script.
"""
sys.exit(num)
###############################################################################
# ARGUMENT PARSING
###############################################################################
parser = argparse.ArgumentParser(description='Convert SFF file to '
'FASTA or FASTQ')
parser.add_argument('sff', help='Input SFF file path')
parser.add_argument('out_file', help='Output file path '
'(without format extension) ')
parser.add_argument('format', help='Output format',
choices=['fasta', 'fastq'], default='fasta')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose output')
args = parser.parse_args()
# Check to see if input file exists
if not check_file(args.sff):
print('File', args.sff, 'does not exist', file=sys.stderr)
parser.print_help()
exit_script()
###############################################################################
# RUN CONVERSION
###############################################################################
if args.verbose:
print_status('Converting ' + args.sff + ' into a ' + args.format + ' file')
SeqIO.convert(args.sff, 'sff', args.out_file + '.' + args.format, args.format)
print_status('Script complete!')
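# Illustrative invocation (added for clarity; file names are placeholders):
#   python sff_to_fastq.py reads.sff reads fastq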
|
{
"content_hash": "38496b059c0b74ef6138defcb3a5275f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 30.320987654320987,
"alnum_prop": 0.48900651465798045,
"repo_name": "dacuevas/bioinformatics",
"id": "c47e99a480978fa48a528b694ca2612494bc8309",
"size": "2456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sequence_data/sff_to_fastq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89452"
},
{
"name": "R",
"bytes": "1839"
}
],
"symlink_target": ""
}
|
import os
import qutip
import sys
import timeit
if len(sys.argv) != 3:
sys.exit("Please specify the maximum number of cores and qubits!")
num_cores = int(sys.argv[1]) # max number of cores
n = int(sys.argv[2]) # number of qubits
D = 2 ** n # total dimension
os.environ['OPENBLAS_NUM_THREADS'] = str(num_cores)
os.environ['MKL_NUM_THREADS'] = str(num_cores)
qutip.settings.num_cpus = num_cores
result = qutip.rand_herm(D, dims=[[2] * n, [2] * n])
# start timing
start_time = timeit.default_timer()
# partial trace over the first qubit
result = result.ptrace(range(1, n))
elapsed = timeit.default_timer() - start_time
# end timing
print("{0}, {1}, {2}".format(num_cores, n, elapsed))
|
{
"content_hash": "d70b8fc7be1fb66b91326413cbf7e09a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 24.96551724137931,
"alnum_prop": 0.6560773480662984,
"repo_name": "vsoftco/qpp",
"id": "06a72781185fd428fda6cbceadd16e0416440452",
"size": "784",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "stress_tests/python/ptrace_qutip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15516"
},
{
"name": "C++",
"bytes": "4478995"
},
{
"name": "CMake",
"bytes": "55370"
},
{
"name": "M4",
"bytes": "25814"
},
{
"name": "Makefile",
"bytes": "19625"
},
{
"name": "Python",
"bytes": "522264"
},
{
"name": "Shell",
"bytes": "44497"
}
],
"symlink_target": ""
}
|
import numpy as np
from .utils.voronoi import voronoi_finite_polygons
class RandomPoints:
def __init__(self, points_number):
self.points_number = points_number
def generate(self, map_obj):
map_obj.points = np.random.random((self.points_number, 2))
class RelaxedPoints:
"""
Improve the random set of points with Lloyd Relaxation
"""
def __init__(self, points_number, lloyd_iterations=4):
self.points_number = points_number
self.lloyd_iterations = lloyd_iterations
def generate(self, map_obj):
points = np.random.random((self.points_number, 2))
# make points less "random"
for _ in range(self.lloyd_iterations):
regions = voronoi_finite_polygons(points, bbox=map_obj.bbox)
points = []
for region in regions:
points.append(region.mean(axis=0)) # get centroid
map_obj.points = points
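# Illustrative usage sketch (added for clarity; the _Map stand-in below is an
# assumption about the interface the generators expect: a bbox attribute and a
# writable points attribute):
#   class _Map(object):
#       bbox = [0.0, 0.0, 1.0, 1.0]
#   m = _Map()
#   RelaxedPoints(points_number=1000, lloyd_iterations=2).generate(m)
#   # m.points now holds ~1000 centroid-relaxed points inside the unit square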
|
{
"content_hash": "6b0a201d989a4142a1f5efa3593668fa",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 27.529411764705884,
"alnum_prop": 0.6303418803418803,
"repo_name": "Alerion/fantasy_map",
"id": "10b4b60e2d1e985f4d7adbf1d87c7fcb59e85d87",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "map/generators/points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2316"
},
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "Python",
"bytes": "71210"
}
],
"symlink_target": ""
}
|
import numpy as np
import os, pdb
from hftools.testing import TestCase
class Test_1(TestCase):
readfun = None
basepath = None
dirname = None
extension = None
filename = "test1"
readpars = {"verbose":False}
def setUp(self):
self.facit_f = np.array([0, 1, 2, 3]) * 1e9
self.facit_s = (np.array([[[1, 2], [3, 4]]]) *
np.array([1, 1j, -1, -1j])[:, np.newaxis, np.newaxis])
self.comments = []
self.read_data()
def read_data(self):
fname = self.basepath / u"testdata" / self.dirname / self.filename + self.extension
self.block = self.readfun[0](fname, **self.readpars)
def test_comments(self):
self.assertEqual(self.block.comments.fullcomments, self.comments)
def test_freq(self):
self.assertAllclose(self.facit_f, self.block["freq"])
def test_data(self):
self.assertAllclose(self.facit_s, self.block["S"])
class Test_2(Test_1):
filename = "test2"
class Test_3(Test_1):
filename = "test3"
class Test_Comment_1(Test_1):
filename = "test4"
readpars = dict(property_to_vars=True, verbose=False)
def setUp(self):
Test_1.setUp(self)
self.comments = ["Vgs [V]= -1",
"Vds [V]= 5.3",
"Ig [uA]= 0.3",
"Id [mA]= 20",
]
def test_prop(self):
self.assertAllclose(self.block["Vgs"], -1)
self.assertAllclose(self.block["Vds"], 5.3)
self.assertAllclose(self.block["Ig"], 0.3e-6)
self.assertAllclose(self.block["Id"], 0.02)
class Test_Comment_2(Test_1):
filename = "test4"
readpars = dict(property_to_vars=False, verbose=False)
def setUp(self):
Test_1.setUp(self)
self.comments = ["Vgs [V]= -1",
"Vds [V]= 5.3",
"Ig [uA]= 0.3",
"Id [mA]= 20",
]
def test_prop(self):
self.assertRaises(KeyError, lambda x:self.block[x], "Vgs", )
self.assertRaises(KeyError, lambda x:self.block[x], "Vds", )
self.assertRaises(KeyError, lambda x:self.block[x], "Ig", )
self.assertRaises(KeyError, lambda x:self.block[x], "Id", )
|
{
"content_hash": "af2a436e63a44cd947cb15f36a4b6acc",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 91,
"avg_line_length": 31.985915492957748,
"alnum_prop": 0.542492294143549,
"repo_name": "hftools/hftools",
"id": "10079b16450d733a2c445d7446a9a84fed0f2e6f",
"size": "2619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hftools/file_formats/tests/base_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "531942"
}
],
"symlink_target": ""
}
|
import logging
import threading
from absl import flags as gflags
from ct.client import reporter
from Queue import Queue
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("cert_db_writer_queue_size", 10, "Size of certificate "
"queue in db reporter")
class CertDBCertificateReport(reporter.CertificateReport):
def __init__(self, cert_db, log_key):
self._cert_db = cert_db
self.log_key = log_key
self._certs_queue = Queue(FLAGS.cert_db_writer_queue_size)
self._writer = None
super(CertDBCertificateReport, self).__init__()
def _writer_ready(self):
return self._writer and self._writer.is_alive()
def report(self):
super(CertDBCertificateReport, self).report()
if self._writer_ready():
self._certs_queue.join()
logging.info("Finished scheduled writing to CertDB")
self._certs_queue.put(None)
self.reset()
def reset(self):
if self._writer_ready():
self._writer.join()
self._writer = None
def _batch_scanned_callback(self, result):
if not self._writer_ready():
self._writer = threading.Thread(target=_process_certs,
args=(self._cert_db, self.log_key,
self._certs_queue))
self._writer.start()
self._certs_queue.put([(desc, index) for desc, index in result])
def _process_certs(db, log_key, certs_queue):
while True:
certs = certs_queue.get()
try:
# This check must be performed in the try block so task_done will
# be invoked in the finally block regardless of the check results.
if not certs:
break
db.store_certs_desc(certs, log_key)
except:
logging.exception("Failed to store certificate information")
finally:
certs_queue.task_done()
|
{
"content_hash": "a3fb89c0de7d78b63fc9508a3e58f094",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 33.559322033898304,
"alnum_prop": 0.5818181818181818,
"repo_name": "RJPercival/certificate-transparency",
"id": "86e66a19804171c259762de2abc14616adb6b65d",
"size": "1980",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/ct/client/db_reporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1542890"
},
{
"name": "Dockerfile",
"bytes": "3603"
},
{
"name": "HTML",
"bytes": "1195"
},
{
"name": "M4",
"bytes": "21520"
},
{
"name": "Makefile",
"bytes": "26264"
},
{
"name": "Python",
"bytes": "677076"
},
{
"name": "Shell",
"bytes": "80649"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['tweet_me_screen'],
package_dir={'':'src'})
setup(**setup_args)
|
{
"content_hash": "e80e301adc2990d74aa692c65d582cd9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7389558232931727,
"repo_name": "hawesie/strands_social",
"id": "888c2eeb83ae1f7b0583a827f78cf6cfaea067dc",
"size": "312",
"binary": false,
"copies": "2",
"ref": "refs/heads/hydro-devel",
"path": "tweet_me_screen/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "663"
},
{
"name": "C++",
"bytes": "59959"
},
{
"name": "CMake",
"bytes": "23857"
},
{
"name": "HTML",
"bytes": "10553"
},
{
"name": "Python",
"bytes": "31334"
}
],
"symlink_target": ""
}
|
import json
import os
import shutil
import tempfile
import unittest
from telemetry.page import cloud_storage
from telemetry.page import page
from telemetry.page import page_set_archive_info
class MockPage(page.Page):
def __init__(self, url, name=None):
super(MockPage, self).__init__(url, None, name=name)
page1 = MockPage('http://www.foo.com/', 'Foo')
page2 = MockPage('http://www.bar.com/', 'Bar')
page3 = MockPage('http://www.baz.com/')
recording1 = 'data_001.wpr'
recording2 = 'data_002.wpr'
archive_info_contents = ("""
{
"archives": {
"%s": ["%s", "%s"],
"%s": ["%s"]
}
}
""" % (recording1, page1.display_name, page2.display_name, recording2,
page3.display_name))
class TestPageSetArchiveInfo(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
# Write the metadata.
self.page_set_archive_info_file = os.path.join(self.tmp_dir, 'info.json')
with open(self.page_set_archive_info_file, 'w') as f:
f.write(archive_info_contents)
# Write the existing .wpr files.
for i in [1, 2]:
with open(os.path.join(self.tmp_dir, ('data_00%d.wpr' % i)), 'w') as f:
f.write(archive_info_contents)
# Create the PageSetArchiveInfo object to be tested.
self.archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
self.page_set_archive_info_file)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def assertCorrectHashFile(self, file_path):
self.assertTrue(os.path.exists(file_path + '.sha1'))
with open(file_path + '.sha1', 'rb') as f:
self.assertEquals(cloud_storage.CalculateHash(file_path), f.read())
def testReadingArchiveInfo(self):
self.assertIsNotNone(self.archive_info.WprFilePathForPage(page1))
self.assertEquals(recording1, os.path.basename(
self.archive_info.WprFilePathForPage(page1)))
self.assertIsNotNone(self.archive_info.WprFilePathForPage(page2))
self.assertEquals(recording1, os.path.basename(
self.archive_info.WprFilePathForPage(page2)))
self.assertIsNotNone(self.archive_info.WprFilePathForPage(page3))
self.assertEquals(recording2, os.path.basename(
self.archive_info.WprFilePathForPage(page3)))
def testArchiveInfoFileGetsUpdated(self):
"""Ensures that the archive info file is updated correctly."""
expected_archive_file_contents = {
u'description': (u'Describes the Web Page Replay archives for a page'
u' set. Don\'t edit by hand! Use record_wpr for'
u' updating.'),
u'archives': {
u'data_003.wpr': [u'Bar', u'http://www.baz.com/'],
u'data_001.wpr': [u'Foo']
}
}
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.archive_info.AddRecordedPages([page2, page3])
with open(self.page_set_archive_info_file, 'r') as f:
archive_file_contents = json.load(f)
self.assertEquals(expected_archive_file_contents, archive_file_contents)
def testModifications(self):
recording1_path = os.path.join(self.tmp_dir, recording1)
recording2_path = os.path.join(self.tmp_dir, recording2)
new_recording1 = os.path.join(self.tmp_dir, 'data_003.wpr')
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForPage(page1))
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForPage(page2))
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForPage(page3))
self.archive_info.AddRecordedPages([page2])
self.assertTrue(os.path.exists(new_recording1))
self.assertFalse(os.path.exists(new_temp_recording))
self.assertTrue(os.path.exists(recording1_path))
self.assertTrue(os.path.exists(recording2_path))
self.assertCorrectHashFile(new_recording1)
new_recording2 = os.path.join(self.tmp_dir, 'data_004.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.archive_info.AddRecordedPages([page3])
self.assertTrue(os.path.exists(new_recording2))
self.assertCorrectHashFile(new_recording2)
self.assertFalse(os.path.exists(new_temp_recording))
self.assertTrue(os.path.exists(recording1_path))
# recording2 is no longer needed, so it was deleted.
self.assertFalse(os.path.exists(recording2_path))
def testCreatingNewArchiveInfo(self):
# Write only the page set without the corresponding metadata file.
page_set_contents = ("""
{
archive_data_file": "new_archive_info.json",
"pages": [
{
"url": "%s",
}
]
}""" % page1.url)
page_set_file = os.path.join(self.tmp_dir, 'new_page_set.json')
with open(page_set_file, 'w') as f:
f.write(page_set_contents)
self.page_set_archive_info_file = os.path.join(self.tmp_dir,
'new_archive_info.json')
# Create the PageSetArchiveInfo object to be tested.
self.archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
self.page_set_archive_info_file)
# Add a recording for all the pages.
new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
with open(new_temp_recording, 'w') as f:
f.write('wpr data')
self.archive_info.AddNewTemporaryRecording(new_temp_recording)
self.assertEquals(new_temp_recording,
self.archive_info.WprFilePathForPage(page1))
self.archive_info.AddRecordedPages([page1])
# Expected name for the recording (decided by PageSetArchiveInfo).
new_recording = os.path.join(self.tmp_dir, 'new_archive_info_000.wpr')
self.assertTrue(os.path.exists(new_recording))
self.assertFalse(os.path.exists(new_temp_recording))
self.assertCorrectHashFile(new_recording)
# Check that the archive info was written correctly.
self.assertTrue(os.path.exists(self.page_set_archive_info_file))
read_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
self.page_set_archive_info_file)
self.assertEquals(new_recording,
read_archive_info.WprFilePathForPage(page1))
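# For reference, a minimal sketch of the hash that assertCorrectHashFile above
# relies on, assuming cloud_storage.CalculateHash is simply the hex SHA-1
# digest of the file contents (calculate_hash_sketch is an illustrative name,
# not telemetry's API):
import hashlib

def calculate_hash_sketch(file_path):
  # Hypothetical stand-in for cloud_storage.CalculateHash: the .sha1 sidecar
  # written next to each .wpr recording is expected to hold this digest.
  sha1 = hashlib.sha1()
  with open(file_path, 'rb') as f:
    for chunk in iter(lambda: f.read(8192), b''):
      sha1.update(chunk)
  return sha1.hexdigest()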
|
{
"content_hash": "2b2683de8a02f6096351b3b118d2a117",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 78,
"avg_line_length": 36.01639344262295,
"alnum_prop": 0.6739493248368988,
"repo_name": "chromium2014/src",
"id": "c288c7d121d66417d400e697b68b8664fda8c729",
"size": "6753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/page/page_set_archive_info_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1889381"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "39993418"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "220757674"
},
{
"name": "CSS",
"bytes": "973910"
},
{
"name": "Java",
"bytes": "6583410"
},
{
"name": "JavaScript",
"bytes": "20967999"
},
{
"name": "Mercury",
"bytes": "9480"
},
{
"name": "Objective-C",
"bytes": "943237"
},
{
"name": "Objective-C++",
"bytes": "7190130"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "674461"
},
{
"name": "Python",
"bytes": "10430892"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1337040"
},
{
"name": "Standard ML",
"bytes": "3705"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators

class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="legendgroup", parent_name="bar", **kwargs):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
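# A hedged usage sketch: plotly instantiates validators like this one to coerce
# property values, so a string assigned to bar.legendgroup goes through
# validate_coerce (assuming the standard BaseValidator interface).
validator = LegendgroupValidator()
print(validator.validate_coerce("group-1"))  # expected to echo back "group-1"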
|
{
"content_hash": "c14056a69be60c19815420cf1c47d5b5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 79,
"avg_line_length": 37.5,
"alnum_prop": 0.6155555555555555,
"repo_name": "plotly/python-api",
"id": "51788b43d3d2baca24c7fc3d87e5f505df7e0f92",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/_legendgroup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""
Command-line tool to inspect model embeddings.
"""
import argparse
import os
import sys
from typing import Iterable, Tuple
import mxnet as mx
import numpy as np
from . import constants as C
from . import model
from . import utils
from .data_io import tokens2ids
from .log import setup_main_logger
from .utils import check_condition
from .vocab import load_source_vocabs, load_target_vocab, reverse_vocab
logger = setup_main_logger(__name__, file_logging=False)

def compute_sims(inputs: mx.nd.NDArray, normalize: bool) -> mx.nd.NDArray:
"""
Returns a matrix with pair-wise similarity scores between inputs.
    Similarity score is the dot product of the inputs (cosine similarity when
    normalized to unit length). 'Similarity with self' is masked to a large negative value.
:param inputs: NDArray of inputs.
:param normalize: Whether to normalize to unit-length.
    :return: NDArray of shape (num_inputs, num_inputs) with pairwise similarity scores.
"""
if normalize:
logger.info("Normalizing embeddings to unit length")
inputs = mx.nd.L2Normalization(inputs, mode='instance')
sims = mx.nd.dot(inputs, inputs, transpose_b=True)
sims_np = sims.asnumpy()
np.fill_diagonal(sims_np, -9999999.)
sims = mx.nd.array(sims_np)
return sims

def nearest_k(similarity_matrix: mx.nd.NDArray,
query_word_id: int,
k: int,
gamma: float = 1.0) -> Iterable[Tuple[int, float]]:
"""
Returns values and indices of k items with largest similarity.
:param similarity_matrix: Similarity matrix.
:param query_word_id: Query word id.
:param k: Number of closest items to retrieve.
:param gamma: Parameter to control distribution steepness.
:return: List of indices and values of k nearest elements.
"""
# pylint: disable=unbalanced-tuple-unpacking
values, indices = mx.nd.topk(mx.nd.softmax(similarity_matrix[query_word_id] / gamma), k=k, ret_typ='both')
return zip(indices.asnumpy(), values.asnumpy())

def get_embedding_parameter_names(config: model.ModelConfig) -> Tuple[str, str]:
if config.weight_tying and C.WEIGHT_TYING_SRC in config.weight_tying_type and \
C.WEIGHT_TYING_SRC_TRG_SOFTMAX in config.weight_tying_type:
name = "%sweight" % C.SHARED_EMBEDDING_PREFIX
return name, name
else:
return "%sweight" % C.SOURCE_EMBEDDING_PREFIX, "%sweight" % C.TARGET_EMBEDDING_PREFIX

def main():
"""
Command-line tool to inspect model embeddings.
"""
params = argparse.ArgumentParser(description='Shows nearest neighbours of input tokens in the embedding space.')
params.add_argument('--model', '-m', required=True,
help='Model folder to load config from.')
params.add_argument('--checkpoint', '-c', required=False, type=int, default=None,
help='Optional specific checkpoint to load parameters from. Best params otherwise.')
params.add_argument('--side', '-s', required=True, choices=['source', 'target'], help='what embeddings to look at')
params.add_argument('--norm', '-n', action='store_true', help='normalize embeddings to unit length')
params.add_argument('-k', type=int, default=5, help='Number of neighbours to print')
params.add_argument('--gamma', '-g', type=float, default=1.0, help='Softmax distribution steepness.')
args = params.parse_args()
embeddings(args)

def embeddings(args: argparse.Namespace):
logger.info("Arguments: %s", args)
config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))
source_embedding_name, target_embedding_name = get_embedding_parameter_names(config)
if args.side == "source":
vocab = load_source_vocabs(args.model)[0]
else:
vocab = load_target_vocab(args.model)
vocab_inv = reverse_vocab(vocab)
params_fname = C.PARAMS_BEST_NAME
if args.checkpoint is not None:
params_fname = C.PARAMS_NAME % args.checkpoint
params, _ = utils.load_params(os.path.join(args.model, params_fname))
if args.side == "source":
logger.info("Loading %s", source_embedding_name)
weights = params[source_embedding_name]
else:
logger.info("Loading %s", target_embedding_name)
weights = params[target_embedding_name]
logger.info("Embedding size: %d", weights.shape[1])
logger.info("Computing pairwise similarities...")
sims = compute_sims(weights, args.norm)
# weights (vocab, num_target_embed)
check_condition(weights.shape[0] == len(vocab),
"vocab and embeddings matrix do not match: %d vs. %d" % (weights.shape[0], len(vocab)))
logger.info("Reading from STDin...")
for line in sys.stdin:
tokens = list(utils.get_tokens(line))
if not tokens:
continue
print("Input:", line.rstrip())
ids = tokens2ids(tokens, vocab)
for token, token_id in zip(tokens, ids):
print("%s id=%d" % (token, token_id))
neighbours = nearest_k(sims, token_id, args.k, args.gamma)
for i, (neighbour_id, score) in enumerate(neighbours, 1):
print(" %s id=%d sim=%.4f" % (vocab_inv[neighbour_id], neighbour_id, score))
print()

if __name__ == '__main__':
main()
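# Side note: a NumPy-only sketch of the nearest-neighbour lookup that
# compute_sims and nearest_k implement together. Names below are illustrative;
# the real code works on mx.nd arrays and applies a softmax over the
# similarity row before taking top-k.
import numpy as np

def nearest_neighbours_sketch(embeddings: np.ndarray, query_id: int, k: int = 5):
    # Normalize rows to unit length so the dot product equals cosine similarity.
    unit = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    sims = unit @ unit.T
    np.fill_diagonal(sims, -np.inf)  # mask 'similarity with self'
    top = np.argsort(sims[query_id])[::-1][:k]
    return [(int(i), float(sims[query_id, i])) for i in top]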
|
{
"content_hash": "ffa4484dbbc4a121ad3b2968550b8921",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 119,
"avg_line_length": 39.19402985074627,
"alnum_prop": 0.6618431073876618,
"repo_name": "mlperf/training_results_v0.6",
"id": "f87b7666c315f74f8dc5412d874b72a3be03840b",
"size": "5818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/sockeye/sockeye/embeddings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
# [START aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync]
from google.cloud import aiplatform_v1beta1

def sample_list_model_versions():
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelVersionsRequest(
name="name_value",
)
# Make the request
page_result = client.list_model_versions(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync]
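# The name="name_value" placeholder above stands for a full model resource
# path; a hedged example of a realistic request (project, location and model
# id below are made up):
example_request = aiplatform_v1beta1.ListModelVersionsRequest(
    name="projects/my-project/locations/us-central1/models/1234567890",
)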
|
{
"content_hash": "4242f4bf33b2cf6401e333e4dc4d8c85",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 26.45,
"alnum_prop": 0.720226843100189,
"repo_name": "googleapis/python-aiplatform",
"id": "e04c1742e1e14e1cb8a316bb5db4885964a7f5f5",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import csv
from cStringIO import StringIO
from scrapy.link import Link
from .base import BaseLinkExtractor
# see http://docs.python.org/2/library/csv.html#csv-fmt-params
_FORMAT_PARAMETERS = (
('delimiter', ','),
('quotechar', '"'),
('doublequote', True),
('escapechar', None),
('lineterminator', '\r\n'),
('skipinitialspace', False),
('strict', False),
)

class CsvLinkExtractor(BaseLinkExtractor):
def __init__(self, column=0, **kwargs):
self.fmtparams = dict((key, kwargs.pop(key, default)) for key, default in _FORMAT_PARAMETERS)
for key, val in self.fmtparams.items():
if isinstance(val, unicode):
self.fmtparams[key] = val.encode()
super(CsvLinkExtractor, self).__init__(**kwargs)
self.allowed_schemes = filter(lambda x: x and isinstance(x, basestring), self.allowed_schemes)
self.column = column
def _extract_links(self, response):
buff = StringIO(response.body)
reader = csv.reader(buff, **self.fmtparams)
for row in reader:
if len(row) > self.column:
yield Link(row[self.column])
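# For illustration only: the same column-extraction idea with just the standard
# library (Python 3 syntax here for brevity; the extractor above is Python 2
# and wraps each extracted value in a scrapy Link).
import io
example_body = "http://example.com/a,first\nhttp://example.com/b,second\n"
for example_row in csv.reader(io.StringIO(example_body), delimiter=","):
    if len(example_row) > 0:  # column=0, mirroring the default constructor arg
        print(example_row[0])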
|
{
"content_hash": "4fd51ccc8ad4ee1c6eed7914a99d807d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 102,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.6252173913043478,
"repo_name": "asa1253/portia",
"id": "714208a021d842c578f67a8b47cc11d27715c74c",
"size": "1150",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "slybot/slybot/linkextractor/ecsv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12327"
},
{
"name": "HTML",
"bytes": "110369"
},
{
"name": "Handlebars",
"bytes": "41349"
},
{
"name": "JavaScript",
"bytes": "266148"
},
{
"name": "Makefile",
"bytes": "6096"
},
{
"name": "Nginx",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "274449"
},
{
"name": "Shell",
"bytes": "6538"
}
],
"symlink_target": ""
}
|
import logging
import re
import os
import sys
import getopt
import xml.etree.ElementTree as et
pathList={}

class Test:
def main(self,argv):
try:
opts,args=getopt.getopt(argv[1:],"hl:f:d:")
except getopt.GetoptError:
sys.exit()
#print(os.path.split(argv[0])[0]);
for opt,arg in opts:
if opt=='-h':
self.usage()
sys.exit()
elif opt=='-l':
pathList['configFilePath']=arg
elif opt=='-f':
pathList['filePath']=arg
elif opt=='-d':
pathList['dirPath']=arg
else:
assert False,"unhandled option"
print 'aaaaaaaaaaaaaaaaaaaaaaa'
#end of main

if __name__=='__main__':
Test().main(sys.argv)
|
{
"content_hash": "715f96d9709e07e86914a328b55db80c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 32.02777777777778,
"alnum_prop": 0.35559410234171723,
"repo_name": "EffectHub/effecthub",
"id": "e3d97be91797e04ea4125c49e9aabc924e1ea276",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/Test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "646153"
},
{
"name": "JavaScript",
"bytes": "1519624"
},
{
"name": "PHP",
"bytes": "7137955"
},
{
"name": "Python",
"bytes": "16836"
}
],
"symlink_target": ""
}
|