from __future__ import unicode_literals
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.utils.encoding import force_bytes
from machina.apps.forum_conversation.forum_attachments.cache import cache
from machina.conf import settings as machina_settings
@pytest.mark.django_db
class TestAttachmentCache(object):
def test_should_raise_at_import_if_the_cache_backend_is_not_configured(self):
# Run & check
machina_settings.ATTACHMENT_CACHE_NAME = 'dummy'
with pytest.raises(ImproperlyConfigured):
from machina.apps.forum_conversation.forum_attachments.cache import AttachmentCache
AttachmentCache()
machina_settings.ATTACHMENT_CACHE_NAME = 'machina_attachments'
def test_is_able_to_store_the_state_of_request_files(self):
# Setup
f1 = SimpleUploadedFile('file1.txt', force_bytes('file_content_1'))
f2 = SimpleUploadedFile('file2.txt', force_bytes('file_content_2_long'))
f2.charset = 'iso-8859-1'
files = {'f1': f1, 'f2': f2}
real_cache = cache.get_backend()
# Run
cache.set('mykey', files)
states = real_cache.get('mykey')
# Check
assert states['f1']['name'] == 'file1.txt'
assert states['f1']['content'] == force_bytes('file_content_1')
assert states['f1']['charset'] is None
assert states['f1']['content_type'] == 'text/plain'
assert states['f1']['size'] == 14
assert states['f2']['name'] == 'file2.txt'
assert states['f2']['content'] == force_bytes('file_content_2_long')
assert states['f2']['charset'] == 'iso-8859-1'
assert states['f2']['content_type'] == 'text/plain'
assert states['f2']['size'] == 19
def test_is_able_to_regenerate_the_request_files_dict(self):
# Setup
original_f1 = SimpleUploadedFile('file1.txt', force_bytes('file_content_1'))
original_f2 = SimpleUploadedFile('file2.txt', force_bytes('file_content_2_long' * 300000))
original_f2.charset = 'iso-8859-1'
original_files = {'f1': original_f1, 'f2': original_f2}
cache.set('mykey', original_files)
# Run
files = cache.get('mykey')
assert 'f1' in files
assert 'f2' in files
f1 = files['f1']
f2 = files['f2']
assert isinstance(f1, InMemoryUploadedFile)
assert f1.name == 'file1.txt'
assert f1.file.read() == force_bytes('file_content_1')
assert isinstance(f2, TemporaryUploadedFile) # because of the size of the content of f2
assert f2.name == 'file2.txt'
assert f2.file.read() == force_bytes('file_content_2_long' * 300000)
assert f2.charset == 'iso-8859-1'
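# The two tests above pin down the cache contract: ``set`` reduces each uploaded
# file to a plain dict (name, content, charset, content type, size) and ``get``
# rebuilds real upload objects, spilling large payloads to a temporary file.
# The sketch below illustrates that behaviour under stated assumptions (the
# 'machina_attachments' cache alias and the 2.5 MB memory threshold are
# illustrative); it is not machina's actual implementation.
from io import BytesIO

from django.core.cache import caches


def _store_file_states(key, files):
    """Serialize a dict of uploaded files into cacheable state dicts."""
    states = {
        name: {
            'name': f.name,
            'content': f.read(),
            'charset': getattr(f, 'charset', None),
            'content_type': f.content_type,
            'size': f.size,
        }
        for name, f in files.items()
    }
    caches['machina_attachments'].set(key, states)


def _rebuild_files(key, memory_limit=2.5 * 1024 * 1024):
    """Rebuild upload objects from cached state, using temp files for large payloads."""
    rebuilt = {}
    for name, state in caches['machina_attachments'].get(key).items():
        if state['size'] > memory_limit:
            f = TemporaryUploadedFile(
                state['name'], state['content_type'], state['size'], state['charset'])
            f.write(state['content'])
            f.seek(0)
        else:
            f = InMemoryUploadedFile(
                BytesIO(state['content']), name, state['name'],
                state['content_type'], state['size'], state['charset'])
        rebuilt[name] = f
    return rebuilt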
|
{
"content_hash": "bd0d4a7558954bd9f8b920bf9f18dd53",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 45.184615384615384,
"alnum_prop": 0.648280558392918,
"repo_name": "franga2000/django-machina",
"id": "2a4f7367f94953de35fe5e3d42c522bd77c27dd3",
"size": "2962",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/conversation/attachments/test_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13665"
},
{
"name": "HTML",
"bytes": "138474"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "1599"
},
{
"name": "Python",
"bytes": "696565"
}
],
"symlink_target": ""
}
|
"""Test code for the Face layer of RPC Framework."""
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
def __init__(self, upstream):
self._upstream = upstream
self._condition = threading.Condition()
self._paused = False
@contextlib.contextmanager
def pause(self):
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
def __iter__(self):
return self
def next(self):
with self._condition:
while self._paused:
self._condition.wait()
return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
'Test callback passed unexpected values: %s' %
(self._passed_other_stuff,))
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(test_coverage.Coverage, unittest.TestCase):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
__metaclass__ = abc.ABCMeta
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
self._digest = _digest.digest(
_stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.event_method_implementations, None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.PARALLELISM):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result() for response_future in response_futures]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.PARALLELISM)
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.PARALLELISM):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.PARALLELISM / 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index], response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(
group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.unary_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
request = test_messages.request()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_unary_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
self._digest.stream_stream_messages_sequences.iteritems()):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it, its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
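# The paused-iterator pattern used throughout the stream-request tests can be
# demonstrated in isolation: while the iterator is paused nothing can be pulled
# from it, which is what lets the tests assert that the invocation returns
# control before any request is consumed. A small self-contained illustration
# (not part of the test suite) follows.
if __name__ == '__main__':
    demo_iterator = _PauseableIterator(iter([1, 2, 3]))
    with demo_iterator.pause():
        # Calling next(demo_iterator) here would block until the pause exits.
        pass
    print(list(demo_iterator))  # prints [1, 2, 3] once unpaused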
|
{
"content_hash": "7d165e07d079307b9fb8bb19c5f652de",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 97,
"avg_line_length": 41.90112359550562,
"alnum_prop": 0.6819693231792342,
"repo_name": "msiedlarek/grpc",
"id": "1d36a931e8c1f1384c63a8a1c200a2c6efc990ee",
"size": "20180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9457"
},
{
"name": "C",
"bytes": "4479055"
},
{
"name": "C#",
"bytes": "1016752"
},
{
"name": "C++",
"bytes": "1213494"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "255752"
},
{
"name": "Makefile",
"bytes": "539537"
},
{
"name": "Objective-C",
"bytes": "254538"
},
{
"name": "PHP",
"bytes": "93145"
},
{
"name": "Protocol Buffer",
"bytes": "95361"
},
{
"name": "Python",
"bytes": "1562726"
},
{
"name": "Ruby",
"bytes": "425758"
},
{
"name": "Shell",
"bytes": "36138"
},
{
"name": "Swift",
"bytes": "5279"
}
],
"symlink_target": ""
}
|
import json
import os
from dotenv import load_dotenv, find_dotenv
import asyncio
from watson_developer_cloud import ConversationV1
from watson_developer_cloud import ToneAnalyzerV3
# import tone detection
import tone_detection
# load the .env file containing your environment variables for the required services (conversation and tone)
load_dotenv(find_dotenv())
# replace with your own conversation credentials or put them in a .env file
conversation = ConversationV1(
username=os.environ.get('CONVERSATION_USERNAME') or 'YOUR SERVICE NAME',
password=os.environ.get('CONVERSATION_PASSWORD') or 'YOUR PASSWORD',
version='2016-09-20')
# replace with your own tone analyzer credentials
tone_analyzer = ToneAnalyzerV3(
username=os.environ.get('TONE_ANALYZER_USERNAME') or 'YOUR SERVICE NAME',
password=os.environ.get('TONE_ANALYZER_PASSWORD') or 'YOUR SERVICE NAME',
version='2016-02-11')
# replace with your own workspace_id
workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID'
# This example stores tone for each user utterance in conversation context.
# Change this to false, if you do not want to maintain history
maintainToneHistoryInContext = True
# Payload for the Watson Conversation Service
# user input text required - replace "I am happy" with user input text.
payload = {
'workspace_id':workspace_id,
'input': {
'text': "I am happy"
}
}
def invokeToneConversation(payload, maintainToneHistoryInContext):
'''
invokeToneConversation calls the Tone Analyzer service to get the tone information for the user's
input text (input['text'] in the payload json object), adds/updates the user's tone in the payload's context,
and sends the payload to the conversation service to get a response which is printed to screen.
:param payload: a json object containing the basic information needed to converse with the Conversation Service's message endpoint.
:param maintainToneHistoryInContext: set to True to keep the user's tone history in the conversation context.
Note: as indicated below, the print statements can be replaced with application-specific code to process the err or data object returned by the Conversation Service.
'''
tone = tone_analyzer.tone(text=payload['input']['text'])
conversation_payload = tone_detection.updateUserTone(payload, tone, maintainToneHistoryInContext)
response = conversation.message(workspace_id=workspace_id, message_input=conversation_payload['input'], context=conversation_payload['context'])
print(json.dumps(response, indent=2))
async def invokeToneConversationAsync(payload, maintainToneHistoryInContext):
tone = await tone_detection.invokeToneAsync(payload,tone_analyzer)
conversation_payload = tone_detection.updateUserTone(payload, tone, maintainToneHistoryInContext)
response = conversation.message(workspace_id=workspace_id, message_input=conversation_payload['input'], context=conversation_payload['context'])
print(json.dumps(response, indent=2))
# invoke tone aware calls to conversation - either synchronously or asynchronously
# synchronous call to conversation with tone included in the context
invokeToneConversation(payload,maintainToneHistoryInContext)
# asynchronous call to conversation with tone included in the context
loop = asyncio.get_event_loop()
loop.run_until_complete(invokeToneConversationAsync(payload,maintainToneHistoryInContext))
loop.close()
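# tone_detection is a companion module that is not shown in this file. The
# illustrative helper below mirrors what its updateUserTone entry point is
# expected to do for the synchronous path above; the 'user' -> 'tone' context
# layout is an assumption for illustration, not the module's actual structure.
def _example_update_user_tone(conversation_payload, tone, maintain_history):
    context = conversation_payload.setdefault('context', {})
    user_tone = context.setdefault('user', {}).setdefault('tone', {})
    user_tone['latest'] = tone
    if maintain_history:
        user_tone.setdefault('history', []).append(tone)
    return conversation_payload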
|
{
"content_hash": "7a7e49e5973972c190fa5a90796e8101",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 176,
"avg_line_length": 45.306666666666665,
"alnum_prop": 0.7775161859917599,
"repo_name": "jpmunic/udest",
"id": "2413e32f0995f3417b582447e0229ac8e4552dea",
"size": "3398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watson_examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77582"
},
{
"name": "JavaScript",
"bytes": "43347"
},
{
"name": "PHP",
"bytes": "17162"
},
{
"name": "Python",
"bytes": "62795"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
from typing import Any
from zerver.actions.realm_settings import do_scrub_realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Script to scrub a deactivated realm."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser, required=True)
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
if not realm.deactivated:
print("Realm", options["realm_id"], "is active. Please deactivate the Realm the first.")
exit(0)
print("Scrubbing", options["realm_id"])
do_scrub_realm(realm, acting_user=None)
print("Done!")
|
{
"content_hash": "357568d463a4db94502e9237956fdb50",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 100,
"avg_line_length": 36.68181818181818,
"alnum_prop": 0.6716232961586122,
"repo_name": "rht/zulip",
"id": "5e7b130f709e2f92abb093544edf3eabd3f28626",
"size": "807",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "zerver/management/commands/scrub_realm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "489438"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "743287"
},
{
"name": "Handlebars",
"bytes": "374049"
},
{
"name": "JavaScript",
"bytes": "4000260"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10160680"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284836"
}
],
"symlink_target": ""
}
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.example import feature_pb2 as tensorflow_dot_core_dot_example_dot_feature__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/example/example.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n%tensorflow/core/example/example.proto\x12\ntensorflow\x1a%tensorflow/core/example/feature.proto\"1\n\x07\x45xample\x12&\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\"i\n\x0fSequenceExample\x12%\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x14.tensorflow.Features\x12/\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x18.tensorflow.FeatureListsB,\n\x16org.tensorflow.exampleB\rExampleProtosP\x01\xa0\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_example_dot_feature__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXAMPLE = _descriptor.Descriptor(
name='Example',
full_name='tensorflow.Example',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='features', full_name='tensorflow.Example.features', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=141,
)
_SEQUENCEEXAMPLE = _descriptor.Descriptor(
name='SequenceExample',
full_name='tensorflow.SequenceExample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.SequenceExample.context', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_lists', full_name='tensorflow.SequenceExample.feature_lists', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=248,
)
_EXAMPLE.fields_by_name['features'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['context'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURES
_SEQUENCEEXAMPLE.fields_by_name['feature_lists'].message_type = tensorflow_dot_core_dot_example_dot_feature__pb2._FEATURELISTS
DESCRIPTOR.message_types_by_name['Example'] = _EXAMPLE
DESCRIPTOR.message_types_by_name['SequenceExample'] = _SEQUENCEEXAMPLE
Example = _reflection.GeneratedProtocolMessageType('Example', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLE,
__module__ = 'tensorflow.core.example.example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.Example)
))
_sym_db.RegisterMessage(Example)
SequenceExample = _reflection.GeneratedProtocolMessageType('SequenceExample', (_message.Message,), dict(
DESCRIPTOR = _SEQUENCEEXAMPLE,
__module__ = 'tensorflow.core.example.example_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.SequenceExample)
))
_sym_db.RegisterMessage(SequenceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026org.tensorflow.exampleB\rExampleProtosP\001\240\001\001'))
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "d437d57d1e113c01941f484e6e667778",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 453,
"avg_line_length": 36.21008403361345,
"alnum_prop": 0.7375261081457415,
"repo_name": "shishaochen/TensorFlow-0.8-Win",
"id": "b1cda108700af5e5fda001a4af3a9ad313973d70",
"size": "4417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/core/example/example_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "33878"
},
{
"name": "C",
"bytes": "1390259"
},
{
"name": "C#",
"bytes": "1900628"
},
{
"name": "C++",
"bytes": "28129535"
},
{
"name": "CMake",
"bytes": "417657"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "Emacs Lisp",
"bytes": "7809"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Go",
"bytes": "8549"
},
{
"name": "Groff",
"bytes": "1272396"
},
{
"name": "HTML",
"bytes": "849000"
},
{
"name": "Java",
"bytes": "3139664"
},
{
"name": "JavaScript",
"bytes": "417956"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "M4",
"bytes": "78386"
},
{
"name": "Makefile",
"bytes": "1177180"
},
{
"name": "Objective-C",
"bytes": "2580186"
},
{
"name": "Objective-C++",
"bytes": "2897"
},
{
"name": "PHP",
"bytes": "342"
},
{
"name": "Protocol Buffer",
"bytes": "924786"
},
{
"name": "Python",
"bytes": "8241830"
},
{
"name": "Ruby",
"bytes": "82233"
},
{
"name": "Shell",
"bytes": "1875702"
},
{
"name": "Swift",
"bytes": "20550"
},
{
"name": "TypeScript",
"bytes": "395532"
},
{
"name": "VimL",
"bytes": "3759"
}
],
"symlink_target": ""
}
|
from wagtail import __version__
from wagtail.utils.setup import assets, check_bdist_egg, sdist
try:
from setuptools import find_packages, setup
except ImportError:
from distutils.core import setup
# Hack to prevent "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when setup.py exits
# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
import multiprocessing # noqa
except ImportError:
pass
install_requires = [
"Django>=3.2,<4.2",
"django-modelcluster>=6.0,<7.0",
"django-permissionedforms>=0.1,<1.0",
"django-taggit>=2.0,<4.0",
"django-treebeard>=4.5.1,<5.0",
"djangorestframework>=3.11.1,<4.0",
"django-filter>=2.2,<23",
"draftjs_exporter>=2.1.5,<3.0",
"Pillow>=4.0.0,<10.0.0",
"beautifulsoup4>=4.8,<4.12",
"html5lib>=0.999,<2",
"Willow>=1.4,<1.5",
"requests>=2.11.1,<3.0",
"l18n>=2018.5",
"openpyxl>=3.0.10,<4.0",
"anyascii>=0.1.5",
"telepath>=0.1.1,<1",
]
# Testing dependencies
testing_extras = [
# Required for running the tests
"python-dateutil>=2.7",
"pytz>=2014.7",
"elasticsearch>=5.0,<6.0",
"Jinja2>=3.0,<3.2",
"boto3>=1.16,<1.17",
"freezegun>=0.3.8",
"azure-mgmt-cdn>=5.1,<6.0",
"azure-mgmt-frontdoor>=0.3,<0.4",
"django-pattern-library>=0.7,<0.8",
# For coverage and PEP8 linting
"coverage>=3.7.0",
"black==22.3.0",
"flake8>=3.6.0",
"isort==5.6.4", # leave this pinned - it tends to change rules between patch releases
"flake8-blind-except==0.1.1",
"flake8-comprehensions==3.8.0",
"flake8-print==5.0.0",
"doc8==0.8.1",
"flake8-assertive==2.0.0",
# For templates linting
"curlylint==0.13.1",
# For template indenting
"djhtml==1.4.13",
# for validating string formats in .po translation files
"polib>=1.1,<2.0",
]
# Documentation dependencies
documentation_extras = [
"pyenchant>=3.1.1,<4",
"sphinxcontrib-spelling>=5.4.0,<6",
"Sphinx>=1.5.2",
"sphinx-autobuild>=0.6.0",
"sphinx-wagtail-theme==5.3.2",
"myst_parser==0.18.1",
"sphinx_copybutton>=0.5,<1.0",
]
setup(
name="wagtail",
version=__version__,
description="A Django content management system.",
author="Wagtail core team + contributors",
author_email="hello@wagtail.org", # For support queries, please see https://docs.wagtail.org/en/stable/support.html
url="https://wagtail.org/",
project_urls={
"Documentation": "https://docs.wagtail.org",
"Source": "https://github.com/wagtail/wagtail",
},
packages=find_packages(),
include_package_data=True,
license="BSD",
long_description="Wagtail is an open source content management \
system built on Django, with a strong community and commercial support. \
It’s focused on user experience, and offers precise control for \
designers and developers.\n\n\
For more details, see https://wagtail.org, https://docs.wagtail.org and \
https://github.com/wagtail/wagtail/.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Framework :: Django",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Framework :: Django :: 4.1",
"Framework :: Wagtail",
"Topic :: Internet :: WWW/HTTP :: Site Management",
],
python_requires=">=3.7",
install_requires=install_requires,
extras_require={"testing": testing_extras, "docs": documentation_extras},
entry_points="""
[console_scripts]
wagtail=wagtail.bin.wagtail:main
""",
zip_safe=False,
cmdclass={
"sdist": sdist,
"bdist_egg": check_bdist_egg,
"assets": assets,
},
)
|
{
"content_hash": "313b6cf083a0d47e0efb093dc703e441",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 120,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.6138307984790875,
"repo_name": "zerolab/wagtail",
"id": "f583c74bcbc4bf59a35a8a35c2b54eba7c1f57ea",
"size": "4233",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593037"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6560334"
},
{
"name": "SCSS",
"bytes": "219204"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288102"
}
],
"symlink_target": ""
}
|
AUTHFAILURE = 'AuthFailure'
# DryRun operation. It means the request would have succeeded, but the DryRun parameter was passed.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# CLS internal error.
FAILEDOPERATION_CLSINTERNALERROR = 'FailedOperation.CLSInternalError'
# Internal error.
INTERNALERROR = 'InternalError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# The certificate content is invalid.
INVALIDPARAMETER_INVALIDCERTIFICATE = 'InvalidParameter.InvalidCertificate'
# Failed to query the certificate by ID.
INVALIDPARAMETER_QUERYCERTBYSSLIDFAILED = 'InvalidParameter.QueryCertBySSLIDFailed'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# The number of requests exceeds the rate limit.
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# Resources are sold out.
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
{
"content_hash": "0f6bc5eb29f6889b0b6686d0319b63c6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 83,
"avg_line_length": 19.262295081967213,
"alnum_prop": 0.7982978723404255,
"repo_name": "tzpBingo/github-trending",
"id": "1d8c4e107df8e108ad37e2bafaa995282b442d16",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/waf/v20180125/errorcodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
AUTHOR = u'Adrian Sampson'
# -- General configuration -----------------------------------------------------
extensions = []
#templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.2'
release = '1.2.1'
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
#html_static_path = ['_static']
htmlhelp_basename = 'beetsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'beets.tex', u'beets Documentation',
AUTHOR, 'manual'),
]
# -- Options for manual page output --------------------------------------------
man_pages = [
('reference/cli', 'beet', u'music tagger and library organizer',
[AUTHOR], 1),
('reference/config', 'beetsconfig', u'beets configuration file',
[AUTHOR], 5),
]
|
{
"content_hash": "668b0a073b1289cb94127753b0da379c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 24.3,
"alnum_prop": 0.5164609053497943,
"repo_name": "jayme-github/beets",
"id": "81ae4e9e62801ba0894604730b1fe7d312292ea5",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "85314"
},
{
"name": "Python",
"bytes": "878962"
}
],
"symlink_target": ""
}
|
import time
import httplib, urllib
class SensorActUploader(object):
def __init__(self, IP, PORT):
self.IP = IP
self.PORT = PORT
self.headers = { "Content-type": "application/json",
"Accept": "text/plain" }
def connect(self):
self.connection = httplib.HTTPConnection(self.IP + ":" + str(self.PORT))
def close(self):
self.connection.close()
def receive(self):
try:
response = self.connection.getresponse()
print "SensorAct ", response.status, response.reason
except httplib.BadStatusLine:
print "Bad status!"
response = None
return response
def send(self, data):
sent = False
while sent == False:
try:
self.connect()
self.connection.request("POST", "/data/upload/wavesegment", data, self.headers)
response = self.receive()
self.connection.close()
if response.status == 200:
# If response was 200 break out of loop
sent = True
except IOError:
print ("No internet connection, will send the data when the internet"
" becomes available")
time.sleep(5)
if __name__ == "__main__":
forwarder = SensorActUploader("asdf", "asdf")
forwarder.connect()
forwarder.receive()
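# Illustrative usage (the payload fields below are placeholders, not the
# SensorAct wavesegment schema; send() expects a JSON string and retries until
# the POST returns 200):
#
#   uploader = SensorActUploader("127.0.0.1", 1500)
#   uploader.send(json.dumps({"secretkey": "...", "data": "..."}))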
|
{
"content_hash": "f8a522c91b92cd834c7a3581fb84c231",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 95,
"avg_line_length": 30.82608695652174,
"alnum_prop": 0.5352609308885754,
"repo_name": "nesl/LabSense",
"id": "abd9db38cc1d9bb987c5eb1dd886fd4fef68c92e",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataSinks/SensorAct/SensorActUploader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "62769"
},
{
"name": "C++",
"bytes": "35543"
},
{
"name": "JavaScript",
"bytes": "290755"
},
{
"name": "Lua",
"bytes": "2066"
},
{
"name": "Python",
"bytes": "173208"
},
{
"name": "Shell",
"bytes": "4268"
}
],
"symlink_target": ""
}
|
import imp
import os
import os.path
#
# The following code will search and load waagent code and expose
# it as a submodule of current module
#
def searchWAAgent():
agentPath = '/usr/sbin/waagent'
if(os.path.isfile(agentPath)):
return agentPath
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
for user_path in user_paths:
agentPath = os.path.join(user_path, 'waagent')
if(os.path.isfile(agentPath)):
return agentPath
return None
agentPath = searchWAAgent()
if(agentPath):
waagent = imp.load_source('waagent', agentPath)
else:
raise Exception("Can't load waagent.")
if not hasattr(waagent, "AddExtensionEvent"):
"""
If AddExtensionEvent is not defined, provide a dummy impl.
"""
def _AddExtensionEvent(*args, **kwargs):
pass
waagent.AddExtensionEvent = _AddExtensionEvent
if not hasattr(waagent, "WALAEventOperation"):
class _WALAEventOperation:
HeartBeat="HeartBeat"
Provision = "Provision"
Install = "Install"
UnInstall = "UnInstall"
Disable = "Disable"
Enable = "Enable"
Download = "Download"
Upgrade = "Upgrade"
Update = "Update"
waagent.WALAEventOperation = _WALAEventOperation
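# Illustrative usage of this shim (the keyword names below are assumptions
# about the agent's AddExtensionEvent signature; with the dummy fallback
# defined above the call is simply a no-op):
#
#   from Utils.WAAgentUtil import waagent
#   waagent.AddExtensionEvent(name="SomeExtension",
#                             op=waagent.WALAEventOperation.Enable,
#                             isSuccess=True,
#                             message="enable succeeded")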
|
{
"content_hash": "1e542a4a011bc6f1f5ff3bd36e945310",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 65,
"avg_line_length": 27.782608695652176,
"alnum_prop": 0.6525821596244131,
"repo_name": "thomas1206/azure-linux-extensions",
"id": "7ae916d843d79adf942fef45dd8c098b662fd632",
"size": "2034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "VMEncryption/main/Utils/WAAgentUtil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "33945"
},
{
"name": "JavaScript",
"bytes": "19742"
},
{
"name": "Makefile",
"bytes": "1554"
},
{
"name": "Python",
"bytes": "2677628"
},
{
"name": "Shell",
"bytes": "13080"
}
],
"symlink_target": ""
}
|
import json
import os.path
import pytest
import demistomock as demisto
@pytest.fixture()
def advisories_list():
from CheckPanosVersionAffected import Advisory
return [
Advisory(
data_type='CVE',
data_format='MITRE',
cve_id='CVE-2019-17440',
cve_date_public='2019-12-19T19:35:00.000Z',
cve_title='PAN-OS on PA-7000 Series: Improper restriction of communication to Log Forwarding Card (LFC)',
description='Improper restriction of communication',
cvss_score=10,
cvss_severity='CRITICAL',
cvss_vector_string='CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H',
affected_version_list=[
"PAN-OS 9.0.5",
"PAN-OS 9.0.4",
"PAN-OS 9.0.3-h3",
"PAN-OS 9.0.3-h2",
"PAN-OS 9.0.3-h1",
"PAN-OS 9.0.3",
"PAN-OS 9.0.2-h4",
"PAN-OS 9.0.2-h3",
"PAN-OS 9.0.2-h2",
"PAN-OS 9.0.2-h1",
"PAN-OS 9.0.2",
"PAN-OS 9.0.1",
"PAN-OS 9.0.0",
"PAN-OS 9.0",
"PAN-OS 8.1.11"
],
),
Advisory(
data_type='CVE',
data_format='MITRE',
cve_id='CVE-2019-17441',
cve_date_public='2019-12-15T19:35:00.000Z',
cve_title='This is a fake advisory',
description='Improper restriction of communication',
cvss_score=10,
cvss_severity='CRITICAL',
cvss_vector_string='CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H',
affected_version_list=[
"PAN-OS 8.1.12-h3",
"PAN-OS 8.1.11",
"PAN-OS 8.1.10"
],
)
]
def test_compare_version_with_advisories(advisories_list):
"""
Given a list of advisories as dataclasses, tests the comparison function
"""
from CheckPanosVersionAffected import compare_version_with_advisories
# Match single item
match = compare_version_with_advisories(panos_version="9.0.5", advisories_list=advisories_list)
assert len(match) == 1
assert match[0].cve_id == "CVE-2019-17440"
# Match multiple advisories
match = compare_version_with_advisories(panos_version="8.1.11", advisories_list=advisories_list)
assert len(match) == 2
# Match no advisories
match = compare_version_with_advisories(panos_version="7.1.11", advisories_list=advisories_list)
assert len(match) == 0
def test_main(mocker):
"""
Tests the complete main() function, including reading advisories in as a list from the context data as it is produced by the
integration command.
"""
from CheckPanosVersionAffected import main
advisories_list = json.load(
open(os.path.sep.join(["test_data", "example_advisories_data.json"])))
mocker.patch.object(demisto, 'args', return_value={
"advisories": advisories_list,
"version": "9.1.3"
})
expected_results = json.load(
open(os.path.sep.join(["test_data", "expected_response.json"])))
mocker.patch.object(demisto, 'results')
main()
demisto.results.assert_called_with(expected_results)
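# A minimal sketch of the comparison the first test exercises: keep every
# advisory whose affected-version list contains the queried PAN-OS version once
# the "PAN-OS " prefix is stripped. This is illustrative only and is not the
# script's actual compare_version_with_advisories implementation (which may
# also need to handle hotfix suffixes such as "-h2").
def _matching_advisories(panos_version, advisories):
    matches = []
    for advisory in advisories:
        affected = {v.replace("PAN-OS", "").strip() for v in advisory.affected_version_list}
        if panos_version in affected:
            matches.append(advisory)
    return matches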
|
{
"content_hash": "5df6cfa7c38bcb35da3ae0ec13ce196a",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 128,
"avg_line_length": 34.56842105263158,
"alnum_prop": 0.5636419001218027,
"repo_name": "demisto/content",
"id": "2048f5e8e2cf14314795478035f2d717eebffe8c",
"size": "3284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/PaloAltoNetworks_SecurityAdvisories/Scripts/CheckPanosVersionAffected/CheckPanosVersionAffected_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
import os.path
import tempfile
from test_common import cache_root, cores_root, library_root
from fusesoc.config import Config
build_root = "test_build_root"
EXAMPLE_CONFIG = """
[main]
build_root = {build_root}
cache_root = {cache_root}
cores_root = {cores_root}
library_root = {library_root}
[library.test_lib]
location = {library_root}/test_lib
auto-sync = false
sync-uri = https://github.com/fusesoc/fusesoc-cores
"""
def test_config_file():
tcf = tempfile.TemporaryFile(mode="w+")
tcf.write(
EXAMPLE_CONFIG.format(
build_root=build_root,
cache_root=cache_root,
cores_root=cores_root,
library_root=library_root,
)
)
tcf.seek(0)
conf = Config(file=tcf)
assert conf.build_root == build_root
def test_config_path():
tcf = tempfile.NamedTemporaryFile(mode="w+")
tcf.write(
EXAMPLE_CONFIG.format(
build_root=build_root,
cache_root=cache_root,
cores_root=cores_root,
library_root=library_root,
)
)
tcf.seek(0)
conf = Config(path=tcf.name)
assert conf.library_root == library_root
def test_config_libraries():
tcf = tempfile.NamedTemporaryFile(mode="w+")
tcf.write(
EXAMPLE_CONFIG.format(
build_root=build_root,
cache_root=cache_root,
cores_root=cores_root,
library_root=library_root,
)
)
tcf.seek(0)
conf = Config(path=tcf.name)
lib = None
for library in conf.libraries:
if library.name == "test_lib":
lib = library
assert lib
assert lib.location == os.path.join(library_root, "test_lib")
assert lib.sync_uri == "https://github.com/fusesoc/fusesoc-cores"
assert not lib.auto_sync
|
{
"content_hash": "da9f93f1f963da0b3f81520eba98fab9",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 69,
"avg_line_length": 22.4875,
"alnum_prop": 0.6125625347415231,
"repo_name": "lowRISC/fusesoc",
"id": "ec5b1100f1b2bf4520bd2e669e379da72a206929",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "170342"
},
{
"name": "Shell",
"bytes": "739"
},
{
"name": "SystemVerilog",
"bytes": "2485"
},
{
"name": "Tcl",
"bytes": "176"
},
{
"name": "Verilog",
"bytes": "1141"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from package_tracking.component.package_tracking import PackageTrackingComponentImpl
class TestPkgTrkComponentImpl(TestCase):
def test_sub_pkg_trk_msg(self):
pkg_trk = PackageTrackingComponentImpl(mojoqq_host='192.168.30.130', create_table=False)
# pkg_trk.sub_pkg_trk_msg('lyrl', '184387904', '498880156', 'Mojo-Webqq', '418485853713', True)
# pkg_trk.qry_pkg_trk_msg('lyrl', '184387904', '498880156', 'Mojo-Webqq', '4184858537133', True)
|
{
"content_hash": "4d1ba5042d04f7315767de967bd896dd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 104,
"avg_line_length": 55.55555555555556,
"alnum_prop": 0.728,
"repo_name": "lyrl/package_tracking",
"id": "888d31f7725808d9f63f97c9676483008db0bed7",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "package_tracking/test/test_package_tracking_component_impl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46337"
}
],
"symlink_target": ""
}
|
__author__ = 'ar'
import os
import time
import lmdb
import tensorflow as tf
import numpy as np
import shutil
from run00_common import ImageDirParser, DataType
#################################
def buildImageDataset(imgDirParser=None, datasetType='lmdb', numberOfSamples=1000, isRawBlob=False):
if imgDirParser is None:
raise Exception('Invalid imageDirParser')
rndIndex = np.random.randint(0, imgDirParser.getNumSamples(), (numberOfSamples))
dataTypeBuilder = DataType()
# (1) check dataset type and prepare write
tpref = 'raw%d' % isRawBlob
if datasetType == 'lmdb':
dbfout = 'test-dataset-lmdb-%s' % tpref
if os.path.isdir(dbfout):
shutil.rmtree(dbfout)
tsizInBytes = 24 * (1024 ** 3)
lmdbEnv = lmdb.open(dbfout, map_size=tsizInBytes)
elif datasetType == 'tfrecord':
dbfout = 'test-dataset-tf-%s.tfrecord' % tpref
if os.path.isfile(dbfout):
os.remove(dbfout)
writer = tf.python_io.TFRecordWriter(dbfout)
else:
raise Exception('Unknown Dataset-Type [%s]' % datasetType)
t0 = time.time()
# (2) iterate over input data
for ii in range(len(rndIndex)):
ridx = rndIndex[ii]
dataRow = imgDirParser.listPathAndIdx[ridx]
grpName = 'row_%08d' % ii
tfeatureDict = {}
for vvi, vv in enumerate(dataRow):
ttype = imgDirParser.scheme[vvi]
if ttype == 'path-img2d':
tcfg = {
'img': vv,
'fmt': 'raw' if isRawBlob else 'jpeg'
}
elif ttype == 'category-idx':
tcfg = {
'idx': vv,
'name': imgDirParser.dictIdx2Name[vv],
}
elif ttype == 'category-name':
tcfg = {
'idx': imgDirParser.dictName2Idx[vv],
'name': vv,
}
elif ttype == 'array-float':
tcfg = {
'val': vv
}
else:
raise Exception('Unknown feature type [%s]' % ttype)
tdataType = dataTypeBuilder.getDataClassByName(ttype)
# tfeatures = tdataType.data2Blob(tcfg)
tfeatureDict.update(tdataType.data2Blob(tcfg))
# print ('\t[%d] : %s' % (vvi, vv))
texample = tf.train.Example(features = tf.train.Features(feature = tfeatureDict))
if datasetType=='lmdb':
with lmdbEnv.begin(write=True) as lmdbTxn:
lmdbTxn.put(grpName.encode('ascii'), texample.SerializeToString())
# lmdbTxn.commit()
else:
writer.write(texample.SerializeToString())
# print('[%d] : %s' % (ii, grpName))
if datasetType == 'lmdb':
lmdbEnv.close()
else:
writer.close()
dt = time.time() - t0
return dt
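# A read-back sketch for the two containers written above (illustrative; the
# default path matches what buildImageDataset produces for isRawBlob=False, and
# the TF 1.x-era reader APIs mirror the writer APIs already used in this script).
def readBackExamples(datasetType='lmdb', dbPath='test-dataset-lmdb-raw0'):
    examples = []
    if datasetType == 'lmdb':
        env = lmdb.open(dbPath, readonly=True)
        with env.begin() as txn:
            for _, rawValue in txn.cursor():
                ex = tf.train.Example()
                ex.ParseFromString(rawValue)
                examples.append(ex)
        env.close()
    else:
        for rawValue in tf.python_io.tf_record_iterator(dbPath):
            ex = tf.train.Example()
            ex.ParseFromString(rawValue)
            examples.append(ex)
    return examples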
#################################
if __name__ == '__main__':
wdir = '../../dataset-image2d/simple4c_test'
imgDirParser = ImageDirParser(wdir=wdir)
print (imgDirParser)
numberOfSamples = 10000
lstOpt_Raw = [False, True]
lstOpt_Dbt = ['lmdb', 'tfrecord']
for opRaw in lstOpt_Raw:
for opBdt in lstOpt_Dbt:
tdt = buildImageDataset(imgDirParser, numberOfSamples=numberOfSamples, datasetType=opBdt, isRawBlob=opRaw)
tspeed = float(numberOfSamples) / tdt
dT1k = 1000. / tspeed
print ('WRITE [%s : isRaw = %d] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'
% (opBdt, opRaw, tdt, numberOfSamples, tspeed, dT1k))
# tmp = DataType.getDataClassByName(strType=DataType_Image2D.type())
print ('----')
|
{
"content_hash": "b2b658d47c6f547d55f1fa569c38e780",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 117,
"avg_line_length": 37.5959595959596,
"alnum_prop": 0.5502418054809243,
"repo_name": "SummaLabs/DLS",
"id": "a2dfb3c1831e755a8264d087ba704727b07f181b",
"size": "3765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data-test/test_data_serialization/step3_h5py_vs_lmdb/run01_write_lmdb_with_tfrecord.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28477"
},
{
"name": "HTML",
"bytes": "146817"
},
{
"name": "JavaScript",
"bytes": "491364"
},
{
"name": "Jupyter Notebook",
"bytes": "10111"
},
{
"name": "Protocol Buffer",
"bytes": "115393"
},
{
"name": "Python",
"bytes": "877535"
},
{
"name": "Shell",
"bytes": "7969"
}
],
"symlink_target": ""
}
|
import os
import sys
import pytest
from os.path import join
from sqlalchemy.exc import InvalidRequestError
from textwrap import dedent
from ...api import Gradebook
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderAssign(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["assign", "--help-all"])
def test_no_args(self):
"""Is there an error if no arguments are given?"""
run_nbgrader(["assign"], retcode=1)
def test_conflicting_args(self):
"""Is there an error if assignment is specified both in config and as an argument?"""
run_nbgrader(["assign", "--assignment", "foo", "foo"], retcode=1)
def test_multiple_args(self):
"""Is there an error if multiple arguments are given?"""
run_nbgrader(["assign", "foo", "bar"], retcode=1)
def test_no_assignment(self, course_dir):
"""Is an error thrown if the assignment doesn't exist?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
run_nbgrader(["assign", "ps1"], retcode=1)
# check that the --create flag is properly deprecated
run_nbgrader(["assign", "ps1", "--create"], retcode=1)
def test_single_file(self, course_dir, temp_cwd):
"""Can a single file be assigned?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
def test_multiple_files(self, course_dir):
"""Can multiple files be assigned?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb'))
def test_dependent_files(self, course_dir):
"""Are dependent files properly linked?"""
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'foo.csv'), 'foo')
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.csv'), 'bar')
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._empty_notebook(join(course_dir, 'source', 'ps1', 'bar.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'bar.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'foo.csv'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.csv'))
with open(join(course_dir, 'release', 'ps1', 'data', 'foo.csv'), 'r') as fh:
assert fh.read() == 'foo'
with open(join(course_dir, 'release', 'ps1', 'data', 'bar.csv'), 'r') as fh:
assert fh.read() == 'bar'
def test_save_cells(self, db, course_dir):
"""Ensure cells are saved into the database"""
self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
notebook = gb.find_notebook("test", "ps1")
assert len(notebook.grade_cells) == 6
gb.db.close()
def test_force(self, course_dir):
"""Ensure the force option works properly"""
self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo")
self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar")
self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf")
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt'))
assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc'))
# check that it skips the existing directory
os.remove(join(course_dir, 'release', 'ps1', 'foo.txt'))
run_nbgrader(["assign", "ps1"])
assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
# force overwrite the supplemental files
run_nbgrader(["assign", "ps1", "--force"])
assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
# force overwrite
os.remove(join(course_dir, 'source', 'ps1', 'foo.txt'))
run_nbgrader(["assign", "ps1", "--force"])
assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
def test_permissions(self, course_dir):
"""Are permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo')
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
if sys.platform == 'win32':
perms = '666'
else:
perms = '644'
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == perms
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == perms
def test_custom_permissions(self, course_dir):
"""Are custom permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), 'foo')
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--AssignApp.permissions=444"])
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.ipynb"))
assert os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.ipynb")) == "444"
assert self._get_permissions(join(course_dir, "release", "ps1", "foo.txt")) == "444"
def test_add_remove_extra_notebooks(self, db, course_dir):
"""Are extra notebooks added and removed?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
notebook1 = gb.find_notebook("test", "ps1")
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 2
gb.db.refresh(notebook1)
notebook2 = gb.find_notebook("test2", "ps1")
os.remove(join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 1
gb.db.refresh(notebook1)
with pytest.raises(InvalidRequestError):
gb.db.refresh(notebook2)
gb.db.close()
def test_add_extra_notebooks_with_submissions(self, db, course_dir):
"""Is an error thrown when new notebooks are added and there are existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
gb.add_student("hacker123")
gb.add_submission("ps1", "hacker123")
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"], retcode=1)
gb.db.close()
def test_remove_extra_notebooks_with_submissions(self, db, course_dir):
"""Is an error thrown when notebooks are removed and there are existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test2.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 2
gb.add_student("hacker123")
gb.add_submission("ps1", "hacker123")
os.remove(join(course_dir, "source", "ps1", "test2.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db, "--force"], retcode=1)
gb.db.close()
def test_same_notebooks_with_submissions(self, db, course_dir):
"""Is it ok to run nbgrader assign with the same notebooks and existing submissions?"""
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "test.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1", "--db", db])
gb = Gradebook(db)
assignment = gb.find_assignment("ps1")
assert len(assignment.notebooks) == 1
notebook = assignment.notebooks[0]
gb.add_student("hacker123")
submission = gb.add_submission("ps1", "hacker123")
submission_notebook = submission.notebooks[0]
run_nbgrader(["assign", "ps1", "--db", db, "--force"])
gb.db.refresh(assignment)
assert len(assignment.notebooks) == 1
gb.db.refresh(notebook)
gb.db.refresh(submission)
gb.db.refresh(submission_notebook)
gb.db.close()
def test_force_single_notebook(self, course_dir):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb"))
p1 = self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb"))
p2 = self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb"))
assert p1 == p2
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
run_nbgrader(["assign", "ps1", "--notebook", "p1", "--force"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
assert os.path.exists(join(course_dir, "release", "ps1", "p2.ipynb"))
assert p1 != self._file_contents(join(course_dir, "release", "ps1", "p1.ipynb"))
assert p2 == self._file_contents(join(course_dir, "release", "ps1", "p2.ipynb"))
def test_fail_no_notebooks(self):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.NbGrader.db_assignments = [dict(name="ps1")]\n""")
run_nbgrader(["assign", "ps1"], retcode=1)
def test_no_metadata(self, course_dir):
self._copy_file(join("files", "test-no-metadata.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
# it should fail because of the solution regions
run_nbgrader(["assign", "ps1", "--no-db"], retcode=1)
# it should pass now that we're not enforcing metadata
run_nbgrader(["assign", "ps1", "--no-db", "--no-metadata"])
assert os.path.exists(join(course_dir, "release", "ps1", "p1.ipynb"))
|
{
"content_hash": "6feb7a73a713c8ab83038a614cdfda50",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 112,
"avg_line_length": 47.1764705882353,
"alnum_prop": 0.5908757517969782,
"repo_name": "EdwardJKim/nbgrader",
"id": "3e42d4f6feeb14e8b290bfbaa237287afac0866c",
"size": "13634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nbgrader/tests/apps/test_nbgrader_assign.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7308"
},
{
"name": "CSS",
"bytes": "7606"
},
{
"name": "HTML",
"bytes": "1053323"
},
{
"name": "JavaScript",
"bytes": "195937"
},
{
"name": "Jupyter Notebook",
"bytes": "834178"
},
{
"name": "Makefile",
"bytes": "7317"
},
{
"name": "Python",
"bytes": "675497"
},
{
"name": "Shell",
"bytes": "24"
},
{
"name": "Smarty",
"bytes": "25996"
}
],
"symlink_target": ""
}
|
"""
PEP 302 gives a lot of information about the workings of this module
https://www.python.org/dev/peps/pep-0302/
"""
import imp
import logging
import sys
import requests
# Define the order to search for an import.
# First we look for a package __init__ then for non-package .py files.
# We do not look for non-pure Python files, the remote machine may have
# a different Python version / architecture / anything.
_SEARCH_ORDER = ["/__init__.py", ".py"]
class NetworkImportHook(object):
log = logging.getLogger('NetworkImportHook')
def __eq__(self, other):
return (self.__class__.__module__ == other.__class__.__module__ and
self.__class__.__name__ == other.__class__.__name__)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<NetworkImportHook>"
def install(self):
"""
Add the import hook to sys.meta_path, if it's not already there
"""
self.log.debug("Installing %r", self)
sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
def find_module(self, fullname, path=None):
"""
Part of the import hook's finder.
Should find out if fullname exists as a package or module, possibly
by searching for fullname/__init__.py or fullname.py. If one exists,
self should be returned, otherwise None.
"It should return a loader object if the module was found, or None
if it wasn't. If find_module() raises an exception, it will be
propagated to the caller, aborting the import."
See PEP302
"""
raise NotImplementedError("Implement in subclass")
def _create_module(self, fullname, filename, content):
"""
Given some python source and filename info, return a module
"""
try:
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = "<%s>" % filename
mod.__loader__ = self
if (mod.__file__.endswith('__init__.py>') and
not fullname.endswith('__init__.py')):
# We imported a package
mod.__path__ = []
mod.__package__ = fullname
else:
mod.__package__ = fullname.rsplit('.', 1)[0]
except Exception as e:
del sys.modules[fullname]
raise ImportError("%r was unable to create module '%s': [%s]"
% (self, fullname, e))
self.log.debug("Imported '%s'", fullname)
exec(content, mod.__dict__)
return mod
    def load_module(self, fullname):
"""
Given a fullname should do what is necessary (load, unpackage,
evaluate, etc.) to construct a module object.
Pretty much, just get the Python source and pass it to _create_module.
"""
raise NotImplementedError("Implement in subclass")
class HttpChannel(NetworkImportHook):
log = logging.getLogger('HttpChannel')
def __init__(self, host):
self.host = host
self.session = requests.Session()
try:
# Run a request to trigger request's imports.
# We don't care if it works
requests.options(self.host)
except:
pass
def __repr__(self):
return "<HttpChannel(%r)>" % (self.host,)
def get_filename(self, fullname, so):
return self.host + "/" + fullname.replace('.', '/') + so
def find_module(self, fullname, path=None):
"""
Search for package or module from fullname.
        If one is found, returns self, otherwise None.
"""
for so in _SEARCH_ORDER:
try:
path = self.get_filename(fullname, so)
self.request = self.session.get(path)
self.request.raise_for_status()
return self
except requests.exceptions.RequestException as e:
self.log.debug("Unable to import %s: [%s]", path, e)
def load_module(self, fullname):
"""
Returns the loaded module or raises an exception
"""
if fullname in sys.modules:
return sys.modules[fullname]
return self._create_module(fullname, self.request.url,
self.request.content)
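# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of wiring the hook into the import machinery.
# The host URL below is hypothetical; it stands in for any HTTP server that
# exposes <package>/__init__.py and <module>.py files under paths mirroring
# the dotted module names (see _SEARCH_ORDER above).
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    channel = HttpChannel('http://example.invalid/pymodules')
    channel.install()
    # After install(), ordinary import statements consult the hook once the
    # builtin finders have failed, e.g.:
    #     import remote_package.remote_module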
|
{
"content_hash": "212fee261265831ae59b1fb242a946a0",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 33.7906976744186,
"alnum_prop": 0.570314292268869,
"repo_name": "ant1441/network-import",
"id": "32f9afbaaf29148ccbce71c8ed9b378bef594b35",
"size": "4359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "network_import_hook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4359"
}
],
"symlink_target": ""
}
|
import numpy
import pytest
from qualipy.filters.exposure import *
OVER_EXPOSED_IMAGE = 'tests/images/over_exposure_sample.jpg'
UNDER_EXPOSED_IMAGE = 'tests/images/under_exposure_sample.jpg'
GOOD_IMAGE = 'tests/images/exposure_sample_good.jpg'
def test_normalized_clipping_percentage_for_black_image():
img = numpy.ones((10, 10)).astype(numpy.uint8)
assert round(normalized_clipping_percentage(img)) == 0
def test_normalized_clipping_percentage_for_white_image():
img = numpy.ones((10, 10)).astype(numpy.uint8)
img[:, :] = 255
assert round(normalized_clipping_percentage(img)) == 50
def test_recognizes_over_exposed():
assert Exposure().predict(OVER_EXPOSED_IMAGE)
def test_recognizes_under_exposed():
    assert Exposure().predict(UNDER_EXPOSED_IMAGE)
def test_doesnt_recognize_normal_image():
assert not Exposure().predict(GOOD_IMAGE)
def test_setting_threshold():
assert not Exposure(threshold=1).predict(OVER_EXPOSED_IMAGE)
def test_inverting_threshold():
assert Exposure(1.01, invert_threshold=True).predict(OVER_EXPOSED_IMAGE)
def test_can_return_float():
assert type(Exposure().predict(OVER_EXPOSED_IMAGE,
return_boolean=False)) != bool
def test_wrong_path_type_raises_exception():
with pytest.raises(TypeError):
assert Exposure().predict(0)
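# Editor's usage sketch: a hypothetical helper illustrating the API exercised
# by the tests above -- predict() returns a boolean by default, or a raw score
# when called with return_boolean=False; the threshold value here is made up.
def example_usage(path):
    score = Exposure().predict(path, return_boolean=False)
    flagged = Exposure(threshold=0.5).predict(path)
    return score, flagged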
|
{
"content_hash": "8e1484a41a87a1061b11b8ae914806dc",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 76,
"avg_line_length": 27.1,
"alnum_prop": 0.7136531365313653,
"repo_name": "vismantic-ohtuprojekti/image-filtering-suite",
"id": "d2364a89581a538d24afbdf6f89d2bf9d5ae257c",
"size": "1355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/filters/test_exposure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155186"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
#Adding directory to the path where Python searches for modules
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
#Importing common crypto module
import utility
import common
def main():
string='Burning \'em, if you ain\'t quick and nimble\nI go crazy when I hear a cymbal'
key='ICE'
encrypted_repetitive_xor_string=utility.xor_repetitive(string,key)
print encrypted_repetitive_xor_string
main()
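# Editor's sketch (assumption): utility.xor_repetitive is assumed to implement
# repeating-key XOR, i.e. each plaintext byte is XORed with the key byte at the
# same position modulo the key length (the project's version may additionally
# hex-encode the result). A minimal reference version of that operation:
#
#     def xor_repetitive_reference(text, key):
#         return ''.join(chr(ord(c) ^ ord(key[i % len(key)]))
#                        for i, c in enumerate(text))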
|
{
"content_hash": "119cec9dfb6265a8895836e8740cc94a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 89,
"avg_line_length": 25.55,
"alnum_prop": 0.7671232876712328,
"repo_name": "arvinddoraiswamy/blahblah",
"id": "72f03edfe51509ed59ca6f05a311a737b003a8c2",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptopals/Set1/cryp5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4477"
},
{
"name": "Python",
"bytes": "149921"
},
{
"name": "Ruby",
"bytes": "2455"
},
{
"name": "Shell",
"bytes": "20492"
}
],
"symlink_target": ""
}
|
"""
Utility objects and functions for debugging.
"""
class FakeException(Exception):
"""Placeholder exception for swapping into the except clause of a
try-except clause so that errors are thrown.
For example, this is useful in xhr_handlers.get_variant_list() where I have
a try-except that returns a 404 to the user, rather than a 500. However,
for debugging, we often want to see the full stacktrace.
"""
pass
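# Editor's usage sketch: the names below (lookup_variants, HttpResponseNotFound)
# are hypothetical and only illustrate the swap described in the docstring --
# temporarily replace the broad except with FakeException while debugging so
# the real error propagates with a full stacktrace.
#
#     try:
#         variants = lookup_variants(request)
#     except FakeException:          # was: except (KeyError, ValueError):
#         return HttpResponseNotFound()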
|
{
"content_hash": "4189a458f5ff5639677d51296f00a341",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.7171945701357466,
"repo_name": "woodymit/millstone_accidental_source",
"id": "ceed6e4c621f729350936e208f82721e412e7b9d",
"size": "442",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "genome_designer/debug/debug_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11461"
},
{
"name": "CoffeeScript",
"bytes": "3226"
},
{
"name": "HTML",
"bytes": "76254"
},
{
"name": "JavaScript",
"bytes": "140841"
},
{
"name": "Python",
"bytes": "1009103"
},
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np
class lmdbDataset(Dataset):
def __init__(self, root=None, transform=None, target_transform=None):
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
            print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'))
self.nSamples = nSamples
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
with self.env.begin(write=False) as txn:
img_key = 'image-%09d' % index
imgbuf = txn.get(img_key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print('Corrupted image for %d' % index)
return self[index + 1]
if self.transform is not None:
img = self.transform(img)
label_key = 'label-%09d' % index
label = str(txn.get(label_key))
if self.target_transform is not None:
label = self.target_transform(label)
return (img, label)
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
class randomSequentialSampler(sampler.Sampler):
def __init__(self, data_source, batch_size):
self.num_samples = len(data_source)
self.batch_size = batch_size
def __iter__(self):
n_batch = len(self) // self.batch_size
tail = len(self) % self.batch_size
index = torch.LongTensor(len(self)).fill_(0)
for i in range(n_batch):
random_start = random.randint(0, len(self) - self.batch_size)
batch_index = random_start + torch.range(0, self.batch_size - 1)
index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
# deal with tail
if tail:
random_start = random.randint(0, len(self) - self.batch_size)
tail_index = random_start + torch.range(0, tail - 1)
index[(i + 1) * self.batch_size:] = tail_index
return iter(index)
def __len__(self):
return self.num_samples
class alignCollate(object):
def __init__(self, imgH=32, imgW=128, keep_ratio=False, min_ratio=1):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio = keep_ratio
self.min_ratio = min_ratio
def __call__(self, batch):
images, labels = zip(*batch)
imgH = self.imgH
imgW = self.imgW
if self.keep_ratio:
ratios = []
for image in images:
w, h = image.size
ratios.append(w / float(h))
ratios.sort()
max_ratio = ratios[-1]
imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio
transform = resizeNormalize((imgW, imgH))
images = [transform(image) for image in images]
images = torch.cat([t.unsqueeze(0) for t in images], 0)
return images, labels
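# --- Usage sketch (editor's addition) ---
# A hedged example of how these pieces are typically combined; the lmdb path
# and batch size are placeholders, not values from the original project.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    train_set = lmdbDataset(root='/path/to/train_lmdb')  # hypothetical path
    train_sampler = randomSequentialSampler(train_set, batch_size=64)
    train_loader = DataLoader(
        train_set, batch_size=64, sampler=train_sampler, num_workers=2,
        collate_fn=alignCollate(imgH=32, imgW=100, keep_ratio=True))
    for images, labels in train_loader:
        # images: (batch, 1, 32, W) tensor normalized to [-1, 1]; labels: tuple of str
        break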
|
{
"content_hash": "bace28657f18c19b0e9a516f0604c5ca",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 29.533834586466167,
"alnum_prop": 0.5595723014256619,
"repo_name": "ahmedmazariML/Re-Implementation-of-Convolutional-Recurrent-Neural-Network-for-Sequence-Text-Recognition",
"id": "f91b52c3ab83a818fa78fd3d35a9df6a5c0ca767",
"size": "3965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19680"
}
],
"symlink_target": ""
}
|
import math, random
class Task( object ):
"""A task"""
def __init__(self,
Id ,
Name ,
WCET ,
BCET ,
Period ,
Deadline ,
Priority ):
self.id = str(Id)
self.name = str(Name)
self.wcet = int(WCET)
self.bcet = int(BCET)
self.period = int(Period)
self.deadline = int(Deadline)
self.priority = int(Priority)
self.in_coms = [ ]
self.out_coms = [ ]
def my_print (self):
print '\n////// Task ', self.id
print 'Name : ' + self.name
print 'WCET : %d ' % self.wcet
print 'BCET : %d ' % self.bcet
print 'Period : %d ' % self.period
print 'Deadline : %d ' % self.deadline
print 'Priority : %d ' % self.priority
print 'in_coms : ', [ " <--" + str(s.weight) + "-- " + s.source.name for s in self.in_coms ]
print 'out_coms : ', [ " --" + str(s.weight) + "--> " + s.destination.name for s in self.out_coms ]
print '///////////////\n'
class Com( object ):
"""A communication between a task and another"""
def __init__(self,
Id ,
source ,
destination ,
weight ,
tasks ):
self.id = str(Id)
self.weight = int (weight)
for t in tasks :
if t.id == source :
t.out_coms.append( self )
self.source = t
if t.id == destination :
t.in_coms.append( self )
self.destination = t
def my_print (self):
print '\n////// Communication '
print self.id + " : " + self.source.name + '--'+ str(self.weight) + '-->' + self.destination.name
print '///////////////\n'
class Mapping( object ) :
'''
Defines a mapping of t Tasks in an NxM 2D mesh
This object includes the mesh
'''
def __init__( self, N, M ):
self.N = N
self.M = M
self.tiles = [None] * (N*M)
def get_pos ( self, task ) :
for index in range(len(self.tiles)) :
if self.tiles[index] is not None and self.tiles[index].name == task.name :
return math.floor( index / self.N ), index % self.N
return None, None
def set_with_coord ( self, task, x, y ):
self.tiles[ x * self.N + y] = task
def set ( self, task, index ):
self.tiles[ index ] = task
def get ( self, x, y ):
return self.tiles[ x * self.N + y]
def manhattan_distance( self, x1, y1, x2, y2 ):
'''
Computes the manhattan distance.
between the points (x1,y1) and (x2,y2)
on a 2D plane
'''
return ( abs(x1-x2) + abs(y1-y2) )
def manhattan_distance( self, task1, task2 ):
'''
Computes the manhattan distance between two tasks
on a 2D plane
'''
x1, y1 = self.get_pos( task1 )
x2, y2 = self.get_pos( task2 )
return ( abs(x1-x2) + abs(y1-y2) )
def tasks(self) :
return [ t for t in self.tiles if t is not None ]
def random_place( self, tasks ):
for t in tasks :
done = False
while ( not done ) :
x = random.randint(0, self.M-1)
y = random.randint(0, self.N-1)
if ( self.get(x,y) == None) :
self.set_with_coord( t , x, y)
done = True
def __repr__(self) :
string = ''
for x in range(self.N) :
string += '|'
for y in range(self.M) :
elem = self.get(x,y)
if elem is None :
string += " X" + '\t|'
else:
string += " " + self.get(x,y).name + '\t|'
string += '\t\n'
string += '\n'
return string
def cost ( self ):
cost = 0
for x in range(self.N) :
for y in range(self.M) :
elem = self.get(x,y)
if elem is not None :
for out in elem.out_coms :
dest = out.destination
dest_x, dest_y = self.get_pos(dest)
if ( dest_x is not None) : # todo, this is jus tfor LFC
cost += out.weight * self.manhattan_distance( elem, dest )
#print 'Adding distance between' + elem.name + ' and ' + dest.name
return cost
    def max_distance_lenght ( self, tabu_list ):
        max_com = 0
        second_max_com = 0
for x in range(self.N) :
for y in range(self.M) :
elem = self.get(x,y)
if elem is not None :
for out in elem.out_coms :
if not ( (out.source in tabu_list) and (out.destination in tabu_list) ) :
dest = out.destination
new_dist = self.manhattan_distance( elem, dest )
if new_dist > max_com:
second_max_com = max_com
max_com = out
else :
if new_dist > second_max_com :
second_max_com = out
return max_com, second_max_com
def max_distance_overall ( self, tabu_list):
max_com = 0
second_max_com = 0
for x in range(self.N) :
for y in range(self.M) :
elem = self.get(x,y)
if elem is not None :
for out in elem.out_coms :
if not ( (out.source in tabu_list) and (out.destination in tabu_list) ) :
dest = out.destination
new_dist = out.weight * self.manhattan_distance( elem, dest )
if new_dist > max_com:
second_max_com = max_com
max_com = out
else :
if new_dist > second_max_com :
second_max_com = out
return max_com, second_max_com
def reduce_this_com( self, com, tabu_list ):
if (not com.source in tabu_list):
to_move = com.source
other = com.destination
else :
            # By specification of max_com_... either source or destination is assumed to not be in the tabu list
to_move = com.destination
other = com.source
if not to_move in tabu_list :
to_move_x, to_move_y = self.get_pos(to_move)
to_move_index = int(to_move_x) * self.N + int(to_move_y)
# Trying to change the source
possible = [None]*4
# Possibility on the right
possible[0]= [ i for i in range(len(self.tiles)) if i > to_move_index and (not self.tiles[i] in tabu_list ) and ( self.tiles[i] is not other) ]
# Possibility on the left
possible[1]= [ i for i in range(len(self.tiles)) if i < to_move_index and (not self.tiles[i] in tabu_list ) and ( self.tiles[i] is not other) ]
# Possibility on above on y axis
possible[2]= [ i for i in range(len(self.tiles)) if i < (to_move_index - self.N) and (not self.tiles[i] in tabu_list ) and ( self.tiles[i] is not other) ]
# Possibility on below on y axis
possible[3]= [ i for i in range(len(self.tiles)) if i > (to_move_index + self.N) and (not self.tiles[i] in tabu_list ) and ( self.tiles[i] is not other) ]
direction = random.randint(0, 3)
while ( len ( possible[direction] ) <= 0 ):
direction = random.randint(0, 3)
point = random.randint(0, 2)
while point >= len( possible[direction] ) :
point = random.randint(0, 2)
index_for_swap = possible[direction][point]
direction_string = ["->", "<-", "^", "v"]
print 'Swap : direction : ' , direction_string[direction], ', swapping ', to_move_index, ' with ', index_for_swap
self.tiles[to_move_index] = self.tiles[index_for_swap]
self.tiles[index_for_swap] = to_move
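# --- Usage sketch (editor's addition) ---
# Hedged example: three dummy tasks with two weighted communications are
# placed at random on a 3x3 mesh and the total weighted Manhattan cost is
# printed. All ids, names and weights below are made up for illustration.
if __name__ == '__main__':
    tasks = [Task(i, 't%d' % i, 10, 5, 100, 100, 1) for i in range(3)]
    coms = [Com('c01', '0', '1', 4, tasks), Com('c12', '1', '2', 2, tasks)]
    mapping = Mapping(3, 3)
    mapping.random_place(tasks)
    print mapping
    print 'cost =', mapping.cost()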
|
{
"content_hash": "6547831c0fa1cfd96e3aab13e7c31efa",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 167,
"avg_line_length": 30.482993197278912,
"alnum_prop": 0.4318232537380049,
"repo_name": "neonsoftware/network-on-chip-mapping-benchmark",
"id": "aa95ffbc8a538281f2f9b6e34f5723de43157a77",
"size": "8962",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data_structures.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27606"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from grab.util.default_config import DEFAULT_SPIDER_GLOBAL_CONFIG
import importlib
def update_dict(target, source, overwrite=False):
for key, val in source.items():
if key not in target or overwrite:
target[key] = deepcopy(source[key])
return target
def build_root_config(settings_mod_path):
module = importlib.import_module(settings_mod_path)
config = module.GRAB_SPIDER_CONFIG
if not 'global' in config:
config['global'] = {}
update_dict(config['global'], DEFAULT_SPIDER_GLOBAL_CONFIG,
overwrite=False)
return config
def build_spider_config(spider_class, root_config):
spider_name = spider_class.get_spider_name()
spider_config = deepcopy(root_config.get(spider_name, {}))
update_dict(spider_config, root_config['global'], overwrite=False)
spider_class.setup_spider_config(spider_config)
return spider_config
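# --- Usage sketch (editor's addition) ---
# Hedged example: "myproject.settings" is a hypothetical module defining a
# GRAB_SPIDER_CONFIG dict, and MySpider a hypothetical Spider subclass; the
# calls below only show how the two helpers are meant to be chained.
#
#     root_config = build_root_config('myproject.settings')
#     spider_config = build_spider_config(MySpider, root_config)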
|
{
"content_hash": "da28a3fe1824428ceacee5e277d70075",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 33.392857142857146,
"alnum_prop": 0.6994652406417112,
"repo_name": "mawentao007/reading_grab",
"id": "850549fed141c223e4a13cec1b2090d2e2128599",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grab/util/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5434"
},
{
"name": "Makefile",
"bytes": "910"
},
{
"name": "PostScript",
"bytes": "2788"
},
{
"name": "Python",
"bytes": "407915"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from django.core.urlresolvers import reverse
from getpaid.utils import get_domain
class Command(BaseCommand):
help = 'Display URL path for Epay.dk Online URL configuration'
def handle(self, *args, **options):
current_site = get_domain()
        self.stdout.write('Log in to the Epay.dk configuration page and '
                          'set up the following links:\n\n')
success_name = 'getpaid:epaydk:success'
path = reverse(success_name)
self.stdout.write(' * accepturl URL: http://%s%s\n\thttps://%s%s\n\n' % (
current_site,
path,
current_site,
path
)
)
failure_name = 'getpaid:epaydk:failure'
path = reverse(failure_name)
self.stdout.write(' * cancelurl URL: http://%s%s\n\thttps://%s%s\n\n' % (
current_site,
path,
current_site,
path
)
)
path = reverse('getpaid:epaydk:online')
self.stdout.write(' * callbackurl URL: http://%s%s\n\thttps://%s%s\n\n' % (
current_site,
path,
current_site,
path,
)
)
        self.stdout.write('To change the domain name please edit the Sites settings.\n'
            'Don\'t forget to set up your web server to accept\nhttps connections in'
            ' order to use secure links.\n')
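# Editor's note (usage sketch): given this command module's file name, it is
# invoked from a Django project as
#     python manage.py epaydk_configuration
# which prints the accepturl / cancelurl / callbackurl values to paste into
# the Epay.dk merchant configuration page.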
|
{
"content_hash": "e37e4696616670987f1db6dd4aadd755",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 84,
"avg_line_length": 31.977777777777778,
"alnum_prop": 0.5524669909659485,
"repo_name": "nielsonsantana/django-getpaid",
"id": "fa42a39dfbcdd6d6dc3f0d1b48be5c1cbd43d4eb",
"size": "1439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getpaid/backends/epaydk/management/commands/epaydk_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6268"
},
{
"name": "Python",
"bytes": "174142"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
import os
"""
Deployment-based configuration
When deploying a topology, specify a deployment to match with a settings key, e.g.:
-o "'topology.deployment=\"local\"'"
Spouts / Bolts in the topology will then pull the settings they need from this module.
"""
ALL_SETTINGS = {}
ALL_SETTINGS['local'] = {
'topology':'local',
'appid': 'datawake',
'crawler-in-topic' : 'crawler-in',
'crawler-out-topic' : 'crawler-out',
'visited-topic': 'memex-datawake-visited',
'conn_pool' : "localhost:9092",
'user':'root',
'database':'memex_sotera',
'password':'root',
'host':'localhost'
}
ALL_SETTINGS['cluster'] = {
'topology':'cluster',
'hbase_host':'localhost',
'appid': 'datawake',
'crawler-in-topic' : 'crawler-in',
'crawler-out-topic' : 'crawler-out',
'visited-topic': 'memex-datawake-visited',
'conn_pool' : "localhost:9092",
'user':'root',
'database':'memex_sotera',
'password':'root',
'host':'localhost'
}
ALL_SETTINGS['local-docker'] = {
'topology':'local',
'appid': 'datawake',
'crawler-in-topic' : 'crawler-in',
'crawler-out-topic' : 'crawler-out',
'visited-topic': 'memex-datawake-visited',
'conn_pool' : os.environ['KAFKA_PORT_9092_TCP_ADDR']+":9092",
'user':'root',
'database':'memex_sotera',
'password':os.environ['MYSQL_ENV_MYSQL_ROOT_PASSWORD'],
'host':os.environ['MYSQL_PORT_3306_TCP_ADDR']
}
def get_settings(key):
return ALL_SETTINGS[key]
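# --- Usage sketch (editor's addition) ---
# A bolt or spout resolves its settings from the deployment key it was given
# in the topology options; 'local' is one of the keys defined above.
if __name__ == '__main__':
    settings = get_settings('local')
    print(settings['conn_pool'], settings['visited-topic'])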
|
{
"content_hash": "676a5dc7b855f4ff64a1ac68cb2ad048",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 85,
"avg_line_length": 23.907692307692308,
"alnum_prop": 0.6306306306306306,
"repo_name": "diffeo/Datawake",
"id": "dbb1a52b7a5cbd5f470ab9e0aa5b69ba0da734f2",
"size": "1554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memex-datawake-stream/src/datawakestreams/all_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "49948"
},
{
"name": "Clojure",
"bytes": "7538"
},
{
"name": "JavaScript",
"bytes": "296516"
},
{
"name": "Python",
"bytes": "161327"
},
{
"name": "Shell",
"bytes": "1683"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from .cmdline import main
sys.exit(main())
|
{
"content_hash": "719ee6ccc0c33e733a6c3a20846c2ec5",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 13.714285714285714,
"alnum_prop": 0.7395833333333334,
"repo_name": "krisztianfekete/green",
"id": "9e7321cbf581310c63fd4b6b4a574a114b1ea86a",
"size": "96",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "green/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2897"
},
{
"name": "Python",
"bytes": "196550"
},
{
"name": "Shell",
"bytes": "4179"
}
],
"symlink_target": ""
}
|
import logging, urllib2, time, json, os, math
import pymongo, bson
import numpy as np
import StringIO, gzip
import csv
import pandas as pd
from astropy.io import fits
from astropy import wcs, coordinates as coord, units as u
from astropy.cosmology import Planck13 as cosmo
from scipy.optimize import brentq, curve_fit, leastsq
from scipy.interpolate import interp1d
from scipy.integrate import simps
from scipy import stats
from skgof import ad_test
from sklearn.metrics import r2_score
from consensus import rgz_path, data_path, db, version
from processing import *
import contour_path_object as cpo
completed_file = '%s/bending_completed%s.txt' % (rgz_path, version)
# For internal use
from pprint import pprint
import itertools
from sklearn.decomposition import PCA
from corner import corner
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import FormatStrFormatter
plt.ion()
matplotlib.rc('text', usetex=True)
matplotlib.rcParams.update({'font.size': 14})
# Connect to Mongo database
version = '_bending'
subjects = db['radio_subjects']
consensus = db['consensus{}'.format(version)]
catalog = db['catalog{}'.format(version)]
whl = db['WHL15']
rm_m = db['redmapper_members']
rm_c = db['redmapper_clusters']
amf = db['AMFDR9']
version = ''
bent_sources = db['bent_sources{}'.format(version)]
bending_15 = db['bending_15{}'.format(version)]
bending_control = db['bending_control{}'.format(version)]
sdss_sample = db['sdss_sample']
xmatch = db['sdss_whl_xmatch']
distant_sources = db['distant_sources']
# Final sample cuts
total_cuts = {'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'RGZ.radio_consensus':{'$gte':0.65}, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}, 'RGZ.duplicate':0}
# Get dictionary for finding the path to FITS files and WCS headers
with open('%s/first_fits.txt' % rgz_path) as f:
lines = f.readlines()
pathdict = {}
for l in lines:
spl = l.split(' ')
pathdict[spl[1].strip()] = '%s/rgz/raw_images/RGZ-full.%i/FIRST-IMGS/%s.fits' % (data_path, int(spl[0]), spl[1].strip())
### Functions ###
def get_data(subject):
'''
Returns the radio contours belonging to a single subject field
'''
link = subject['location']['contours'] # Gets url as Unicode string
# Use local file if available
jsonfile = link.split("/")[-1]
jsonfile_path = "{0}/rgz/contours/{1}".format(data_path,jsonfile)
if os.path.exists(jsonfile_path):
with open(jsonfile_path,'r') as jf:
data = json.load(jf)
# Otherwise, read from web
else:
# Reform weblink to point to the direct S3 URL, which will work even with older SSLv3
link_s3 = "http://zooniverse-static.s3.amazonaws.com/"+link.split('http://')[-1]
tryCount = 0
while(True): # In case of error, wait 10 sec and try again; give up after 5 tries
tryCount += 1
try:
compressed = urllib2.urlopen(str(link_s3)).read() #reads contents of url to str
break
except (urllib2.URLError, urllib2.HTTPError) as e:
if tryCount>5:
output('Unable to connect to Amazon Web Services; trying again in 10 min', logging.exception)
raise fn.DataAccessError(message)
logging.exception(e)
time.sleep(10)
tempfile = StringIO.StringIO(compressed) # Temporarily stores contents as file (emptied after unzipping)
uncompressed = gzip.GzipFile(fileobj=tempfile, mode='r').read() # Unzips contents to str
data = json.loads(uncompressed) # Loads JSON object
return data
def get_contours(w, ir_pos, peak_pos, data, peak_count):
'''
Returns a list of Path objects corresponding to each outer contour in the data, in RA and dec coordinates
Removes outer layers until there are two components and a disjoint IR
Removes any contours that aren't in this source
'''
assert (peak_count in [2,3]), 'Not a valid morphology'
# Assemble the contour trees
contour_trees = []
for contour in data['contours']:
tree = cpo.Node(w, contour=contour)
contour_trees.append(tree)
# Remove each contour that doesn't contain a peak from this source
contains_peak = []
for ix, tree in enumerate(contour_trees):
if any(tree.contains(peak_pos)):
contains_peak.append(ix)
contour_trees[:] = [tree for ix, tree in enumerate(contour_trees) if ix in contains_peak]
# Combine the entire source into a single tree
value_at_inf = {'arr':[{'x':-1,'y':-1}, {'x':w._naxis1+1,'y':-1}, {'x':w._naxis1+1,'y':w._naxis2+1}, {'x':-1,'y':w._naxis2+1}, {'x':-1,'y':-1}], 'k':-1}
source_tree = cpo.Node(w)
source_tree.insert(cpo.Node(w, value=value_at_inf))
source_tree.children = contour_trees
# Remove the BCG source if it's a triple
if peak_count == 3:
source_tree.remove_triple_center(ir_pos, peak_pos)
# Increase the contour level until the IR position is outside all the contours
roots = []
source_tree.get_equal_disjoint(ir_pos, roots)
source_tree.children = roots
return source_tree
def get_pos_angle(w, ir, method, method_data):
'''
Determines the position angle between the IR position and the given comparison object
Method:
Contour: from the IR position to the most distant part of the radio contour (data is contour_list)
Peak: from the IR position to the peak of each component (data is source['radio']['peaks'])
'''
if method == 'contour':
contour_sky = coord.SkyCoord(w.wcs_pix2world(method_data.vertices,1), unit=(u.deg,u.deg), frame='icrs')
separation = ir.separation(contour_sky)
pos_angle = ir.position_angle(contour_sky)[separation==np.max(separation)][0]
elif method == 'peak':
pos_angle = ir.position_angle(coord.SkyCoord(method_data['ra'], method_data['dec'], unit=(u.deg,u.deg), frame='icrs'))
return pos_angle
def get_angles(w, ir, method, method_data):
'''
Determines the opening angle of the radio tail and the position angle of the angle bisector
Method:
Contour: from the IR position to the most distant part of the radio contour (data is contour_list)
Peak: from the IR position to the peak of each component (data is source['radio']['peaks'])
'''
assert (method in ['contour', 'peak']), 'Not a valid method'
pos_angle_0 = get_pos_angle(w, ir, method, method_data[0])
pos_angle_1 = get_pos_angle(w, ir, method, method_data[1])
opening_angle = np.abs(pos_angle_1-pos_angle_0).wrap_at(2*np.pi*u.rad)
bending_angle = coord.Angle(np.abs(np.pi*u.rad - opening_angle))
bisector = (pos_angle_1+pos_angle_0)/2.
if np.abs(bisector-pos_angle_0) > np.pi/2*u.rad:
bisector += np.pi*u.rad
bending_angles = {'pos_angle_0':pos_angle_0, 'pos_angle_1':pos_angle_1, 'bending_angle':bending_angle, 'bisector':bisector.wrap_at(2*np.pi*u.rad)}
return bending_angles
def get_global_peaks(w, peak_pos, peaks, contour_tree):
'''
Determines the position of the global maximum for each component in the contour
'''
global_peaks = []
for child in contour_tree.children:
global_peak = {'flux':0}
for peak in [peaks[ix] for ix, elem in enumerate(child.contains(peak_pos)) if elem]:
if peak['flux'] > global_peak['flux']:
global_peak = peak
if global_peak['flux'] > 0:
global_peaks.append(global_peak)
return global_peaks
def curve_intersect(fun1, fun2, xmin, xmax):
'''
Finds the intersection of two curves, bounded in [xmin, xmax]
Returns an array of x values
'''
diff = lambda x: fun1(x)-fun2(x)
x_range = np.linspace(xmin, xmax, 100)
m_sign = np.sign(diff(x_range)).astype(int)
roots = x_range[np.where(m_sign[1:] - m_sign[:-1] != 0)[0] + 1]
# If they don't cross, return None
if len(roots) == 0:
return np.array([])
    # If they cross exactly once, find the global solution
elif len(roots) == 1:
return np.array([brentq(diff, xmin, xmax)])
# If they cross multiple times, find the local solution between each root
else:
limits = np.concatenate(([xmin], roots, [xmax]))
intersections = np.empty(len(limits)-2)
for ix in range(len(intersections)):
intersections[ix] = brentq(diff, limits[ix], limits[ix+1])
return intersections
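# Editor's worked example (sketch): intersecting y = x with y = 1 - x on
# [0, 1] changes the sign of the difference exactly once, so the function
# returns a single Brent root:
#     curve_intersect(lambda x: x, lambda x: 1. - x, 0., 1.)  # -> array([0.5])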
def get_colinear_separation(w, ir, peak, contour):
'''
Finds the distance from the host to the edge of the contour, passing through the peak
'''
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)[0]
peak_pos = w.wcs_world2pix(np.array([[peak['ra'], peak['dec']]]), 1)[0]
# Extrapolate the line connecting the peak to the IR position
slope = (peak_pos[1]-ir_pos[1])/(peak_pos[0]-ir_pos[0])
extrap_pos = ir_pos + w._naxis1*np.array([1.,slope])
extrap_neg = ir_pos - w._naxis1*np.array([1.,slope])
# Split the contours into well-behaved functions
# Roll the array until the first index is the minimum value
x, y = contour.vertices.T
xmin_loc = np.where(x==min(x))[0][0]
x_rot = np.append(np.roll(x[:-1], len(x)-xmin_loc-1), min(x))
y_rot = np.append(np.roll(y[:-1], len(x)-xmin_loc-1), y[xmin_loc])
# Find where the contour doubles back on itself along the x-axis
m_sign = np.sign(x_rot[1:]-x_rot[:-1])
roots = np.where(m_sign[1:] - m_sign[:-1] != 0)[0] + 1
limits = np.concatenate(([0], roots, [len(x_rot)-1]))
# Split the contours at the double-back positions
domains = []
ranges = []
for ix in range(len(limits)-1):
domains.append(x_rot[limits[ix]:limits[ix+1]+1])
ranges.append(y_rot[limits[ix]:limits[ix+1]+1])
# Interpolate the contour segments
c_interps = []
for x_seg, y_seg in zip(domains, ranges):
c_interp = interp1d(x_seg, y_seg, 'linear')
c_interps.append(c_interp)
if peak_pos[0] > ir_pos[0]:
tail = np.vstack((extrap_neg, ir_pos, peak_pos, extrap_pos))
else:
tail = np.vstack((extrap_pos, ir_pos, peak_pos, extrap_neg))
tail_interp = interp1d(tail.T[0], tail.T[1], 'linear')
# Find the intersections of the contours and tail
x_intersects, y_intersects = [], []
for ix, c_interp in enumerate(c_interps):
x_intersect = curve_intersect(tail_interp, c_interp, domains[ix][0], domains[ix][-1])
y_intersect = c_interp(x_intersect)
x_intersects.append(x_intersect)
y_intersects.append(y_intersect)
intersects = np.vstack((np.hstack(x_intersects), np.hstack(y_intersects))).T
# Return the maximum separation between host and edge
intersects_sky = coord.SkyCoord(w.wcs_pix2world(intersects,1), unit=(u.deg,u.deg), frame='icrs')
return max(ir.separation(intersects_sky))
def get_tail_lengths(w, ir, method, contour_list, peaks=None):
'''
Determines angular separation between the IR position and the given comparison object
Method:
Contour: from the IR position to the most distant part of the radio contour
Peak: from the IR position to the peak of the component
'''
tail_lengths = []
if method == 'contour':
for contour in contour_list:
contour_sky = coord.SkyCoord(w.wcs_pix2world(contour.vertices,1), unit=(u.deg,u.deg), frame='icrs')
separation = ir.separation(contour_sky)
tail_lengths.append(np.max(separation))
elif method == 'peak':
assert (peaks is not None), 'No radio peaks provided'
for contour, peak in zip(contour_list, peaks):
tail_lengths.append(get_colinear_separation(w, ir, peak, contour))
return tail_lengths
def peak_edge_ratio(w, ir, peaks, tails):
'''
Calculate the ratio of the distance to the peak and to the edge of each tail (measured on the sky)
'''
ratios = []
for peak, tail in zip(peaks, tails):
peak_pos = coord.SkyCoord(peak['ra'], peak['dec'], unit=(u.deg,u.deg), frame=('icrs'))
ratios.append(ir.separation(peak_pos).deg/tail.deg)
return ratios
def get_z(source):
'''
Returns the best redshift value and uncertainty for the source (only if SDSS)
'''
if 'SDSS' in source and 'spec_redshift' in source['SDSS']:
return source['SDSS']['spec_redshift'], source['SDSS']['spec_redshift_err']
elif 'SDSS' in source and 'photo_redshift' in source['SDSS']:
return source['SDSS']['photo_redshift'], source['SDSS']['photo_redshift_err']
else:
return 0, 0
# elif 'AllWISE' in source and 'photo_redshift' in source['AllWISE']:
# return source['AllWISE']['photo_redshift'], 0
def get_whl(ir, z, z_err, transverse, dz):
'''
Find the corresponding galaxy cluster in the WHL15 catalog
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in whl.find({'RAdeg':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'DEdeg':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'$or':[{'zspec':{'$gt':z-dz, '$lt':z+dz}}, {'zphot':{'$gt':z-dz, '$lt':z+dz}}]}):
current_sep = ir.separation( coord.SkyCoord(temp_c['RAdeg'], temp_c['DEdeg'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_redmapper(objID, ir, z, z_err, transverse, dz):
'''
Find the corresponding galaxy cluster in the redMaPPer catalog
First check against member catalog, then check radially
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
member = rm_m.find_one({'ObjID':objID})
if member is not None:
return rm_c.find_one({'ID':member['ID']})
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in rm_c.find({'RAdeg':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'DEdeg':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'$or':[{'zspec':{'$gt':z-dz, '$lt':z+dz}}, {'zlambda':{'$gt':z-dz, '$lt':z+dz}}]}):
current_sep = ir.separation( coord.SkyCoord(temp_c['RAdeg'], temp_c['DEdeg'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_amf(ir, z, z_err, transverse, dz):
'''
    Find the corresponding galaxy cluster in the AMF DR9 catalog
If multiple clusters match, choose the one with least angular separation
'''
# If the galaxy is too close, physical separations become too great on the sky
# Restrict redshifts to at least 0.01
if z < 0.01:
return None
# Maximum separation
max_sep = float(transverse * u.Mpc / cosmo.angular_diameter_distance(z) * u.rad / u.deg)
best_sep = np.inf
cluster = None
for temp_c in amf.find({'ra':{'$gt':ir.ra.deg-max_sep, '$lt':ir.ra.deg+max_sep}, 'dec':{'$gt':ir.dec.deg-max_sep, '$lt':ir.dec.deg+max_sep}, \
'z':{'$gt':z-dz, '$lt':z+dz}}):
current_sep = ir.separation( coord.SkyCoord(temp_c['ra'], temp_c['dec'], unit=(u.deg,u.deg), frame='icrs') )
if (current_sep < best_sep) and (current_sep < max_sep*u.deg):
best_sep = current_sep
cluster = temp_c
return cluster
def get_bending(source, peak_count):
'''
Calculate all the bending parameters that don't depend on the cluster
'''
assert (peak_count in [2, 3]), 'Not a valid morphology'
subject = subjects.find_one({'zooniverse_id':source['zooniverse_id']})
# Get pixel-to-WCS conversion
fid = subject['metadata']['source']
fits_loc = pathdict[fid]
w = wcs.WCS(fits.getheader(fits_loc, 0))
# Get the location of the source
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)
z, z_err = get_z(source)
peaks = source['radio']['peaks']
peak_pos = w.wcs_world2pix(np.array([ [peak['ra'],peak['dec']] for peak in peaks ]), 1)
# Get image parameters for this source
data = get_data(subject)
contour_tree = get_contours(w, ir_pos, peak_pos, data, peak_count)
peaks = get_global_peaks(w, peak_pos, peaks, contour_tree)
if len(peaks) != 2:
output("%s didn't have 2 tails" % source['zooniverse_id'])
return
contour_list = [child.path for child in contour_tree.children if any(child.contains(peak_pos))]
# Using the 'contour' method
# bending_angles = get_angles(w, ir, 'contour', contour_list)
# tail_lengths_apparent = get_tail_lengths(w, ir, 'contour', contour_list)
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# ratios = peak_edge_ratio(w, ir, peaks, tail_lengths_apparent)
# asymmetry = ratios[1]/ratios[0]
# using_contour = {'tail_deg_0':tail_lengths_apparent[0], 'tail_deg_1':tail_lengths_apparent[1], 'size_deg':sum(tail_lengths_apparent), 'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc), 'ratio_0':ratios[0], 'ratio_1':ratios[1], 'asymmetry':max(asymmetry,1./asymmetry)}
# using_contour.update(bending_angles)
# for key in using_contour.keys():
# if type(using_contour[key]) is coord.angles.Angle:
# using_contour[key] = using_contour[key].deg
# Using the 'peak' method
bending_angles = get_angles(w, ir, 'peak', peaks)
# tail_lengths_apparent = get_tail_lengths(w, ir, 'peak', contour_list, peaks)
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# ratios = peak_edge_ratio(w, ir, peaks, tail_lengths_apparent)
# asymmetry = ratios[1]/ratios[0]
# using_peaks = {'tail_deg_0':tail_lengths_apparent[0], 'tail_deg_1':tail_lengths_apparent[1], 'size_deg':sum(tail_lengths_apparent), 'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc), 'ratio_0':ratios[0], 'ratio_1':ratios[1], 'asymmetry':max(asymmetry,1./asymmetry)}
# using_peaks.update(bending_angles)
# for key in using_peaks.keys():
# if type(using_peaks[key]) is coord.angles.Angle:
# using_peaks[key] = using_peaks[key].deg
morphology = 'double' if peak_count == 2 else 'triple'
rgz = {'RGZ_id':source['catalog_id'], 'zooniverse_id':source['zooniverse_id'], 'ir_consensus':source['consensus']['ir_level'], 'radio_consensus':source['consensus']['radio_level'], 'peaks':peaks, 'components':source['radio']['components'], 'morphology':morphology, 'size_arcmin':source['radio']['max_angular_extent']/60., 'size_kpc':float((cosmo.angular_diameter_distance(z)*source['radio']['max_angular_extent']*np.pi/180.)/u.kpc)}
# entry = {'RGZ':rgz, 'using_contour':using_contour, 'using_peaks':using_peaks}
entry = {'RGZ':rgz, 'using_peaks':{}}
if 'SDSS' in source:
entry['SDSS'] = source['SDSS']
if 'AllWISE' in source:
entry['AllWISE'] = source['AllWISE']
best_ra = entry['SDSS']['ra'] if 'SDSS' in entry else entry['AllWISE']['ra']
best_dec = entry['SDSS']['dec'] if 'SDSS' in entry else entry['AllWISE']['dec']
entry['best'] = {'ra':best_ra, 'dec':best_dec, 'redshift':z}
return entry
def make_bent_sources():
'''
Generate a collection of radio sources with bending parameters that don't depend on the cluster
Once generated, various matching schemes to the clusters can be tried efficiently
'''
# Determine which sources have already been processed
completed = []
if os.path.exists(completed_file):
with open(completed_file, 'r') as f:
lines = f.readlines()
for line in lines:
completed.append(int(line))
z_range = [0.01, 0.8]
# 'ignore_bending':False
double_args = {'$and': [{'overedge':0, 'catalog_id':{'$nin':completed}}, \
{'$or': [{'radio.number_peaks':2, 'radio.number_components':1}, \
{'radio.number_components':2}]}, \
{'$or': [{'SDSS.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'SDSS.spec_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'AllWISE.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}] }]}
triple_args = {'$and': [{'overedge':0, 'catalog_id':{'$nin':completed}}, \
{'$or': [{'radio.number_peaks':3, 'radio.number_components':{'$in':[1,2]}}, \
{'radio.number_components':3}]}, \
{'$or': [{'SDSS.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'SDSS.spec_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}, \
{'AllWISE.photo_redshift':{'$gte':z_range[0], '$lt':z_range[1]}}] }]}
# Find the bending parameters for each source that matches
for args,peak_count,morphology in zip([double_args,triple_args], [2,3], ['double','triple']):
count = bent_sources.find({'RGZ.morphology':morphology}).count()
with open(completed_file, 'a') as f:
for source in catalog.find(args).batch_size(50):
entry = get_bending(source, peak_count)
if entry is not None:
count += 1
output('%i %s' % (count, source['zooniverse_id']))
bent_sources.insert(entry)
print >> f, source['catalog_id']
def get_cluster_match(source):
'''
Given a source from RGZ, match it to a cluster and calculate the redshift-dependent bending parameters
'''
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else \
coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
z, z_err = get_z(source)
# Match to cluster catalogs
cluster_w = get_whl(ir, z, z_err, 15, 0.04*(1+z))
whl_prop = {}
if cluster_w is not None:
c_pos = coord.SkyCoord(cluster_w['RAdeg'], cluster_w['DEdeg'], unit=(u.deg,u.deg), frame='icrs')
c_sep_arc = c_pos.separation(ir)
c_sep_mpc = float(cosmo.angular_diameter_distance(cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot'])/u.Mpc * c_sep_arc.to(u.rad)/u.rad)
c_pos_angle = c_pos.position_angle(ir)
if c_sep_mpc/cluster_w['r500'] < 0.01:
pop = 'BCG'
elif c_sep_mpc/cluster_w['r500'] >= 1.5:
pop = 'outer'
else:
pop = 'inner'
whl_prop = {'ra':c_pos.ra.deg, 'dec':c_pos.dec.deg, 'separation_deg':c_sep_arc.deg, 'separation_Mpc':c_sep_mpc, 'position_angle':c_pos_angle.wrap_at(2*np.pi*u.rad).deg, 'r/r500':c_sep_mpc/cluster_w['r500'], 'population':pop, 'zbest':cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']}
for key in ['_id', 'N500', 'N500sp', 'RL*500', 'name', 'r500', 'zphot', 'zspec', 'M500']:
if key in cluster_w:
whl_prop[key] = cluster_w[key]
# Only continue if a cluster was matched
if cluster_w is None: #and cluster_r is None and cluster_a is None:
output("%s didn't match to a cluster" % source['RGZ']['zooniverse_id'])
return
else:
z = cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']
# Using the 'contour' method
# tail_lengths_apparent = [source['using_contour']['tail_deg_0']*u.deg, source['using_contour']['tail_deg_1']*u.deg]
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# using_contour = {'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc)}
# source['using_contour'].update(using_contour)
# Using the 'peak' method
# tail_lengths_apparent = [source['using_peaks']['tail_deg_0']*u.deg, source['using_peaks']['tail_deg_1']*u.deg]
# tail_lengths_physical = []
# for tail in tail_lengths_apparent:
# tail_lengths_physical.append(cosmo.angular_diameter_distance(z) * tail.to(u.rad)/u.rad)
# using_peaks = {'tail_kpc_0':float(tail_lengths_physical[0]/u.kpc), 'tail_kpc_1':float(tail_lengths_physical[1]/u.kpc), 'size_kpc':float(sum(tail_lengths_physical)/u.kpc)}
# source['using_peaks'].update(using_peaks)
# Calculate the orientation angle
# for cluster,prop in zip([cluster_w,cluster_r,cluster_a], [whl_prop,rm_prop,amf_prop]):
# if cluster is not None:
cluster, prop = cluster_w, whl_prop
# for method,using in zip(['contour','peaks'], [source['using_contour'],source['using_peaks']]):
for method,using in zip(['peaks'], [source['using_peaks']]):
orientation = coord.angles.Angle(prop['position_angle'] - using['bisector'], unit=u.deg).wrap_at(360.*u.deg).deg
if orientation > 180.:
orientation = 360. - orientation
prop['orientation_%s' % method] = orientation
if orientation > 90.:
orientation = 180. - orientation
prop['orientation_folded'] = orientation
# Compile all results
if cluster_w is not None:
source['WHL'] = whl_prop
# if cluster_r is not None:
# source['redMaPPer'] = rm_prop
# if cluster_a is not None:
# source['AMFDR9'] = amf_prop
return source
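# Sketch of typical use (see make_catalog below): for a source document drawn from
# bent_sources,
#   entry = get_cluster_match(source)
# returns the same dict augmented with a 'WHL' sub-dict (cluster position, separation,
# r/r500, population, and orientation angles), or None if no WHL cluster matches within
# the search radius and redshift window.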
def make_catalog():
'''
Find the sources matching the given search parameters and morphology and run the processing pipeline
'''
# Determine which sources have already been processed
completed = []
if os.path.exists(completed_file):
with open(completed_file, 'r') as f:
lines = f.readlines()
for line in lines:
completed.append(int(line))
# Find the bending and cluster results for each source that matches
for peak_count,morphology in zip([2,3], ['double','triple']):
count = bending_15.find({'RGZ.morphology':morphology}).count()
with open(completed_file, 'a') as f:
for source in bent_sources.find({'RGZ.RGZ_id': {'$nin':completed}, 'RGZ.morphology':morphology}).batch_size(50):
entry = get_cluster_match(source)
if entry is not None:
count += 1
output('%i %s' % (count, source['RGZ']['zooniverse_id']))
bending_15.insert(entry)
print >> f, source['RGZ']['RGZ_id']
# Apply the bending correction
bending_correct(plot=False, update=True, methods=['using_peaks'])
def random_control():
'''
Generate a control sample by shuffling the sky positions of all the sources meeting the radio morphology requirements
'''
# Get the original location values
ras1, ras2, decs1, decs2 = [], [], [], []
for source in bent_sources.find():
ra = source['SDSS']['ra'] if 'SDSS' in source else source['AllWISE']['ra']
dec = source['SDSS']['dec'] if 'SDSS' in source else source['AllWISE']['dec']
if 90 < ra < 290:
ras1.append(ra)
decs1.append(dec)
else:
ras2.append(ra)
decs2.append(dec)
# Shuffle the location values
np.random.shuffle(ras1)
np.random.shuffle(ras2)
np.random.shuffle(decs1)
np.random.shuffle(decs2)
loc = np.vstack([ np.append(ras1,ras2), np.append(decs1,decs2) ]).T
np.random.shuffle(loc)
# Find the bending and cluster results for each source that matches
for ix, source in enumerate(bent_sources.find().batch_size(50)):
# Assign the randomized values
if 'SDSS' in source:
source['SDSS'].update({'ra':loc[ix][0], 'dec':loc[ix][1]})
else:
source['SDSS'] = {'ra':loc[ix][0], 'dec':loc[ix][1]}
entry = get_cluster_match(source)
if entry is not None:
output('%i %s' % (ix, source['RGZ']['zooniverse_id']))
bending_control.insert(entry)
def output(string, fn=logging.info):
'''
Print a string to screen and the logfile
'''
fn(string)
print string
def to_file(filename, coll, params={}):
'''
Print the bending collection to a csv file for analysis
'''
rgz_keys = ['RGZ_id', 'RGZ_name', 'zooniverse_id', 'first_id', 'morphology', 'radio_consensus', 'ir_consensus', 'size_arcmin', 'size_kpc', 'solid_angle', 'overedge']
best_keys = ['ra', 'ra_err', 'dec', 'dec_err', 'redshift', 'redshift_err']
sdss_keys = ['ra', 'dec', 'objID', 'photo_redshift', 'photo_redshift_err', 'spec_redshift', 'spec_redshift_err', 'u', 'u_err', 'g', 'g_err', 'r', 'r_err', 'i', 'i_err', 'z', 'z_err', 'morphological_class', 'spectral_class', 'number_matches']
wise_keys = ['ra', 'dec', 'designation', 'photo_redshift', 'w1mpro', 'w1sigmpro', 'w1snr', 'w2mpro', 'w2sigmpro', 'w2snr', 'w3mpro', 'w3sigmpro', 'w3snr', 'w4mpro', 'w4sigmpro', 'w4snr']
whl_keys = ['name', 'ra', 'dec', 'zphot', 'zspec', 'N500', 'N500sp', 'RL*500', 'M500', 'r500', 'separation_deg', 'separation_Mpc', 'r/r500', 'population', 'position_angle', 'orientation_contour', 'orientation_peaks', 'orientation_folded', 'P', 'P500', 'grad_P', 'alignment']
rm_keys = ['name', 'ra', 'dec', 'zlambda', 'zspec', 'S', 'lambda', 'separation_deg', 'separation_Mpc', 'position_angle', 'orientation_contour', 'orientation_peaks']
amf_keys = ['AMF_id', 'ra', 'dec', 'z', 'r200', 'richness', 'core_radius', 'concentration', 'likelihood', 'separation_deg', 'separation_Mpc', 'position_angle', 'orientation_contour', 'orientation_peaks']
bending_keys = ['pos_angle_0', 'pos_angle_1', 'bending_angle', 'bending_corrected', 'bending_err', 'bending_excess', 'bisector', 'asymmetry'] #, 'tail_deg_0', 'tail_deg_1', 'size_deg', 'tail_kpc_0', 'tail_kpc_1', 'size_kpc', 'ratio_1', 'ratio_0']
all_keys = [rgz_keys, best_keys, sdss_keys, whl_keys, bending_keys, bending_keys]
dict_names = ['RGZ', 'best', 'SDSS', 'WHL', 'using_contour', 'using_peaks']
success = 0
with open(filename, 'w') as f:
header = 'final_sample'
for superkey, key_list in zip(dict_names, all_keys):
for key in key_list:
header += ',%s.%s' % (str(superkey), str(key))
print >> f, header
for entry in coll.find(params):
try:
row = str(entry['final_sample'])
for superkey, key_list in zip(dict_names, all_keys):
for key in key_list:
if superkey in entry and key in entry[superkey]:
if type(entry[superkey][key]) is long or type(entry[superkey][key]) is bson.int64.Int64:
row += ",'%s'" % str(entry[superkey][key])
else:
row += ',%s' % str(entry[superkey][key])
else:
row += ',-99'
print >> f, row
success += 1
except BaseException as e:
output(e, logging.exception)
output('%i/%i successfully printed to %s' % (success, coll.find(params).count(), filename))
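# Example usage (illustrative filename only):
#   to_file('bending_catalog.csv', bending_15, params=total_cuts)
# writes one row per matched source, with '-99' filling any missing keys.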
def plot_running(x_param, y_param, coll=bending_15, morph=None, pop=None, bin_by=None, bin_count=0, logx=False, logy=False, square=False, bent_cut=0, align=None, combined=False, title=True, dz_cut=None):
'''
Plot the running 25th, 50th, and 75th percentiles, computed over a sliding window of data points
x_param, y_param, and (optional) bin_by must be 'category.key' strings that index coll[category][key]
morph can be 'double' or 'triple' to select only that morphology
pop can be 'BCG', 'non-BCG', 'inner', 'outer', or 'separate' to select only that population
When selecting 'non-BCG', combined=True combines the 'inner' and 'outer' sources before smoothing
align can be 'radial' or 'tangential' to select only that alignment
Set logx and/or logy to True to plot that axis on a log scale
Set square=True to force equal axis scales (useful for log-log plots)
Data can be binned with bin_by and bin_count
bent_cut applies a minimum corrected bending angle; dz_cut applies a |dz| limit on the cluster match
'''
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer', 'separate', 'non-BCG'], "pop must be 'BCG', 'inner', 'outer', 'separate', or 'non-BCG'"
assert align in [None, 'radial', 'tangential'], "align must be 'radial' or 'tangential'"
if bin_by is not None:
assert type(bin_count) is int and bin_count>0, 'bin_count must be positive int'
# Prepare parameters for search
params = total_cuts.copy()
x_param_list = x_param.split('.')
y_param_list = y_param.split('.')
for param in [x_param, y_param]:
if param in params:
params[param]['$exists'] = True
else:
params[param] = {'$exists':True}
if morph is not None:
params['RGZ.morphology'] = morph
if align is not None:
params['WHL.alignment'] = align
if pop == 'separate':
pop_list = ['inner', 'outer', 'BCG']
elif pop == 'non-BCG':
if combined:
pop_list = [{'$ne':'BCG'}]
else:
pop_list = ['inner', 'outer']
else:
pop_list = [pop]
params['using_peaks.bending_corrected']['$gte'] = bent_cut
if dz_cut is not None:
params['WHL.dz'] = {'$gte':-1.*np.abs(dz_cut), '$lte':np.abs(dz_cut)}
# Open the plotting window
fig, ax = plt.subplots()
box = ax.get_position()
# Plot trends for each population of interest
needs_labels = True
windows = set()
if bin_by is not None:
plot_params = {'RGZ.size_arcmin': {'name':"Size (')", 'fmt':'%.2f\n - %.2f', 'width':0.91}, \
'best.redshift': {'name':'Redshift', 'fmt':'%.2f\n - %.2f', 'width':0.89}, \
'WHL.M500': {'name':'Mass', 'fmt':'%.1f\n - %.1f', 'width':0.91}}
bin_by_list = bin_by.split('.')
bins = np.arange(bin_count+1) * 100. / bin_count
vals = []
for i in coll.find(params):
vals.append(i[bin_by_list[0]][bin_by_list[1]])
samples = np.percentile(vals, bins)
ax.plot(0, 0, c='w', label=plot_params[bin_by]['name'])
for pop2 in pop_list:
if pop2 is not None:
params['WHL.population'] = pop2
for i in range(len(samples)-1):
params[bin_by] = {'$gte':samples[i], '$lt':samples[i+1]}
window_size, run_x_50, run_y_50 = get_trends(params, x_param, y_param, coll, True, pop!='BCG')
windows.add(window_size)
if logx:
run_x_50 = np.log10(run_x_50)
if logy:
run_y_50 = np.log10(run_y_50)
if needs_labels:
ax.plot(run_x_50, run_y_50, label=plot_params[bin_by]['fmt'] % (samples[i], samples[i+1]), color='C%i'%i)
ax.set_position([box.x0, box.y0, box.width * plot_params[bin_by]['width'], box.height])
else:
ax.plot(run_x_50, run_y_50, color='C%i'%i)
needs_labels = False
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
else:
for pop2 in pop_list:
if pop2 is not None:
params['WHL.population'] = pop2
window_size, run_x_50, run_y_25, run_y_50, run_y_75 = get_trends(params, x_param, y_param, coll, False, pop!='BCG')
windows.add(window_size)
if logx:
run_x_50 = np.log10(run_x_50)
if logy:
run_y_25, run_y_50, run_y_75 = np.log10(run_y_25), np.log10(run_y_50), np.log10(run_y_75)
if needs_labels:
ax.plot(run_x_50, run_y_50, label='50\%', color='C0')
ax.fill_between(run_x_50, run_y_25, run_y_75, color='C0', alpha=.5)
needs_labels = False
else:
ax.plot(run_x_50, run_y_50, color='C0')
ax.fill_between(run_x_50, run_y_25, run_y_75, color='C0', alpha=.5)
# Make the plot pretty
ax.set_xlabel(get_label(x_param, logx))
ax.set_ylabel(get_label(y_param, logy))
windows_txt = str(min(windows))
if len(windows) > 1:
windows_txt += str(-1*max(windows))
titletxt = '%s%s%ssources (window size: %s)' % (morph+' ' if type(morph) is str else '', align+' ' if type(align) is str else '', pop+' ' if type(pop) is str and pop!='separate' else '', windows_txt)
if title:
ax.set_title('All '+titletxt if titletxt[0:7]=='sources' else titletxt[0].upper()+titletxt[1:])
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
if square:
ax.set_aspect('equal', adjustable='box')
if y_param == 'using_peaks.bending_corrected':
params['WHL.population'] = 'outer'
bend = []
for i in coll.find(params):
bend.append(i['using_peaks']['bending_corrected'])
if logy:
ax.axhline(np.log10(np.median(bend)), ls=':', c='k')
else:
ax.axhline(np.median(bend), ls=':', c='k')
elif y_param == 'using_peaks.bending_excess':
ax.axhline(0, ls=':', c='k')
plt.tight_layout()
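# Example usage (parameter choices are illustrative only):
#   plot_running('WHL.r/r500', 'using_peaks.bending_excess', morph='double', pop='non-BCG', logx=True)
# plots the running median (with interquartile band) of excess bending against
# cluster-centric separation for double sources, split into the inner and outer populations.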
def get_trends(params, x_param, y_param, coll, binned, combine_bcg=True):
x_param_list = x_param.split('.')
y_param_list = y_param.split('.')
x, y = [], []
for i in coll.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y.append(i[y_param_list[0]][y_param_list[1]])
x = np.array(x)
y = np.array(y)
window_size = min(len(x)/10, 100)
if 'WHL.population' in params and params['WHL.population'] == 'BCG' and combine_bcg:
run_x_50 = [0.01, 0.011]
run_y_25 = 2*[np.percentile(y, 25)]
run_y_50 = 2*[np.percentile(y, 50)]
run_y_75 = 2*[np.percentile(y, 75)]
else:
run_x_50 = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y_25 = np.array([np.percentile(y[ix:ix+window_size], 25) for ix in np.arange(len(y)-window_size+1)])
run_y_50 = np.array([np.percentile(y[ix:ix+window_size], 50) for ix in np.arange(len(y)-window_size+1)])
run_y_75 = np.array([np.percentile(y[ix:ix+window_size], 75) for ix in np.arange(len(y)-window_size+1)])
if binned:
return window_size, run_x_50, run_y_50
else:
return window_size, run_x_50, run_y_25, run_y_50, run_y_75
def get_label(param, log=False):
if param == 'WHL.r/r500':
label = 'Separation ($r_{500}$)'
elif param == 'WHL.M500':
label = 'Cluster mass ($10^{14}~M_\odot$)'
elif param == 'WHL.P':
label = 'ICM pressure (keV cm$^{-3}$)'
elif 'bending_angle' in param:
label = 'Bending angle (deg)'
elif 'bending_corrected' in param:
label = 'Corrected bending angle (deg)'
elif 'bending_excess' in param:
label = 'Excess bending angle (deg)'
elif 'asymmetry' in param:
label = 'Asymmetry'
elif param == 'RGZ.size_arcmin':
label = 'Size (arcmin)'
elif param == 'RGZ.size_kpc':
label = 'Size (kpc)'
elif param == 'WHL.grad_P':
label = 'Pressure gradient (keV cm$^{-3}$ kpc$^{-1}$)'
elif param == 'RGZ.luminosity':
label = 'Radio luminosity (W Hz$^{-1}$)'
elif param == 'best.redshift':
label = '$z$'
else:
label = param.replace('_', '\_')
if log:
label = '$\log_{10}$ (' + label.replace('(', '[').replace(')', ']') + ')'
return label
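# e.g. get_label('WHL.M500', log=True) returns
#   '$\log_{10}$ (Cluster mass [$10^{14}~M_\odot$])'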
def get_params(param_list, coll=bending_15, morph=None, pop=None):
'''
Returns an array containing the values of the specified parameters for all sources in the sample
'''
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer'], "pop must be 'BCG', 'inner', or 'outer'"
params = total_cuts.copy()
if morph is not None:
params['RGZ.morphology'] = morph
if pop is not None:
params['WHL.population'] = pop
data = np.zeros([len(param_list), coll.find(params).count()])
for jx, gal in enumerate(coll.find(params)):
for ix, param in enumerate(param_list):
datum = gal[param[0]][param[1]]
if datum=='double':
datum = 2
elif datum=='triple':
datum = 3
elif datum=='BCG':
datum = 0
elif datum=='inner':
datum = 1
elif datum=='outer':
datum = 2
data[ix,jx] = datum
return data
def custom_exp(x, a, b):
return a*np.exp(b*x)
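# custom_exp(x, a, b) = a*exp(b*x) is the model used by bending_correct() below: it is
# fitted to the running median of bending angle versus (size - median size), and each
# observed angle is then rescaled by a / custom_exp(size - median size, a, b).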
def bending_correct(coll=bending_15, window_size=100, plot=False, update=False, methods=None, comp_err=False):
'''
Fit and apply a correction for the angular-size dependence of the bending angle
'''
if methods is None:
methods = ['using_peaks', 'using_contour']
elif type(methods) is str:
methods = [methods]
# Repeat for both morphologies and angle-measuring methods separately
for morph in ['double', 'triple']:
for method in methods:
# Print progress
print morph, method
# Collect data from outer region
sizes, angles, separations = [], [], []
params = total_cuts.copy()
del params['using_peaks.bending_corrected']
params['RGZ.morphology'] = morph
params[method+'.bending_angle'] = total_cuts['using_peaks.bending_corrected']
params['WHL.population'] = 'outer'
for gal in bending_15.find(params).sort('RGZ.size_arcmin', 1):
sizes.append(gal['RGZ']['size_arcmin'])
angles.append(gal[method]['bending_angle'])
separations.append(gal['WHL']['r/r500'])
sizes = np.array(sizes)
angles = np.array(angles)
separations = np.array(separations)
# Find running trend
sizes_running = np.array([np.median(sizes[ix:ix+window_size]) for ix in np.arange(len(sizes)-window_size+1)])
angles_running = np.array([np.median(angles[ix:ix+window_size]) for ix in np.arange(len(angles)-window_size+1)])
# Find best fit
med_size = np.median(sizes)
popt, pcov = curve_fit(custom_exp, sizes_running-med_size, angles_running)
perr = np.sqrt(np.diagonal(pcov))
angles_best = custom_exp(sizes-med_size, *popt)
angles_best_running = np.array([np.median(angles_best[ix:ix+window_size]) for ix in np.arange(len(angles_best)-window_size+1)])
# Save fits for comparison
if method == 'using_peaks':
if morph == 'double':
d_x = sizes_running
d_y = angles_running
d_popt = popt
d_err = perr
d_x0 = med_size
else:
t_x = sizes_running
t_y = angles_running
t_popt = popt
t_err = perr
t_x0 = med_size
# Plot fits
if plot:
fig, (ax1, ax2) = plt.subplots(2, sharex=True, gridspec_kw={'height_ratios':[3,1]})
ax1.plot(sizes_running, angles_running, label='Running median')
ax1.plot(sizes_running, angles_best_running, ls='--', label='$%.1fe^{%+.1f(x-%.2f)}$\n$R^2 = %.3f$' % (popt[0], popt[1], med_size, r2_score(angles_running, angles_best_running)))
ax1.legend(loc='upper right')
ax1.set_ylabel('Bending angle (deg)')#('$\\mathrm{%s.bending_angle}$' % method).replace('_', '\_'))
#ax1.set_title('Best fit %s sources' % morph)
ax2.plot(sizes_running, angles_running-angles_best_running, label='residual')
ax2.axhline(0, color='k', lw=1)
ax2.set_xlabel('Size (arcmin)')
ax2.set_ylabel('Residual')
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
fig.tight_layout()
plt.subplots_adjust(hspace=0.05)
# Insert to Mongo
if update:
for gal in coll.find():
best_angle = custom_exp(gal['RGZ']['size_arcmin']-med_size, *popt)
corr_angle = popt[0] * gal[method]['bending_angle'] / best_angle
coll.update({'_id':gal['_id']}, {'$set': {method+'.bending_corrected':corr_angle}})
get_bending_excess()
# Plot comparison of fits
if plot:
fig, ax = plt.subplots(1)
ax.plot(d_x, d_y, label='Double sources')
d_rms = np.sqrt(sum((d_y-custom_exp(d_x-d_x0, *d_popt))**2)/len(d_x))
ax.fill_between(d_x, custom_exp(d_x-d_x0, *d_popt)+d_rms, custom_exp(d_x-d_x0, *d_popt)-d_rms, color='C0', alpha=.6)#, label='Best fit doubles')
ax.plot(t_x, t_y, ls='--', label='Triple sources')
t_rms = np.sqrt(sum((t_y-custom_exp(t_x-t_x0, *t_popt))**2)/len(t_x))
ax.fill_between(t_x, custom_exp(t_x-t_x0, *t_popt)+t_rms, custom_exp(t_x-t_x0, *t_popt)-t_rms, color='C1', alpha=.6)#, label='Best fit triples')
ax.legend(loc='upper right')
ax.set_xlabel('Size (arcmin)')
ax.set_ylabel('Bending angle (deg)')
fig.tight_layout()
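# Example usage (this matches the call made at the end of make_catalog):
#   bending_correct(plot=False, update=True, methods=['using_peaks'])
# With update=True the corrected angles are written back to the collection and the
# excess angles are recomputed via get_bending_excess().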
def pca_analysis(param_space, coll=bending_15, morph=None, pop=None):
assert param_space in ['bending', 'WHL']
assert morph in [None, 'double', 'triple'], "morph must be 'double' or 'triple'"
assert pop in [None, 'BCG', 'inner', 'outer', 'non-BCG'], "pop must be 'BCG', 'inner', 'outer', or 'non-BCG'"
if param_space == 'bending':
param_list = np.array([['using_peaks', 'bending_angle'], ['RGZ', 'size_arcmin'], ['RGZ', 'size_kpc'], ['WHL', 'r/r500'], ['WHL', 'M500'], ['WHL', 'orientation_folded'], ['best', 'redshift']])
names = np.array(['log(bending)', 'size_arcmin', 'size_kpc', 'r/r500', 'log(M500)', 'orientation', 'redshift'])
else:
param_list = np.array([['WHL', 'RL*500'], ['WHL', 'r500'], ['WHL', 'M500'], ['WHL', 'zbest']])
names = np.array(['log(RL*500)', 'r500', 'log(M500)', 'redshift'])
# Get data
if pop == 'non-BCG':
data1 = get_params(param_list, coll=coll, morph=morph, pop='inner')
data2 = get_params(param_list, coll=coll, morph=morph, pop='outer')
data = np.hstack([data1, data2.T[data2[3]<=10].T])
else:
data = get_params(param_list, coll=coll, morph=morph, pop=pop)
# Plot bending, mass and separation on log scales
if param_space == 'bending':
for i in [0, 4]:
data[i] = np.log10(data[i])
if pop is None:
data[3] = np.log10(data[3])
else:
for i in [0, 2]:
data[i] = np.log10(data[i])
# Normalize the data
mean = np.mean(data, 1).reshape(-1,1)*np.ones(data.shape)
std = np.std(data, 1).reshape(-1,1)*np.ones(data.shape)
data_normed = (data-mean)/std
# Run PCA
pca = PCA()
output = pca.fit_transform(data_normed.T).T
# Plot the features and PCA outputs
if param_space == 'bending' and pop is None:
names[3] = 'log(r/r500)'
corner(data.T, labels=names)
corner(output.T, labels=['PC%i'%i for i in range(len(names))])
# What the PCs are comprised of, printed as html table
output = np.hstack([names.reshape(-1,1), pca.components_])
print '<table style="width: 100%%;" border="0" width="1887" height="127"><tbody>'
for ix, row in enumerate(output.T):
if ix == 0:
out_str = '<tr><td>PC# (contribution to variance)</td><td>'
else:
out_str = '<tr><td>PC%i (%.3f)</td><td>' % (ix-1, pca.explained_variance_ratio_[ix-1])
for val in row:
try:
fval = float(val)
out_str += '%.2f</td><td>' % fval
except ValueError:
out_str += val + '</td><td>'
print out_str[:-4] + '</tr>'
print '</tbody></table>'
def make_corner_plot():
# Get data
param_list = np.array([['using_peaks', 'bending_angle'], ['RGZ', 'size_arcmin'], ['WHL', 'r/r500'], ['WHL', 'M500'], ['WHL', 'orientation_folded'], ['best', 'redshift']])
data = get_params(param_list, coll=bending_15)
# Discard BCGs
#data = data[:, data[5]!=0][:5]
# Check what outlier cuts need to be made
labels = np.array(['Bending (deg)', 'Size (arcmin)', 'Separation\n(r/r500)', 'Cluster mass\n(10^14 M_sun)', 'Orientation\n(deg)', 'Redshift']) # one label per column of param_list
corner(data.T, labels=labels)
# Apply readability cuts
bending_cut = data[0]<60
separation_cut = data[2]<10
mass_cut = data[3]<24
data = data[:, np.logical_and.reduce((bending_cut, separation_cut, mass_cut))]
corner(data.T, labels=labels)
plt.tight_layout()
def sample_numbers():
param_list = [{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'best.redshift':{'$exists':True}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'best.redshift':{'$exists':True}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_angle':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_angle':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}, \
{'RGZ.morphology':'double', 'RGZ.duplicate':0, 'RGZ.radio_consensus':{'$gte':.65}, 'RGZ.size_arcmin':{'$lte':1.5}, 'RGZ.overedge':0, 'using_peaks.bending_corrected':{'$lte':135.}, 'best.redshift':{'$gte':0.02, '$lte':0.8}}]
coll_list = [bent_sources, bending_15, bent_sources, bending_15, bent_sources, bending_15]
label_list = ['No cuts', 'Matched', 'Initial cuts (all)', 'Initial cuts (matched)', 'Final (all)', 'Final (matched)']
for params, coll, label in zip(param_list, coll_list, label_list):
double = coll.find(params).count()
params['RGZ.morphology'] = 'triple'
triple = coll.find(params).count()
print label, '(double, triple, total):', double, triple, double+triple
def contamination():
z = []
dz = []
with open('/home/garon/Documents/RGZdata/bending/dz_whl_with_z.csv', 'r') as f:
r = csv.reader(f)
r.next()
for row in r:
z.append(float(row[0]))
dz.append(float(row[1]))
z = np.array(z)
dz = np.array(dz)
dz_norm = dz/(1+z)
mask = np.abs(dz_norm)<0.04
comp = 0.982 # Completeness fraction using 0.04 cut
cont = 0.215 # Contamination fraction using 0.04 cut
f, ax = plt.subplots(1)
n, bins, patches = ax.hist(dz_norm[np.abs(dz_norm)<=.1], 30)
ax.axvline(0.04, c='r', lw=2, label=r'$\Delta z$ threshold')
ax.axvline(-0.04, c='r', lw=2)
ax.axhline(np.median(n[(np.abs(bins)>0.04)[:-1]]), c='k', lw=2, ls=':', label='Background')
#ax.plot(0, 0, c='w', label='Completeness: %.3f\nContamination: %.3f' % (comp, cont))
ax.legend()
ax.set_xlim(-0.1, 0.1)
ax.set_xlabel('$\Delta z$')
ax.set_ylabel('Count')
#plt.title('Full RGZ-WH15 cross-match')
plt.tight_layout()
sep, n, bins = r500_hist(bending_15, total_cuts, 20)
mean_cont = cont * sum(n) / (np.pi*np.square(bins[-1])) # Number of contaminating sources per square r500
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
cont_per_bin = mean_cont * bin_area
fractional_cont = cont_per_bin / n
fc = interp1d(bins, fractional_cont, kind='slinear')
print '%.3f%% at x=%.2f' % (fc(1.5), 1.5)
#for i in np.logspace(-1, 2, 20):
# xx = brentq(lambda x: fc(x)-i/100., bins[0], bins[-1])
# print '%.1f%% at x=%.2f' % (i,xx)
'''f, ax = plt.subplots(1)
density = n/bin_area
err = np.sqrt(n)/bin_area
ax.errorbar(bins, density, yerr=err, fmt='o', ms=4, label='Source density')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
fitfunc = lambda p, x: p[0]*x + p[1]
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax.plot(bins, pow(10, index*np.log10(bins)+amp), c='k', label='$\sim(r/r_{500})^{%.2f\pm%.2f}$'%(index,index_err))
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(bbox_to_anchor=(1, 1))
ax.set_xlabel('Separation ($r_{500}$)')
ax.set_ylabel('Count / area of annulus')
#ax.set_title('Count density vs. separation')
ax.set_aspect('equal', adjustable='box')
plt.tight_layout()'''
f, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(np.log10(bins), np.log10(n), label='Observed')
ax1.plot(np.log10(bins), np.log10(cont_per_bin), ls='--', label='Contamination')
ax1.legend(loc='lower right')
ax1.set_ylim(-2.24, 3.24)
ax1.set_yticks(np.arange(-2,4))
ax1.set_ylabel('$\log_{10}$ (Count)')
#ax1.set_title('Contamination')
ax2.plot(np.log10(bins), np.log10(fractional_cont), c='C2', label='Contamination\nfraction')
ax2.legend(loc='lower right')
ax2.axhline(0, ls=':', c='k')
ax2.set_xlabel(get_label('WHL.r/r500', True))
ax2.set_ylabel('$\log_{10}$ (Fraction)')
plt.tight_layout()
def r500_hist(coll, params, bin_count=20):
fig, ax = plt.subplots(1)
sep = []
for i in coll.find(params):
sep.append(i['WHL']['r/r500'])
sep = np.array(sep)
min_sep = min(np.log10(sep))
sep = np.clip(sep, .01, None) # Combine everything less than 0.01 into one bin
n, bins, patches = ax.hist(np.log10(sep), bins=bin_count)
bins0 = bins[0]
bins[0] = min_sep
n, bins, patches = ax.hist(sep, bins=pow(10,bins))
ax.set_xscale('log')
bins[0] = pow(10,bins0)
return sep, n, (bins[:-1]+bins[1:])/2.
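# r500_hist returns the raw separations, the per-bin counts, and the bin centres; the
# bins are logarithmically spaced, with all separations below r/r500 = 0.01 collected
# into the first bin.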
def orientation(bent_cutoff=None, folded=True, r_min=0.01, r_max=10):
if bent_cutoff is None:
bent_cutoff = get_bent_cut()
print 'Bending excess cutoff: %.2f' % bent_cutoff
sep = []
bend = []
ori = []
for i in bending_15.find(total_cuts):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
if folded:
ori.append(i['WHL']['orientation_folded'])
else:
ori.append(i['WHL']['orientation_peaks'])
sep = np.array(sep)
bend = np.array(bend)
ori = np.array(ori)
inner = np.logical_and(0.01<sep, sep<1.5)
outer = np.logical_and(1.5<sep, sep<10)
seps = np.vstack([inner, outer])
print sum(inner), 'inner sources,', sum(outer), 'outer sources'
straight = bend<bent_cutoff
bent = bend>=bent_cutoff
bends = np.vstack([bent, straight])
print sum(straight), 'straight sources,', sum(bent), 'bent sources'
if folded:
max_ori = 90.
else:
max_ori = 180.
f, ax = plt.subplots(1)
#ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta<%.0f$ deg)' % (r_min, r_max, excess_cutoff))
data = ori[np.logical_and(straight, np.logical_and(sep>r_min, sep<r_max))]
ad = ad_test(data, stats.uniform(0,max_ori))
print ad.pvalue, z_score(ad.pvalue), len(data)
ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue) )
ax.set_ylim(0, 358)
ax.legend(loc='upper center')
ax.set_ylabel('Count')
ax.set_xlabel('Orientation angle (deg)')
plt.tight_layout()
f, ax = plt.subplots(1)
#ax.set_title('Orientation distribution ($%.2f<r/r_{500}<%.2f$; $\Delta\\theta>%.0f$ deg)' % (r_min, r_max, excess_cutoff))
data = ori[np.logical_and(bent, np.logical_and(sep>r_min, sep<r_max))]
if folded:
ad = ad_test(data, stats.uniform(0,max_ori))
print ad.pvalue, z_score(ad.pvalue), len(data)
else:
towards = data[data<90]
away = data[data>90]
sig = stats.anderson_ksamp([towards, max_ori-away]).significance_level
print 'Symmetric: p=%.4f' % sig
n, bins, _ = ax.hist(data, bins=6, fill=False, hatch='//', label='$n=%i;~p=%.3f$'%(len(data), ad.pvalue if folded else sig) )
ax.set_ylim(0, 75)
ax.legend(loc='upper center')
ax.set_ylabel('Count')
ax.set_xlabel('Orientation angle (deg)')
plt.tight_layout()
unfolded, sep = [], []
params = total_cuts.copy()
params['using_peaks.bending_excess'] = {'$gte': bent_cutoff}
params['WHL.r/r500'] = {'$gte': r_min, '$lte': r_max}
for source in bending_15.find(params):
unfolded.append(source['using_peaks']['bisector'] - source['WHL']['position_angle'])
sep.append(source['WHL']['r/r500'])
sep = np.array(sep)
unfolded = np.array(unfolded)
tangential = np.sin(unfolded*np.pi/180)
radial = np.cos(unfolded*np.pi/180)
beta = 1 - np.var(tangential) / np.var(radial)
print 'beta = %.2f +- %.2f' % (beta, beta*np.sqrt(2./(len(unfolded)-1)))
return None  # NOTE: the diagnostic checks below are unreachable while this early return is in place
y, yerr = [], []
for i in np.arange(int(max_ori/2.)):
a = sum(data<i)
b = sum(data>max_ori-i)
y.append(a-b)
yerr.append(np.sqrt(a+b))
f, ax = plt.subplots(1)
ax.errorbar(np.arange(int(max_ori/2.)), y, yerr=yerr)
ax.axhline(0)
ax.set_xlabel('Orientation cut')
ax.set_ylabel('Count')
ax.set_title('Excess of inward-moving sources')
mask = np.logical_and(bent, np.logical_and(sep>0.01, sep<15))
n = sum(mask)
bin_count = 6
bins = int(1.*n/bin_count) * np.arange(bin_count+1)
bins[-1] = -1
sep_sort = np.sort(sep[mask])
ori_sort = ori[mask][np.argsort(sep[mask])]
x, diff, err = [], [], []
for i,j in zip(bins[:-1],bins[1:]):
inward = sum(ori_sort[i:j]<30)
outward = sum(ori_sort[i:j]>60)
x.append(np.median(sep_sort[i:j]))
diff.append(inward-outward)
err.append(np.sqrt(inward+outward))
f, ax = plt.subplots(1)
ax.errorbar(x, diff, err)
ax.axhline(0, c='k', ls='dotted')
ax.set_xscale('log')
ax.set_xlabel('Separation ($r_{500}$)')
ax.set_ylabel('Count')
ax.set_title('Excess of radially-moving sources')
def orientation_test():
excess_cutoff = get_bent_cut()
r_min, r_max = 0.01, 10
sep, bend, folded, unfolded = [], [], [], []
for i in bending_15.find(total_cuts):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
folded.append(i['WHL']['orientation_folded'])
unfolded.append(i['WHL']['orientation_peaks'])
sep = np.array(sep)
bend = np.array(bend)
folded = np.array(folded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
unfolded = np.array(unfolded)[np.logical_and(bend>excess_cutoff, np.logical_and(sep>r_min, sep<r_max))]
n_f, bins_f, _ = plt.hist(folded, bins=6, alpha=.8, normed=True, fill=False, edgecolor='r', label='Folded')
plt.figure()
n_u, bins_u, _ = plt.hist(unfolded, bins=12, alpha=.8, normed=True, fill=False, hatch='//', label='Unfolded')
# model 1: count goes to 0 at 180 deg
class quad(stats.rv_continuous):
def _argcheck(self, a, b, c):
return np.isfinite(a) and np.isfinite(b) and np.isfinite(c)
def _pdf(self, x, a, b, c):
x0 = 180.
norm = a*x0**3/3. + b*x0**2/2. + c*x0
if type(x) is float:
return max(a*x**2 + b*x + c, 0) / norm
elif type(x) is np.ndarray:
return np.max([a*x**2 + b*x + c, np.zeros(len(x))], axis=0) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0], n_f[-1]/2., 0
popt = np.polyfit([0,90,180], [a,b,c], 2)
case1 = quad(a=0, b=180, shapes='a, b, c')(*popt)
ad1 = ad_test(unfolded, case1)
print 'Model 1: p=%.2g (%.2f sigma)' % (ad1.pvalue, z_score(ad1.pvalue))
# model 2: count plateaus at 90 deg
class piecewise_lin(stats.rv_continuous):
def _pdf(self, x, a, b, c):
norm = 45.*a + 135.*c
m = (c-a) / 90.
if type(x) is float:
return (c + m*min([x-90, 0])) / norm
elif type(x) is np.ndarray:
return (c + m*np.min([x-90, np.zeros(len(x))], axis=0)) / norm
else:
raise TypeError('Got %s instead' % str(type(x)))
a, b, c = n_f[0]-n_f[-1]/2., n_f[-1]/2., n_f[-1]/2.
case2 = piecewise_lin(a=0, b=180, shapes='a, b, c')(a, b, c)
ad2 = ad_test(unfolded, case2)
print 'Model 2: p=%.2g (%.2f sigma)' % (ad2.pvalue, z_score(ad2.pvalue))
x = np.arange(181)
plt.plot(x, case1.pdf(x), c='C0', label='Model 1')
plt.plot(x, case2.pdf(x), c='C1', ls=':', label='Model 2')
plt.legend()
plt.ylabel('Normalized count')
plt.xlabel('Orientation angle (deg)')
plt.tight_layout()
def size_dependence():
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.r/r500'] = {'$gte':0.01, '$lt':1.5}
params1['WHL.r/r500'] = {'$gte':1.5, '$lt':10.}
# Get trends
window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
# Downsample size1 to same length as size0
mask = (np.linspace(0,1,len(size0)) * (len(size1)-1)).astype(int)  # NOTE: this downsampling mask is not applied; the full samples are compared below
# Get values
size0, size1 = [], []
for i in bending_15.find(params0):
size0.append(i['RGZ']['size_arcmin'])
for i in bending_15.find(params1):
size1.append(i['RGZ']['size_arcmin'])
# AD test the values
print 'Different separations:', stats.anderson_ksamp([size0, size1])
# Repeat for masses
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.M500'] = {'$lte':10}
params1['WHL.M500'] = {'$gte':15}
size0, size1 = [], []
for i in bending_15.find(params0):
size0.append(i['RGZ']['size_arcmin'])
for i in bending_15.find(params1):
size1.append(i['RGZ']['size_arcmin'])
# AD test the values
print 'Different masses:', stats.anderson_ksamp([size0, size1])
def trend_tests():
# Get trends for mass bins
params0, params1 = total_cuts.copy(), total_cuts.copy()
params0['WHL.r/r500'] = {'$lte':7.8}
params1['WHL.r/r500'] = {'$gte':7.8}
window_size, run_x_50, run_y_25, size0, run_y_75 = get_trends(params0, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep0 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
window_size, run_x_50, run_y_25, size1, run_y_75 = get_trends(params1, 'WHL.r/r500', 'RGZ.size_arcmin', bending_15, False)
sep1 = (run_x_50-run_x_50[0])/(run_x_50[-1]-run_x_50[0])
print stats.mannwhitneyu(size0, size1, alternative='two-sided')
def get_errs(get_errs=False):
first_err = np.sqrt(0.3**2+0.02**2) # https://arxiv.org/pdf/1501.01555.pdf
for source in bending_15.find().batch_size(100):
# Positional errors
if get_errs:
if 'SDSS' in source:
sql = 'select raerr, decerr from photoprimary where objid=%i' % source['SDSS']['objID']
df = SDSS_select(sql)
ra_err = df['raerr'][0]
dec_err = df['decerr'][0]
else:
ir_pos = coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
table = Irsa.query_region(ir_pos, catalog='allwise_p3as_psd', radius=1.*u.arcsec)
ra_err = table['sigra'][0]
dec_err = table['sigdec'][0]
else:
ra_err = source['best']['ra_err']
dec_err = source['best']['dec_err']
pos_err = np.sqrt(ra_err**2 + dec_err**2 + first_err**2)
# Morphology errors
area = 0
for comp in source['RGZ']['components']:
area += comp['solid_angle']
# Total errors
size = 60.* source['RGZ']['size_arcmin']
frac_pos_err = pos_err / size / 4. # fractional pos error
morph_err = area / size**2 * 180. / np.pi # total morph error in deg
frac_morph_err = morph_err / source['using_peaks']['bending_angle']
total_err = np.sqrt(frac_pos_err**2 + frac_morph_err**2)
bend_err = total_err * source['using_peaks']['bending_angle']
bending_15.update({'_id':source['_id']}, {'$set': {'best.ra_err':ra_err, 'best.dec_err':dec_err, 'best.frac_positional_err':frac_pos_err, 'RGZ.solid_angle':area, 'RGZ.frac_morphology_err':frac_morph_err, 'using_peaks.bending_frac_err':total_err, 'using_peaks.bending_err':bend_err}})
window, size, area_25, area_50, area_75 = get_trends(total_cuts, 'RGZ.size_arcmin', 'RGZ.solid_angle', bending_15, False)
m, b = np.polyfit(size, area_50, 1)
y = m*size+b
#plt.plot(size, area_50, label='Running median')
#plt.plot(size, y, label='%.0fx%+.0f\nR^2=%.3f' % (m, b, r2_score(area_50,y)))
#plt.legend()
def rmsd(params=total_cuts.copy(), x_param='RGZ.size_arcmin', y_param='bending_angle', plot=True, coll=bending_15):
x_param_list = x_param.split('.')
y0_param_list = ['using_peaks', y_param]
y1_param_list = ['using_contour', y_param]
x, y0, y1 = [], [], []
for i in coll.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y0.append(i[y0_param_list[0]][y0_param_list[1]])
y1.append(i[y1_param_list[0]][y1_param_list[1]])
x = np.array(x)
y0 = np.array(y0)
y1 = np.array(y1)
window_size = min(len(x)/10, 100)
if 'WHL.population' in params and params['WHL.population'] == 'BCG':
run_x = [0.01, 0.011]
run_y0 = 2*[np.percentile(y0, 50)]
run_y1 = 2*[np.percentile(y1, 50)]
run_rmsd = 2*[np.sqrt(sum((y0-y1)**2)/len(x))]
else:
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
if plot:
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(run_x, run_y0, label='Peak method')
ax1.plot(run_x, run_y1, label='Contour method')
ax1.legend(loc='best')
ax1.set_ylabel(get_label(y_param))
#ax1.set_title('%s comparison' % y_param)
ax2.plot(run_x, run_rmsd)
ax2.set_xlabel(get_label(x_param))
ax2.set_ylabel('RMS difference')
fig.tight_layout()
fig, ax = plt.subplots(1)
ax.plot(run_x, run_y0, label='Bending angle (peak method)')
#a, b = np.polyfit(run_x[run_x<1.05], run_rmsd[run_x<1.05], 1)
#ax.plot(run_x, a*run_x+b, label='rms difference linear fit')
ax.plot(run_x, run_rmsd, label='RMS difference', ls='--')
ax.legend(loc='best')
ax.set_xlabel(get_label(x_param))
ax.set_ylabel('Angle (deg)')
#ax.set_title('Bending error comparison')
fig.tight_layout()
return window_size, run_x, run_y0, run_y1, run_rmsd
def rmsd_debug():
params = total_cuts.copy()
params['using_peaks.bending_angle'] = params['using_peaks.bending_corrected']
del params['using_peaks.bending_corrected']
x_param = 'RGZ.size_arcmin'
y_param = 'bending_angle'
x_param_list = x_param.split('.')
y0_param_list = ['using_peaks', y_param]
y1_param_list = ['using_contour', y_param]
x, y0, y1, zid = [], [], [], []
for i in bending_15.find(params).sort(x_param, 1):
x.append(i[x_param_list[0]][x_param_list[1]])
y0.append(i[y0_param_list[0]][y0_param_list[1]])
y1.append(i[y1_param_list[0]][y1_param_list[1]])
zid.append(i['RGZ']['zooniverse_id'])
x = np.array(x)
y0 = np.array(y0)
y1 = np.array(y1)
window_size = min(len(x)/10, 100)
print 'Original sample:', len(x)
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
fig, ax = plt.subplots(1)
plt.scatter(x, y0, s=1, label='using peaks')
plt.scatter(x, y1, s=1, label='using contour')
plt.plot(run_x, run_rmsd, c='k', label='rmsd')
plt.xlabel(get_label(x_param))
plt.ylabel(get_label('%s.%s' % tuple(y0_param_list)))
'''outlier = np.logical_and(np.logical_and(x>1.17, x<1.19), np.logical_and(y1>105, y1<109))
x = x[np.logical_not(outlier)]
y1 = y1[np.logical_not(outlier)]
print np.where(outlier, zid, False)[outlier][0]
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
plt.plot(run_x, run_rmsd, c='g', label='outlier\nremoved')
ell = matplotlib.patches.Ellipse(xy=(1.18,107), width=0.06, height=13, fill=False, color='r', label='outlier')
ax.add_artist(ell)'''
logdy = np.log10(np.abs(y0-y1))
mask = logdy < 3*np.std(logdy)
outliers = np.logical_not(mask)
plt.scatter(x[outliers], y0[outliers], c='g', label='outliers', s=1)
plt.scatter(x[outliers], y1[outliers], c='g', s=1)
x = x[mask]
y0 = y0[mask]
y1 = y1[mask]
print 'Outliers:', sum(outliers)
run_x = np.array([np.percentile(x[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y0 = np.array([np.percentile(y0[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_y1 = np.array([np.percentile(y1[ix:ix+window_size], 50) for ix in np.arange(len(x)-window_size+1)])
run_rmsd = np.array([np.sqrt(sum((y0[ix:ix+window_size]-y1[ix:ix+window_size])**2)/window_size) for ix in np.arange(len(x)-window_size+1)])
plt.plot(run_x, run_rmsd, c='g', label='$<3\sigma$')
plt.legend()
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : A numobservations-by-numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:,None]
median = np.median(points, axis=0)
diff = np.sum((points - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
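# Example (synthetic data): with 100 draws from N(0,1) plus one value of 100,
#   is_outlier(np.append(np.random.randn(100), 100.))
# returns a boolean array that is (almost surely) True only for the injected point.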
def z_score(p_vals, tail='two-sided'):
assert tail in ['one-sided', 'two-sided'], 'tail must be one-sided or two-sided'
if tail == 'one-sided':
return stats.norm.ppf(1-p_vals)
elif tail == 'two-sided':
return stats.norm.ppf(1-p_vals/2.)
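# e.g. z_score(0.05) ~ 1.96 (two-sided) and z_score(0.05, tail='one-sided') ~ 1.64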
def mass_comp(region):
params = total_cuts.copy()
params['WHL.population'] = region
mass = []
bend = []
for i in bending_15.find(params):
mass.append(i['WHL']['M500'])
bend.append(i['using_peaks']['bending_excess'])
mass = np.array(mass)
bend = np.array(bend)
rho = stats.spearmanr(mass, bend)
print 'p = ', rho.pvalue
print z_score(rho.pvalue), 'sigma'
w, run_mass, run_bend = get_trends(params, 'WHL.M500', 'using_peaks.bending_excess', bending_15, True, False)
popt = np.polyfit(run_mass, run_bend, 1)
lower, upper = 5, 20
print 'Rise of %.2f deg between %i and %i x 10^14 M_sun' % (popt[0]*(upper-lower), lower, upper)
return None  # NOTE: the high/low mass comparison below is unreachable while this early return is in place
high = mass>12
low = mass<12
print sum(high), 'high mass sources,', sum(low), 'low mass sources'
print 'bending difference:', np.median(bend[high])-np.median(bend[low])
ad = stats.anderson_ksamp([bend[high], bend[low]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[high], normed=True, alpha=.8, bins=11*(np.arange(13)-1), label='$M_{500} > 1.2\\times 10^{15}~M_\\odot$')
plt.hist(bend[low], normed=True, alpha=.8, bins=11*(np.arange(13)-1), label='$M_{500} < 1.2\\times 10^{15}~M_\\odot$')
plt.plot(0, 0, color='w', label='$p=%f$'%ad.significance_level)
plt.legend()
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
def mass_pop_comp():
bent_cut = get_bent_cut()
bent = []
straight = []
for i in bending_15.find(total_cuts):
if i['using_peaks']['bending_excess']>bent_cut:
bent.append(i['WHL']['M500'])
else:
straight.append(i['WHL']['M500'])
ad = stats.anderson_ksamp([bent, straight])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
def bcg_comp():
sep = []
bend = []
for i in bending_15.find(total_cuts.copy()):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
sep = np.array(sep)
bend = np.array(bend)
bcg = sep<0.01
outer = np.logical_and(sep>1.5, sep<10)
print sum(bcg), 'BCGs,', sum(outer), 'outer sources'
ad = stats.anderson_ksamp([bend[bcg], bend[outer]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[bcg], normed=True, alpha=.8, bins=10*np.arange(15), label='$r/r_{500} < 0.01$')
plt.hist(bend[outer], normed=True, alpha=.8, bins=10*np.arange(15), label='$1.5 < r/r_{500} < 10$')
plt.plot(0, 0, color='w', label='p=%.5f'%ad.significance_level)
plt.legend()
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
def asym_comp(align='radial', update_cut=None):
if update_cut is not None:
get_asymmetry(update_cut)
params = total_cuts.copy()
params['WHL.alignment'] = align
params['WHL.population'] = 'inner'
print 'Alignment:', align
sep = []
asym = []
p = []
grad_p = []
for i in bending_15.find(params):
sep.append(i['WHL']['r/r500'])
asym.append(i['using_contour']['asymmetry'])
p.append(i['WHL']['P'])
grad_p.append(i['WHL']['grad_P'])
sep = np.array(sep)
asym = np.array(asym)
p = np.array(p)
grad_p = np.array(grad_p)
print 'n:', len(sep)
popt, pcov = curve_fit(lambda x, a, b: a*x+b, sep, asym)
perr = np.sqrt(np.diag(pcov))
print 'slope:', popt[0], '+-', perr[0]
s = stats.spearmanr(sep, asym)
print 'r/r500:', s.pvalue, z_score(s.pvalue)
s = stats.spearmanr(p, asym)
#s = stats.spearmanr(p[p>5e-4)], asym[p>5e-4)])
print 'P:', s.pvalue, z_score(s.pvalue)
s = stats.spearmanr(grad_p, asym)
print 'grad_P:', s.pvalue, z_score(s.pvalue)
def excess_comp():
params = total_cuts.copy()
params['WHL.population'] = 'inner'
sep = []
bend = []
for i in bending_15.find(params):
sep.append(i['WHL']['r/r500'])
bend.append(i['using_peaks']['bending_excess'])
sep = np.array(sep)
bend = np.array(bend)
s = stats.spearmanr(sep, bend)
print s, z_score(s.pvalue)
def bend_corr_comp():
params = total_cuts.copy()
params['WHL.population'] = 'outer'
morph = []
bend = []
size = []
for source in bending_15.find(params):
morph.append(2 if source['RGZ']['morphology']=='double' else 3)
bend.append(source['using_peaks']['bending_angle'])
size.append(source['RGZ']['size_arcmin'])
morph = np.array(morph)
bend = np.array(bend)
size = np.array(size)
ad = stats.anderson_ksamp([bend[morph==2], bend[morph==3]])
print 'p =', ad.significance_level
print z_score(ad.significance_level), 'sigma'
plt.hist(bend[morph==2], normed=True, alpha=0.8, label='double', bins=20)
plt.hist(bend[morph==3], normed=True, alpha=0.8, label='triple', bins=20)
plt.xlabel(get_label('using_peaks.bending_angle'))
plt.ylabel('Normalized count')
plt.legend(loc='upper right')
min_size = max(min(size[morph==2]), min(size[morph==3]))
max_size = min(max(size[morph==2]), max(size[morph==3]))
# NOTE: the lines below (random sizes and two hard-coded exponentials, presumably the
# size-correction fits for the double and triple populations) are defined but never used in this function
x = np.random.uniform(min_size, max_size, 1000)
f1 = lambda x: 9.7*np.exp(-1.5*(x-0.54))
f2 = lambda x: 7.6*np.exp(-1.3*(x-0.77))
def excess(bending, baseline):
diff = bending**2 - baseline**2
return np.sign(diff) * np.sqrt(np.abs(diff))
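# excess() is a signed difference in quadrature, e.g. excess(15., 10.) = +sqrt(125) ~ 11.2
# and excess(5., 10.) = -sqrt(75) ~ -8.7 (all in degrees here).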
def get_median_bend(params=total_cuts.copy()):
params['WHL.population'] = 'outer'
bend = []
for i in bending_15.find(params):
bend.append(i['using_peaks']['bending_corrected'])
return np.median(bend)
def get_bending_excess(params=total_cuts.copy()):
median_bend = get_median_bend(params)
for i in bending_15.find(params):
bend = i['using_peaks']['bending_corrected']
bending_15.update({'_id':i['_id']}, {'$set':{'using_peaks.bending_excess':excess(bend, median_bend)}})
for i in bent_sources.find(params):
bend = i['using_peaks']['bending_corrected']
bent_sources.update({'_id':i['_id']}, {'$set':{'using_peaks.bending_excess':excess(bend, median_bend)}})
def get_asymmetry(cut, coll=bending_15, params=total_cuts.copy()):
for method in ['using_peaks', 'using_contour']:
print method
if 'WHL' in coll.find_one(params):
for i in coll.find(params):
angle_c = coord.Angle(i['WHL']['position_angle']*u.deg)
angle_0 = coord.Angle(i[method]['pos_angle_0']*u.deg)
angle_1 = coord.Angle(i[method]['pos_angle_1']*u.deg)
diff_0 = min( (angle_c-angle_0).wrap_at('360d'), (angle_0-angle_c).wrap_at('360d') )
diff_1 = min( (angle_c-angle_1).wrap_at('360d'), (angle_1-angle_c).wrap_at('360d') )
if diff_0 < diff_1:
inner = i[method]['tail_deg_0']
outer = i[method]['tail_deg_1']
else:
inner = i[method]['tail_deg_1']
outer = i[method]['tail_deg_0']
if (diff_0.degree<cut and diff_1.degree>180-cut) or (diff_1.degree<cut and diff_0.degree>180-cut):
alignment = 'radial'
elif cut<diff_0.degree and diff_0.degree<180-cut and cut<diff_1.degree and diff_1.degree<180-cut:
alignment = 'tangential'
else:
alignment = 'other'
if method == 'using_contour':
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer, 'WHL.alignment':alignment}})
else:
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer}})
else:
for i in coll.find(params):
if np.random.randint(0,2):
inner = i[method]['tail_deg_0']
outer = i[method]['tail_deg_1']
else:
inner = i[method]['tail_deg_1']
outer = i[method]['tail_deg_0']
coll.update({'_id':i['_id']}, {'$set':{method+'.asymmetry':inner/outer}})
def pressure_calc():
'''Calculates the pressure (in keV/cm^3) using Arnaud et al. 2010'''
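# Generalized NFW pressure profile (Arnaud et al. 2010), written out for reference:
#   P(x) = P500(z, M500) * p(x) * [M500 / (3e14 h70^-1 Msun)]^(alphaP + alphaP'(x)),  x = r/r500
#   p(x) = P0 * (c500 x)^(-gamma) * [1 + (c500 x)^alpha]^((gamma - beta)/alpha)
# The pressure gradient across each source is then approximated below by a finite
# difference of P(x) along the projected radio tails.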
h70 = float(cosmo.H(0) / (70.*u.km/u.Mpc/u.s) )
alphaP = 1./0.561 - 5./3.
P0 = 8.403*pow(h70,-1.5)
c500, gamma, alpha, beta = 1.177, 0.3081, 1.0510, 5.4905
h = lambda z: float(cosmo.H(z) / cosmo.H(0))
P500 = lambda z, M500: 1.65e-3 * pow(h(z),8./3.) * pow(M500*h70/3.,2./3.) * pow(h70,2.)
pp = lambda x: P0 * pow(c500*x,-1*gamma) * pow(1+pow(c500*x,alpha),(gamma-beta)/alpha)
alphaP_prime = lambda x: 0.10 - (alphaP+0.10) * pow(2*x,3) / (1+pow(2*x,3))
P = lambda x, z, M500: P500(z,M500) * pp(x) * pow(M500*h70/3.,alphaP+alphaP_prime(x))
for source in bending_15.find():
x = source['WHL']['r/r500']
z = source['WHL']['zbest']
M500 = source['WHL']['M500']
theta0 = np.abs(source['using_peaks']['pos_angle_0'] - source['WHL']['position_angle'])
theta1 = np.abs(source['using_peaks']['pos_angle_1'] - source['WHL']['position_angle'])
if theta0>90:
theta0 = 180 - theta0
if theta1>90:
theta1 = 180 - theta1
dx0 = source['using_contour']['tail_kpc_0'] * np.cos(np.radians(theta0)) / 1000. / source['WHL']['r500'] # theta is in degrees
dx1 = source['using_contour']['tail_kpc_1'] * np.cos(np.radians(theta1)) / 1000. / source['WHL']['r500']
dP = np.abs( (P(np.abs(x+dx0),z,M500) - P(np.abs(x+dx1),z,M500)) / (dx0 - dx1) )
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.P500':P500(z,M500), 'WHL.P':P(x,z,M500), 'WHL.grad_P':dP}})
def get_fits(params, outd):
import shutil
if os.path.exists(outd):
count = len(os.listdir(outd))
cont = raw_input('%s exists and contains %i files; continue? (y/n) ' % (outd,count))
if cont.lower() == 'n':
return
else:
print 'Initializing', outd
os.makedirs(outd)
with open('%s/first_fits.txt' % rgz_path) as f:
lines = f.readlines()
pathdict = {}
for l in lines:
spl = l.split(' ')
pathdict[spl[1].strip()] = '%s/rgz/raw_images/RGZ-full.%i/FIRST-IMGS/%s.fits' % (data_path, int(spl[0]), spl[1].strip())
for source in bending_15.find(params).sort('WHL.r/r500', -1).limit(55):
print source['WHL']['r/r500']
zid = source['RGZ']['zooniverse_id']
fid = catalog.find_one({'zooniverse_id':zid})['first_id']
fits_loc = pathdict[fid]
shutil.copy(fits_loc, outd+fid+'.fits')
def make_sdss_sample():
if not sdss_sample.count():
sdss_sample.create_index('bestObjID', unique=True)
else:
print '%i entries already in catalog' % sdss_sample.count()
params = total_cuts.copy()
params['best.ra'] = {'$gt':100, '$lt':275}
zs, ras, decs = [], [], []
for source in bending_15.find(params):
zs.append(source['best']['redshift'])
ras.append(source['best']['ra'])
decs.append(source['best']['dec'])
zs = np.array(zs)
ras = np.array(ras)
decs = np.array(decs)
for x in [zs, ras, decs]:
x.sort()
zs_cdf = np.arange(len(zs))/float(len(zs)-1)
zs_cdf_inv = interp1d(zs_cdf, zs)
ras_cdf = np.arange(len(ras))/float(len(ras)-1)
ras_cdf_inv = interp1d(ras_cdf, ras)
decs_cdf = np.arange(len(decs))/float(len(decs)-1)
decs_cdf_inv = interp1d(decs_cdf, decs)
for i in range(20000):
probs = np.random.uniform(0,1,3)
z = zs_cdf_inv(probs[0])
ra = ras_cdf_inv(probs[1])
dec = decs_cdf_inv(probs[2])
query = '''select top 1 s.bestObjID, s.ra, s.dec, s.z, s.zErr,
g.cModelMag_u, g.cModelMag_g, g.cModelMag_r, g.cModelMag_i, g.cModelMag_z
from SpecObj as s
join GalaxyTag as g on s.bestObjID=g.objID
where s.class="GALAXY" and s.zWarning=0 and (s.z between %f and %f) and (s.ra between %f and %f) and
(s.dec between %f and %f)''' % (0.99*z, 1.01*z, 0.99*ra, 1.01*ra, 0.99*dec, 1.01*dec)
df = SDSS_select(query)
if len(df):
entry = {}
for key in df:
entry[key] = df[key][0]
try:
sdss_sample.insert(entry)
except pymongo.errors.DuplicateKeyError as e:
print e
pass
print '%i/%i' % (sdss_sample.count(), i+1)
def sdss_patch():
for s in sdss_sample.find({'cModelMag_u':{'$exists':False}}).batch_size(50):
query = '''select cModelMag_u, cModelMag_g, cModelMag_r, cModelMag_i, cModelMag_z
from GalaxyTag as g
where objID=%s''' % s['bestObjID']
df = SDSS_select(query)
update = {}
try:
for key in df:
update[key] = df[key][0]
sdss_sample.update({'_id':s['_id']}, {'$set':update})
except IndexError:
pass
for s in sdss_sample.find():
xmatch.update({'SDSS._id':s['_id']}, {'$set':{'SDSS':s}})
def plot_sdss_sample():
params = total_cuts.copy()
params['best.ra'] = {'$gt':100, '$lt':275}
params['SDSS.i'] = {'$exists':True}
zs, ras, decs, mags = [], [], [], []
for source in bending_15.find(params):
zs.append(source['best']['redshift'])
ras.append(source['best']['ra'])
decs.append(source['best']['dec'])
mags.append(source['SDSS']['i'])
z_r, ra_r, dec_r, mag_r = [], [], [], []
for source in sdss_sample.find({'cModelMag_i':{'$exists':True}}):
z_r.append(source['z'])
ra_r.append(source['ra'])
dec_r.append(source['dec'])
mag_r.append(source['cModelMag_i'])
fig, ax = plt.subplots(1)
ax.hist(zs, bins=15, alpha=.8, normed=True, label='Bending sample')
ax.hist(z_r, bins=15, alpha=.8, normed=True, label='SDSS sample')
ax.legend()
ax.set_xlabel('z')
ax.set_ylabel('Normalized count')
ax.set_title('Redshift distribution (northern region)')
fig, ax = plt.subplots(1)
ax.hist(mags, bins=15, alpha=.8, normed=True, label='Bending sample')
ax.hist(mag_r, bins=15, alpha=.8, normed=True, label='SDSS sample')
ax.legend()
ax.set_xlabel('i-band (mag)')
ax.set_ylabel('Normalized count')
ax.set_title('i-band magnitude distribution (northern region)')
fig, ax = plt.subplots(1)
ax.scatter(ras, decs, s=1, alpha=.8, label='Bending sample')
ax.scatter(ra_r, dec_r, s=1, alpha=.8, label='SDSS sample')
ax.legend()
ax.set_xlabel('RA (deg)')
ax.set_ylabel('Dec (deg)')
ax.set_title('Skymap of sources (northern region)')
def sdss_xmatch():
count = 0
for source in sdss_sample.find().batch_size(50):
count += 1
ir = coord.SkyCoord(source['ra'], source['dec'], unit=(u.deg,u.deg), frame='icrs')
cluster_w = get_whl(ir, source['z'], source['zErr'], 15, 0.04*(1+source['z']))
if cluster_w is not None:
whl_prop = {}
c_pos = coord.SkyCoord(cluster_w['RAdeg'], cluster_w['DEdeg'], unit=(u.deg,u.deg), frame='icrs')
c_sep_arc = c_pos.separation(ir)
zbest = cluster_w['zspec'] if 'zspec' in cluster_w else cluster_w['zphot']
c_sep_mpc = float(cosmo.angular_diameter_distance(zbest)/u.Mpc * c_sep_arc.to(u.rad)/u.rad)
c_pos_angle = c_pos.position_angle(ir)
r = c_sep_mpc/cluster_w['r500']
if r < 0.01:
pop = 'BCG'
elif r >= 1.5:
pop = 'outer'
else:
pop = 'inner'
whl_prop = {'ra':c_pos.ra.deg, 'dec':c_pos.dec.deg, 'separation_deg':c_sep_arc.deg, 'separation_Mpc':c_sep_mpc, 'r/r500':r, 'population':pop, 'zbest':zbest}
for key in ['_id', 'N500', 'N500sp', 'RL*500', 'name', 'r500', 'zphot', 'zspec', 'M500']:
if key in cluster_w:
whl_prop[key] = cluster_w[key]
entry = {'SDSS':source, 'WHL':whl_prop}
print '%i/%i' % (xmatch.count(), count)
xmatch.insert(entry)
def sdss_density():
params = total_cuts.copy()
#params['SDSS.spec_redshift'] = {'$exists':True}
f1, (ax1, ax2) = plt.subplots(1, 2, sharey=True, subplot_kw=dict(adjustable='datalim', aspect='equal'))
ax = f1.add_subplot(111, frameon=False)
ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
f2, ax0 = plt.subplots(1)
sep, n, bins = r500_hist(bending_15, params, 20)
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
density = n/bin_area
err = np.sqrt(n)/bin_area
ax1.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='RGZ')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
if sum(np.isnan(logyerr)):
logx = logx[np.logical_not(np.isnan(logyerr))]
logy = logy[np.logical_not(np.isnan(logyerr))]
logyerr = logyerr[np.logical_not(np.isnan(logyerr))]
fitfunc = lambda p, x: p[0]*x + p[1]
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][0][0]) # out[1] is the covariance matrix; [0][0] is the slope variance, [1][1] the intercept variance
amp_err = np.sqrt(out[1][1][1])*amp
ax1.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
err_max = np.maximum( (index+index_err)*np.log10(bins)+(amp+amp_err), (index-index_err)*np.log10(bins)+(amp+amp_err) )
err_min = np.minimum( (index+index_err)*np.log10(bins)+(amp-amp_err), (index-index_err)*np.log10(bins)+(amp-amp_err) )
#ax1.fill_between(np.log10(bins), err_min, err_max, color='k', lw=0, alpha=.5, label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax1.legend(loc='upper right')
ax0.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='RGZ')
ax0.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
r_bcgs = n[0]
r_cluster = sum(n[bins<10])
r_bcg_ratio = 1.*r_bcgs/r_cluster
r_bcg_err = r_bcg_ratio * (1./np.sqrt(r_bcgs) + 1./np.sqrt(r_cluster))
print 'RGZ BCGs: %i/%i (%.1f pm %.1f)%%' % (r_bcgs, r_cluster, 100*r_bcg_ratio, 100*r_bcg_err)
r_within_r500 = sum(n[bins<1][1:])
r_cluster_no_bcgs = sum(n[bins<10][1:])
r_within_ratio = 1.*r_within_r500/r_cluster_no_bcgs
r_within_err = r_within_ratio * (1./np.sqrt(r_within_r500) + 1./np.sqrt(r_cluster_no_bcgs))
print 'RGZ r500: %i/%i (%.1f pm %.1f)%%' % (r_within_r500, r_cluster_no_bcgs, 100*r_within_ratio, 100*r_within_err)
sep, n, bins = r500_hist(xmatch, {}, 20)
bin_area = np.pi * np.array( [np.square(bins[0])] + [np.square(bins[i+1])-np.square(bins[i]) for i in np.arange(len(bins)-1)] )
density = n/bin_area
err = np.sqrt(n)/bin_area
ax2.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='SDSS')
logx = np.log10(bins[1:-3])
logy = np.log10(density[1:-3])
logyerr = err[1:-3] / density[1:-3]
out = leastsq(errfunc, [-1,1], args=(logx, logy, logyerr), full_output=1)
index, amp = out[0]
index_err = np.sqrt(out[1][1][1])
amp_err = np.sqrt(out[1][0][0])*amp
ax2.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
err_max = np.maximum( (index+index_err)*np.log10(bins)+(amp+amp_err), (index-index_err)*np.log10(bins)+(amp+amp_err) )
err_min = np.minimum( (index+index_err)*np.log10(bins)+(amp-amp_err), (index-index_err)*np.log10(bins)+(amp-amp_err) )
#ax2.fill_between(np.log10(bins), err_min, err_max, color='k', lw=0, alpha=.5, label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax2.legend(loc='upper right')
ax0.errorbar(np.log10(bins), np.log10(density), yerr=np.vstack((np.log10(density+err) - np.log10(density), np.log10(density) - np.log10(density-err))), fmt='o', ms=4, label='SDSS')
ax0.plot(np.log10(bins), index*np.log10(bins)+amp, c='k', ls='--', label='$\\alpha = $\n$ %.2f\pm%.2f$'%(index,index_err))
ax0.legend(loc='upper right')
s_bcgs = n[0]
s_cluster = sum(n[bins<10])
s_bcg_ratio = 1.*s_bcgs/s_cluster
s_bcg_err = s_bcg_ratio * (1./np.sqrt(s_bcgs) + 1./np.sqrt(s_cluster))
print 'SDSS BCGs: %i/%i (%.1f pm %.1f)%%' % (s_bcgs, s_cluster, 100*s_bcg_ratio, 100*s_bcg_err)
s_within_r500 = sum(n[bins<1][1:])
s_cluster_no_bcgs = sum(n[bins<10][1:])
s_within_ratio = 1.*s_within_r500/s_cluster_no_bcgs
s_within_err = s_within_ratio * (1./np.sqrt(s_within_r500) + 1./np.sqrt(s_cluster_no_bcgs))
print 'SDSS r500: %i/%i (%.1f pm %.1f)%%' % (s_within_r500, s_cluster_no_bcgs, 100*s_within_ratio, 100*s_within_err)
ax.set_xlabel(get_label('WHL.r/r500', True))
ax1.set_ylabel('$\\log_{10}$ (Surface density of galaxies [$r_{500}^{-2}$])')
#ax.set_title('Surface density vs. separation')
f1.tight_layout()
ax0.set_xlabel(get_label('WHL.r/r500', True))
ax0.set_ylabel('$\\log_{10}$ (Surface density of galaxies [$r_{500}^{-2}$])')
#ax0.set_title('Surface density vs. separation')
f2.tight_layout()
r_s_bcgs = r_bcg_ratio / s_bcg_ratio
r_s_bcgs_err = r_s_bcgs * (r_bcg_err/r_bcg_ratio + s_bcg_err/s_bcg_ratio)
r_s_within = r_within_ratio / s_within_ratio
r_s_within_err = r_s_within * (r_within_err/r_within_ratio + s_within_err/s_within_ratio)
print 'BCG excess: %.3f pm %.3f\nr500 excess: %.3f pm %.3f' % (r_s_bcgs, r_s_bcgs_err, r_s_within, r_s_within_err)
def fractional_bent():
params = total_cuts.copy()
params['best.redshift']['$lte'] = 0.6
sep, bend = [], []
for source in bending_15.find(total_cuts.copy()):
sep.append(source['WHL']['r/r500'])
bend.append(source['using_peaks']['bending_excess'])
sep_field, bend_field = [], []
for source in bent_sources.find(total_cuts.copy()):
if bending_15.find_one({'RGZ.RGZ_id':source['RGZ']['RGZ_id']}) is None:
sep_field.append(99)
bend_field.append(source['using_peaks']['bending_excess'])
sep = np.array(sep+sep_field)
bend = np.array(bend+bend_field)
straight = bend<0
marginal = np.logical_and(0<bend, bend<5)
bent = bend>5
inner = sep<1.5  # np.logical_and(sep>0.01, sep<1.5)
cluster = sep<10
num, denom = sum(sep<1.5), len(sep)
print 'All: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
num, denom = sum(np.logical_and(sep<1.5, bend>get_bent_cut())), sum(bend>get_bent_cut())
print 'Highly bent: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
num, denom = sum(np.logical_and(sep<1.5, bend>50)), sum(bend>50)
print 'Max: %.2f +- %.2f' % (1.*num/denom, 1.*num/denom * (1./np.sqrt(num) + 1./np.sqrt(denom)))
fig, ax = plt.subplots(1)
n_all, bins, _ = ax.hist(np.log10(bend[bent]), bins=15, label='All sources')
n_all = np.insert(n_all, 0, [sum(straight), sum(marginal)])
n_inner, _, _ = ax.hist(np.log10(bend[np.logical_and(bent,inner)]), bins=bins, label='Sources within 1.5 r500')
n_inner = np.insert(n_inner, 0, [sum(np.logical_and(straight,inner)), sum(np.logical_and(marginal,inner))])
ax.set_xlabel('log(Bending excess [deg])')
ax.set_ylabel('Count')
#ax.set_title('Distributions of bending excess')
ax.legend()
plt.tight_layout()
fig, ax = plt.subplots(1)
lin_bins = np.insert(pow(10,(bins[:-1]+bins[1:])/2.), 0, [0, np.median(bend[marginal])])
n_inner = np.array([ sum(bend[inner]>i) for i in lin_bins ], dtype=float)
n_all = np.array([ sum(bend>i) for i in lin_bins ], dtype=float)
ratio = n_inner/n_all
err = ratio * np.sqrt(1/n_inner + 1/n_all)
plt.errorbar(lin_bins, ratio, yerr=err)
ax.set_xlabel('Excess bending angle (deg)')
ax.set_ylabel('Fraction within $1.5~r_{500}$')
#ax.set_title('Fraction of sources in BCG/inner regions of clusters')
plt.tight_layout()
def histedges_equalN(x, nbin):
npt = len(x)
return np.interp(np.linspace(0, npt, nbin + 1), np.arange(npt), np.sort(x))
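# Note added for clarity: histedges_equalN() returns nbin+1 bin edges chosen so
# that each bin holds (roughly) the same number of the input values, by
# interpolating the sorted data at evenly spaced ranks.
# e.g. histedges_equalN([1, 2, 3, 4], 2) -> array([1., 3., 4.]), i.e. two
# values per bin.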
def import_env_csv1():
params = total_cuts.copy()
count = 0
env = {}
dirname = '/home/garon/Documents/RGZdata/bending/'
for filename in ['distantstraight_env.csv', 'distantbent_env.csv']:
with open(dirname+filename, 'r') as f:
r = csv.reader(f)
r.next()
for objID, ra, dec, z, n_objID, n_ra, n_dec, n_z, z_type, cModelMag_r, fracDeV_r, dist in r:
ra, dec, z = float(ra), float(dec), float(z)
params['best.ra'] = {'$gte':ra-1e-3, '$lte':ra+1e-3}
params['best.dec'] = {'$gte':dec-1e-3, '$lte':dec+1e-3}
params['best.redshift'] = {'$gte':z-1e-3, '$lte':z+1e-3}
source = bending_15.find_one(params)
if source is not None:
objID = source['SDSS']['objID']
if objID not in env:
env[objID] = {'objID':objID, 'ra':ra, 'dec':dec, 'z':z, 'bending_excess':source['using_peaks']['bending_excess'], 'neighbors':{}, 'r/r500':source['WHL']['r/r500']}
if np.abs(z-float(n_z))/(1+z) <= 0.04:
ADD = float(cosmo.angular_diameter_distance(z)/u.Mpc)
env[objID]['neighbors'][n_objID] = {'ra':float(n_ra), 'dec':float(n_dec), 'z':float(n_z), 'z_type':z_type, 'cModelMag_r':float(cModelMag_r), 'fracDeV_r':float(fracDeV_r), 'dist_deg':float(dist), 'dist_Mpc':float(dist)*np.pi/180*ADD}
if len(env)>count:
count += 1
print count
db.drop_collection('distant_sources')
distant_sources.create_index('objID', unique=True)
for key in env:
distant_sources.insert(env[key])
def import_env_csv2():
count = 0
env = {}
dirname = '/home/garon/Documents/RGZdata/bending/'
for filename in ['more_source_envPhotoz.csv']:
with open(dirname+filename, 'r') as f:
r = csv.reader(f)
r.next()
for objID, ra, dec, z, bending, r500, n_objID, n_ra, n_dec, n_z, z_type, cModelMag_r, fracDeV_r, dist in r:
ra, dec, z, bending, r500 = float(ra), float(dec), float(z), float(bending), float(r500)
if objID not in env:
env[objID] = {'objID':objID, 'ra':ra, 'dec':dec, 'z':z, 'bending_excess':bending, 'r/r500':r500, 'neighbors':{}}
if objID != n_objID and np.abs(z-float(n_z))/(1+z) <= 0.04:
ADD = float(cosmo.angular_diameter_distance(z)/u.Mpc)
env[objID]['neighbors'][n_objID] = {'ra':float(n_ra), 'dec':float(n_dec), 'z':float(n_z), 'z_type':z_type, 'cModelMag_r':float(cModelMag_r), 'fracDeV_r':float(fracDeV_r), 'dist_deg':float(dist), 'dist_Mpc':float(dist)*np.pi/180*ADD}
if len(env)>count:
count += 1
print count
#db.drop_collection('distant_sources')
#distant_sources.create_index('objID', unique=True)
for key in env:
distant_sources.insert(env[key])
def import_env():
import_env_csv1()
import_env_csv2()
for source in distant_sources.find({'r/r500':{'$exists':False}}):
s2 = bending_15.find_one({'SDSS.objID':source['objID']})
distant_sources.update({'_id':source['_id']}, {'$set':{'r/r500':s2['WHL']['r/r500']}})
def find_more_sources():
bent_cut = get_bent_cut()
with open('more_sources.csv', 'w') as f:
print >> f, 'objID,ra,dec,z,bending,r/r500'
count = distant_sources.find({'bending_excess':{'$gt':bent_cut}}).count()
for source in bending_15.find(total_cuts):
if source['using_peaks']['bending_excess']>30 and source['WHL']['r/r500']>6 and not distant_sources.find({'objID':source['SDSS']['objID']}).count():
print >> f, '%s,%s,%s,%s,%s,%s' % (source['SDSS']['objID'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['using_peaks']['bending_excess'], source['WHL']['r/r500'])
count += 1
if count>=150:
break
print count, 'bent'
with open('more_sources.csv', 'a') as f:
count = distant_sources.find({'bending_excess':{'$lt':bent_cut}}).count()
for source in bending_15.find(total_cuts):
if source['using_peaks']['bending_excess']<10 and 6<source['WHL']['r/r500']<15 and not distant_sources.find({'objID':source['SDSS']['objID']}).count():
print >> f, '%s,%s,%s,%s,%s,%s' % (source['SDSS']['objID'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['using_peaks']['bending_excess'], source['WHL']['r/r500'])
count += 1
if count>=150:
break
print count, 'straight'
def distant_env(bin_count=8, inner_cut=0.05, outer_cut=0.35):
bent_cut = get_bent_cut()
dist_bent, dist_straight, sep = [], [], []
z_bent, z_straight = [], []
for source in distant_sources.find():
if source['bending_excess']>bent_cut:
for neighbor in source['neighbors']:
dist_bent.append(source['neighbors'][neighbor]['dist_Mpc'])
z_bent.append(source['z'])
else:
for neighbor in source['neighbors']:
dist_straight.append(source['neighbors'][neighbor]['dist_Mpc'])
z_straight.append(source['z'])
sep.append(source['r/r500'])
dist_bent = np.array(dist_bent)
dist_bent = dist_bent[dist_bent>inner_cut]
dist_straight = np.array(dist_straight)
dist_straight = dist_straight[dist_straight>inner_cut]
for dist in [dist_bent, dist_straight]:
dist.sort()
n_bent = distant_sources.find({'bending_excess':{'$gt':bent_cut}}).count()
n_straight = distant_sources.find({'bending_excess':{'$lt':bent_cut}}).count()
density_bent = stats.rankdata(dist_bent) / (np.pi*dist_bent**2) / n_bent
density_straight = stats.rankdata(dist_straight) / (np.pi*dist_straight**2) / n_straight
plt.figure()
plt.plot(dist_bent, density_bent, label='Highly bent sources')
plt.plot(dist_straight, density_straight, ls='--', label='Less bent sources')
plt.legend()
plt.xlabel('Projected distance from radio galaxy (Mpc)')
plt.ylabel('Companion density (Mpc$^{-2}$)')
plt.tight_layout()
inner, outer = 2., 3.
mask = np.logical_and(inner<dist_bent, dist_bent<outer)
bg_count_bent = sum(mask)
bg_bent = bg_count_bent / np.pi / (outer**2-inner**2)# / n_bent
mask = np.logical_and(inner<dist_straight, dist_straight<outer)
bg_count_straight = sum(mask)
bg_straight = bg_count_straight / np.pi / (outer**2-inner**2)# / n_straight
plt.figure()
per_bin_bent, bins, _ = plt.hist(dist_bent, bins=np.linspace(inner_cut, 2, bin_count+1))
bin_bg_bent = bg_bent*np.pi*(bins[1:]**2-bins[:-1]**2)
frac_bent = per_bin_bent / bin_bg_bent
frac_err_bent = frac_bent * (1./np.sqrt(per_bin_bent) + 1./np.sqrt(bg_bent))
per_bin_straight, _, _ = plt.hist(dist_straight, bins=bins)
bin_bg_straight = bg_straight*np.pi*(bins[1:]**2-bins[:-1]**2)
frac_straight = per_bin_straight / bin_bg_straight
frac_err_straight = frac_straight * (1./np.sqrt(per_bin_straight) + 1./np.sqrt(bg_straight))
plt.figure()
plt.errorbar(bins[1:], frac_bent, frac_err_bent, label='Highly bent sources')
plt.errorbar(bins[1:]-0.01, frac_straight, frac_err_straight, ls='--', label='Less bent sources')
plt.legend(loc='upper right')
plt.xlabel('Projected distance from radio galaxy (Mpc)')
plt.ylabel('Normalized surface density')
plt.tight_layout()
# 2D statistics
area = np.pi * (outer_cut**2-inner_cut**2)
cum_bent = sum(dist_bent<outer_cut)
cum_excess_bent = cum_bent - bg_bent*area
err_bent = np.sqrt(cum_bent+bg_bent*area**2)
frac_bent = (cum_excess_bent/area) / bg_bent
frac_excess_bent = frac_bent - 1.
frac_err_bent = frac_bent * (err_bent/cum_excess_bent + 1./np.sqrt(bg_count_bent))
cum_straight = sum(dist_straight<outer_cut)
cum_excess_straight = cum_straight - bg_straight*area
err_straight = np.sqrt(cum_straight+bg_straight*area**2)
frac_straight = (cum_excess_straight/area) / bg_straight
frac_excess_straight = frac_straight - 1.
frac_err_straight = frac_straight * (err_straight/cum_excess_straight + 1./np.sqrt(bg_count_straight))
print 'Surface density between %i and %i kpc' % (int(inner_cut*1000), int(outer_cut*1000))
print 'Bent: %.3f +- %.3f (%.2f sigma)' % (frac_bent, frac_err_bent, frac_bent / frac_err_bent)
print 'Straight: %.3f +- %.3f (%.2f sigma)' % (frac_straight, frac_err_straight, frac_straight / frac_err_straight)
print 'Difference: %.3f +- %.3f (%.2f sigma)' % (frac_bent-frac_straight, np.sqrt(frac_err_bent**2 + frac_err_straight**2), (frac_bent-frac_straight) / np.sqrt(frac_err_bent**2 + frac_err_straight**2))
# 3D statistics
vol_sph = 4./3. * np.pi * (outer_cut**3-inner_cut**3)
vol_cyl = np.pi * outer * (outer_cut**2-inner_cut**2)
vol_bg = np.pi * outer * (outer**2-inner**2)
bg_bent_vol = bg_count_bent / vol_bg
cum_bent = sum(dist_bent<outer_cut)
cum_excess_bent = cum_bent - bg_bent_vol*vol_cyl
err_bent = np.sqrt(cum_bent + bg_bent_vol*vol_cyl**2)
frac_bent = (cum_excess_bent/vol_sph) / bg_bent_vol
frac_excess_bent = frac_bent - 1.
frac_err_bent = frac_bent * (err_bent/cum_excess_bent + 1./np.sqrt(bg_count_bent))
bg_straight_vol = bg_count_straight / vol_bg
cum_straight = sum(dist_straight<outer_cut)
cum_excess_straight = cum_straight - bg_straight_vol*vol_cyl
err_straight = np.sqrt(cum_straight + bg_straight_vol*vol_cyl**2)
frac_straight = (cum_excess_straight/vol_sph) / bg_straight_vol
frac_excess_straight = frac_straight - 1.
frac_err_straight = frac_straight * (err_straight/cum_excess_straight + 1./np.sqrt(bg_count_straight))
print '\nVolume density between %i and %i kpc' % (int(inner_cut*1000), int(outer_cut*1000))
print 'Bent: %.3f +- %.3f (%.2f sigma)' % (frac_bent, frac_err_bent, frac_bent / frac_err_bent)
print 'Straight: %.3f +- %.3f (%.2f sigma)' % (frac_straight, frac_err_straight, frac_straight / frac_err_straight)
print 'Difference: %.3f +- %.3f (%.2f sigma)' % (frac_bent-frac_straight, np.sqrt(frac_err_bent**2 + frac_err_straight**2), (frac_bent-frac_straight) / np.sqrt(frac_err_bent**2 + frac_err_straight**2))
med_sep = np.median(sep)
rho_bg = 500. / med_sep**2
print '%.1f, %.1f rho_crit around highly bent and less bent sources' % (frac_bent*rho_bg, frac_straight*rho_bg)
return None
step = inner_cut
x = np.arange(step, 1, step)+step
cum_excess_bent, cum_excess_straight = [], []
frac_excess_bent, frac_excess_straight = [], []
err_bent, err_straight = [], []
frac_err_bent, frac_err_straight = [], []
for i in x:
cum_bent = sum(dist_bent<i)
cum_excess_bent.append(cum_bent - bg_bent*np.pi*i**2)
err_bent.append(np.sqrt(cum_bent+bg_bent*(np.pi*i**2)**2))
frac_bent = cum_bent/(bg_bent*np.pi*i**2)
frac_excess_bent.append(frac_bent - 1.)
frac_err_bent.append(frac_bent * (1./np.sqrt(cum_bent) + 1./np.sqrt(bg_bent)))
cum_straight = sum(dist_straight<i)
cum_excess_straight.append(cum_straight - bg_straight*np.pi*i**2)
err_straight.append(np.sqrt(cum_straight+bg_straight*(np.pi*i**2)**2))
frac_straight = cum_straight/(bg_straight*np.pi*i**2)
frac_excess_straight.append(frac_straight - 1.)
frac_err_straight.append(frac_straight * (1./np.sqrt(cum_straight) + 1./np.sqrt(bg_straight)))
# normalize
cum_excess_bent_norm = np.array(cum_excess_bent)/n_bent
err_bent_norm = cum_excess_bent_norm * (np.array(err_bent)/np.array(cum_excess_bent) + 1./np.sqrt(n_bent))
cum_excess_straight_norm = np.array(cum_excess_straight)/n_straight
err_straight_norm = cum_excess_straight_norm * (np.array(err_straight)/np.array(cum_excess_straight) + 1./np.sqrt(n_straight))
# cumulative excess plot
plt.figure()
plt.errorbar(x, cum_excess_bent_norm, yerr=err_bent_norm, label='Bent sources')
plt.errorbar(x+0.15*step, cum_excess_straight_norm, yerr=err_straight_norm, ls='--', label='Straight sources')
#plt.axhline(0, ls=':', c='k')
plt.legend(loc='upper left')
plt.xlabel('Radius around radio galaxy (Mpc)')
plt.ylabel('Cumulative excess density (Mpc$^{-2}$)')
plt.tight_layout()
# fractional excess plot
plt.figure()
plt.errorbar(x, frac_excess_bent, yerr=frac_err_bent, label='Bent sources')
plt.errorbar(x+0.15*step, frac_excess_straight, yerr=frac_err_straight, ls='--', label='Straight sources')
#plt.axhline(0, ls=':', c='k')
plt.legend(loc='upper right')
plt.xlabel('Radius around radio galaxy (Mpc)')
plt.ylabel('Fractional excess density')
plt.tight_layout()
return None
def sig(i, j):
frac_b = cum_bent / bg_bent
frac_s = cum_straight / bg_straight
dfrac_b = frac_b * (1/np.sqrt(cum_bent) + 1/np.sqrt(bg_bent)) / np.sqrt(1.*i/n_bent)
dfrac_s = frac_s * (1/np.sqrt(cum_straight) + 1/np.sqrt(bg_straight)) / np.sqrt(1.*j/n_straight)
return (frac_b - frac_s) / max(dfrac_b, dfrac_s)
# statistical excess plot
plt.figure()
plt.plot(x, cum_excess_bent_norm/err_bent_norm, label='Bent sources')
plt.plot(x+0.15*step, cum_excess_straight_norm/err_straight_norm, ls='--', label='Straight sources')
plt.legend(loc='upper right')
plt.xlabel('Radius around RG (Mpc)')
plt.ylabel('Standard deviations above background')
plt.tight_layout()
# statistical difference plot
plt.figure()
plt.plot(x, (cum_excess_bent_norm-cum_excess_straight_norm)/np.max([err_bent_norm,err_straight_norm], 0))
plt.xlabel('Radius around RG (Mpc)')
plt.ylabel('Standard deviations difference')
plt.tight_layout()
# distribution of bending vs density
bent_companions, straight_companions = [], []
for source in distant_sources.find():
count = 0
for neighbor in source['neighbors']:
if 0.05 < source['neighbors'][neighbor]['dist_Mpc'] < 0.25:
count += 1
if source['bending_excess']>bent_cut:
bent_companions.append(count)
else:
straight_companions.append(count)
plt.figure()
plt.hist(bent_companions, bins=range(13), alpha=.8, normed=True, label='Bent sources')
plt.hist(straight_companions, bins=range(13), alpha=.8, normed=True, label='Straight sources')
plt.scatter(6, .1, c='w', alpha=0, label='$p=%.3f$'%stats.anderson_ksamp([bent_companions, straight_companions]).significance_level)
plt.xlabel('Number of companions between 50 and 250 kpc')
plt.ylabel('Normalized count')
plt.legend()
plt.tight_layout()
# bending vs density
bending, neighbors = [], []
for source in distant_sources.find():
bending.append(source['bending_excess'])
count = 0
for neighbor in source['neighbors']:
if 0.05 < source['neighbors'][neighbor]['dist_Mpc'] < 0.25:
count += 1
neighbors.append(count)
bending = np.array(bending)
neighbors = np.array(neighbors)
order = bending.argsort()
bg = (bg_bent+bg_straight)/(n_bent+n_straight)*np.pi*(0.25**2-0.05**2)
plt.figure()
n, bins, patches = plt.hist(bending)
plt.figure()
cut = 1.5
plt.hist(bending[neighbors/bg>cut], bins=bins, alpha=.8, normed=True, label='Density $>%.1f\\times$background\n$n=%i$'%(cut,sum(neighbors/bg>cut)))
plt.hist(bending[neighbors/bg<cut], bins=bins, alpha=.8, normed=True, label='Density $<%.1f\\times$background\n$n=%i$'%(cut,sum(neighbors/bg<cut)))
plt.xlabel('Excess bending angle (deg)')
plt.ylabel('Normalized count')
plt.legend()
plt.tight_layout()
def cluster_spotting(compress=False):
n_bent = np.array([22, 53, 81, 101, 112, 121, 113, 162, 146, 207, 208, 210, 244, 228, 270, 275, 317, 271, 330, 328, 301, 326, 354, 378, 394, 414, 419, 435, 444, 529])
n_straight = np.array([35, 51, 61, 94, 100, 114, 117, 154, 136, 169, 167, 216, 188, 234, 241, 255, 267, 282, 304, 309, 357, 335, 335, 366, 368, 411, 433, 484, 463, 516]) * 90./96. # normalized
base_step_size = 0.1 # Mpc
base_step_count = 30
if compress:
step_size = 2.*base_step_size
step_count = base_step_count/2.
n_bent = n_bent[::2] + n_bent[1::2]
n_straight = n_straight[::2] + n_straight[1::2]
else:
step_size = base_step_size
step_count = base_step_count
bg_bent = 254
bg_straight = 241
sep = step_size*(np.arange(step_count)+1)
excess_bent = n_bent - bg_bent * np.pi * (sep**2-(sep-step_size)**2)
excess_straight = n_straight - bg_straight * np.pi * (sep**2-(sep-step_size)**2)
cum_excess_bent = np.cumsum(excess_bent/90.)
cum_excess_straight = np.cumsum(excess_straight/90.)
err_bent = np.sqrt(n_bent + bg_bent)
err_straight = np.sqrt(n_straight + bg_straight)
cum_err_bent = np.sqrt(np.cumsum(err_bent/90.))
cum_err_straight = np.sqrt(np.cumsum(err_straight/90.))
print 'Bent excess within 2 Mpc: %.2f\pm%.2f\nStraight excess within 2 Mpc: %.2f\pm%.2f' % (cum_excess_bent[sep==2], cum_err_bent[sep==2], cum_excess_straight[sep==2], cum_err_straight[sep==2])
diff = cum_excess_bent[sep==2] - cum_excess_straight[sep==2]
diff_err = np.sqrt(cum_err_bent[sep==2] + cum_err_straight[sep==2])
sig_level = diff / diff_err  # significance of the bent-straight difference
print 'Difference within 2 Mpc: %.2f\pm%.2f' % (diff, diff_err)
fig, ax = plt.subplots(1)
ax.errorbar(sep, cum_excess_bent, yerr=cum_err_bent, label='Bent sources')
ax.errorbar(sep+0.2*base_step_size, cum_excess_straight, yerr=cum_err_straight, ls='--', label='Straight sources')
ax.legend(loc='upper left')
ax.set_xlabel('Distance from radio source (Mpc)')
ax.set_ylabel('Cumulative excess per radio source')
ax.set_title('Companions per Mpc$^2$ around non-cluster radio sources')
def bending_limit():
h = 1. # jet width in kpc
p_min = np.mean([0.9, 0.6, 1.4, 1.7, 0.4, 0.6, 1.4])*0.0062415 # minimum synchrotron pressure in keV cm^-3
for source in bending_15.find():
size = source['RGZ']['size_kpc']
p_ram = source['WHL']['P']
theta = np.arcsin(0.5 * size/h * p_ram/p_min)
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.bending_max':theta*180./np.pi if not np.isnan(theta) else 90.}})
def get_bent_cut():
bend = []
for i in bending_15.find(total_cuts):
bend.append(i['using_peaks']['bending_excess'])
bend = np.array(bend)
sigma = 0.682689492137
return np.percentile(bend, 100*(1+sigma)/2)
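# Note added for clarity: with sigma = 0.6827, the cut above is the ~84th
# percentile (the +1-sigma point) of the bending-excess distribution over all
# sources passing total_cuts; sources above it are treated as 'highly bent'
# elsewhere (e.g. in find_more_sources() and distant_env()).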
def remove_AllWISE(drop=False):
count = bending_15.count({'SDSS':{'$exists':True}})
if drop:
print bending_15.update({}, {'$unset':{'best':None}}, multi=True)
for source in bending_15.find({'best':{'$exists':False}}).batch_size(500):
z, z_err = get_z(source)
if z:
sql = 'select raerr, decerr from photoprimary where objid=%i' % source['SDSS']['objID']
df = SDSS_select(sql)
ra_err = df['raerr'][0]
dec_err = df['decerr'][0]
best = {'redshift':z, 'redshift_err':z_err, 'ra':source['SDSS']['ra'], 'ra_err':ra_err, 'dec':source['SDSS']['dec'], 'dec_err':dec_err}
bending_15.update({'_id':source['_id']}, {'$set':{'best':best}})
print '%i/%i' % (bending_15.find({'best':{'$exists':True}}).count(), count)
def sep_hist():
params = total_cuts.copy()
params['using_peaks.bending_angle'] = params['using_peaks.bending_corrected']
del params['using_peaks.bending_corrected']
sep = []
for source in bending_15.find(total_cuts):
sep.append(source['WHL']['r/r500'])
sep = np.array(sep)
plt.figure()
n, bins, _ = plt.hist(np.log10(sep), bins=20, fill=False, hatch='//')
area = np.pi * (pow(10, bins)[1:]**2 - pow(10, bins)[:-1]**2)
outer = np.percentile(sep, 95)
density = sum(sep<outer)/(np.pi*outer**2)
x = (pow(10, bins)[1:]+pow(10, bins)[:-1])/2.
y = density*area
popt = np.polyfit(x, y, 2)
newx = np.logspace(np.log10(min(sep)), np.log10(outer), 100)
plt.plot(np.log10(newx), popt[0]*newx**2 + popt[1]*newx + popt[2], label='Assuming uniform \ndistribution on the sky', c='b', ls=':')
plt.ylim(ymax=1300)
#plt.figure()
#plt.hist(sep, bins=pow(10,bins), fill=False, hatch='//')
#plt.xscale('log')
plt.xlabel(get_label('WHL.r/r500', True))
plt.ylabel('Count')
plt.legend(loc='upper left')
plt.tight_layout()
def density_ratio(x1, x2):
c500, gamma, alpha, beta = 1.177, 0.3081, 1.0510, 5.4905
rho1 = (c500*x1)**gamma * (1 + (c500*x1)**alpha)**(1.*(beta-gamma)/alpha)
rho2 = (c500*x2)**gamma * (1 + (c500*x2)**alpha)**(1.*(beta-gamma)/alpha)
return rho2 / rho1
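# Note added for clarity (an interpretation, not from the original comments):
# rho(x) above is the denominator of a generalized NFW (GNFW) profile evaluated
# at scaled radius x = r/r500, and the parameter values (c500=1.177,
# gamma=0.3081, alpha=1.0510, beta=5.4905) are consistent with the Arnaud et
# al. (2010) universal pressure profile fit. density_ratio(x1, x2) therefore
# returns rho(x2)/rho(x1), i.e. the factor by which the profile value (e.g.
# the ICM pressure) at x1 exceeds that at x2.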
def v500():
# orbital velocity at r500
c = np.sqrt(1.327e25) # km^1.5 s^-1
m500, r500 = [], []
for source in bending_15.find(total_cuts):
m500.append(source['WHL']['M500'])
r500.append(source['WHL']['r500'])
m500 = np.array(m500)
r500 = np.array(r500)
v = c * np.sqrt(m500/r500/3.086e19) # km s^-1
return np.median(v)
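# Note added for clarity: c**2 = 1.327e25 km^3 s^-2 is G times 1e14 M_sun, and
# 3.086e19 km is one Mpc, so v = sqrt(G*M500/r500) is the circular velocity at
# r500 in km/s, assuming the catalog M500 is quoted in units of 1e14 M_sun and
# r500 in Mpc (which the prefactor implies).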
def scatter():
bending, mass, sep, pop = [], [], [], []
for source in bending_15.find(total_cuts):
bending.append(source['using_peaks']['bending_corrected'])
mass.append(source['WHL']['M500'])
sep.append(source['WHL']['r/r500'])
pop.append(source['WHL']['population'])
bending = np.array(bending)
mass = np.array(mass)
sep = np.array(sep)
pop = np.array(pop)
plt.scatter(mass[pop=='outer'], bending[pop=='outer'], s=1, label='outer region')
plt.scatter(mass[pop=='BCG'], bending[pop=='BCG'], s=1, label='BCGs')
plt.scatter(mass[pop=='inner'], bending[pop=='inner'], s=1, label='inner region')
plt.axhline(np.median(bending), ls=':', c='k')
plt.legend()
plt.xlabel(get_label('WHL.M500'))
plt.ylabel(get_label('using_peaks.bending_corrected'))
plt.tight_layout()
def get_phot_matches():
z_r, z_w, cid = [], [], []
for source in bending_15.find(total_cuts):
z_r.append(source['best']['redshift'])
z_w.append(source['WHL']['zbest'])
cid.append(source['RGZ']['RGZ_id'])
z_r = np.array(z_r)
z_w = np.array(z_w)
cid = np.array(cid)
bad_cid = list(cid[np.abs(z_w - z_r) > .04*(1+z_r)])
return bad_cid
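# Note added for clarity: get_phot_matches() returns the RGZ ids whose source
# redshift and matched WHL cluster redshift disagree by more than the photo-z
# tolerance 0.04*(1+z); print_supplement() falls back to the cluster
# photometric redshift for those entries.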
def flag_dups(coll=bending_15):
coll.update({}, {'$set':{'RGZ.duplicate':0}}, multi=True)
cid, name = [], []
for source in coll.find(total_cuts):
name.append(source['RGZ']['RGZ_name'])
for n in set(name):
name.remove(n)
for source in coll.find({'RGZ.RGZ_name':{'$in':name}}):
cid.append(source['RGZ']['RGZ_id'])
for source in coll.find({'RGZ.RGZ_id':{'$in':cid}}):
if source['RGZ']['RGZ_name'] in name:
coll.update({'RGZ.RGZ_id':source['RGZ']['RGZ_id']}, {'$set':{'RGZ.duplicate':1}})
name.remove(source['RGZ']['RGZ_name'])
def print_supplement(filename='/home/garon/Documents/RGZdata/bending/data_supplement.csv'):
bad_cid = get_phot_matches()
cids = []
with open(filename[:-4] + '_table1' + filename[-4:], 'w') as f1:
with open(filename[:-4] + '_sample1' + filename[-4:], 'w') as g1:
for source in bending_15.find(total_cuts).sort('RGZ.RGZ_name', 1):
cids.append(source['RGZ']['RGZ_id'])
morph = 2 if source['RGZ']['morphology'] == 'double' else 3
ztype = 's' if 'spec_redshift' in source['SDSS'] else 'p'
align = 'r' if source['WHL']['alignment'] == 'radial' else ('t' if source['WHL']['alignment'] == 'tangential' else 'o')
if source['RGZ']['RGZ_id'] in bad_cid:
w_z = source['WHL']['zphot']
w_ztype = 'p'
else:
w_z = source['WHL']['zbest']
w_ztype = 's' if 'zspec' in source['WHL'] else 'p'
datastr = '%s,%s,%i,%.3f,%.5f,%.5f,%.4f,%.4f,%s,%.1f,%.1f,%.1f,%.2f,%s,%.5f,%.5f,%.4f,%s,%.2f,%.2f,%.2f,%.2f,%.1f,%s' % (source['RGZ']['RGZ_name'][3:], source['RGZ']['zooniverse_id'], morph, source['RGZ']['size_arcmin'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['best']['redshift_err'], ztype, source['using_peaks']['bending_angle'], source['using_peaks']['bending_corrected'], source['using_peaks']['bending_excess'], source['using_contour']['asymmetry'], source['WHL']['name'], source['WHL']['ra'], source['WHL']['dec'], w_z, w_ztype, source['WHL']['r/r500'], source['WHL']['r500'], source['WHL']['M500'], np.log10(source['WHL']['P']), source['WHL']['orientation_peaks'], align)
print >> f1, datastr
print >> g1, to_tex(datastr)
with open(filename[:-4] + '_table2' + filename[-4:], 'w') as f2:
with open(filename[:-4] + '_sample2' + filename[-4:], 'w') as g2:
params = total_cuts.copy()
params['RGZ.RGZ_id'] = {'$nin':cids}
for source in bent_sources.find(params).sort('RGZ.RGZ_name', 1):
morph = 2 if source['RGZ']['morphology'] == 'double' else 3
ztype = 's' if 'spec_redshift' in source['SDSS'] else 'p'
datastr = '%s,%s,%i,%.3f,%.5f,%.5f,%.4f,%.4f,%s,%.1f,%.1f,%.1f,%.2f' % (source['RGZ']['RGZ_name'][3:], source['RGZ']['zooniverse_id'], morph, source['RGZ']['size_arcmin'], source['best']['ra'], source['best']['dec'], source['best']['redshift'], source['best']['redshift_err'], ztype, source['using_peaks']['bending_angle'], source['using_peaks']['bending_corrected'], source['using_peaks']['bending_excess'], source['using_contour']['asymmetry'])
print >> f2, datastr
print >> g2, to_tex(datastr)
def to_tex(s):
keylist = [(',',' & '), ('& s','& $s$'), ('& p','& $p$'), ('& r','& $r$'), ('& t','& $t$'), ('& o','& $o$')]
for key in keylist:
s = s.replace(*key)
return s + ' \\\\'
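# e.g. to_tex('J1234,0.123,s') returns 'J1234 & 0.123 & $s$ \\' -- a LaTeX
# table row, with the single-letter flag columns (s/p, r/t/o) wrapped in math
# mode.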
def make_all_figs():
contamination()
sep_hist()
sdss_density()
rmsd()
bending_correct(plot=True, methods='using_peaks')
plot_running('WHL.r/r500', 'using_peaks.bending_excess', logx=True, pop='separate', title=False)
plot_running('WHL.M500', 'using_peaks.bending_excess', pop='inner', title=False)
plot_running('WHL.M500', 'using_peaks.bending_excess', pop='BCG', title=False)
plot_running('WHL.P', 'using_peaks.bending_excess', logx=True, pop='non-BCG', combined=True, title=False)
orientation()
orientation_test()
fractional_bent()
distant_env()
def update_SDSS(coll=catalog, start=0):
for entry in coll.find({'SDSS':{'$exists':False}, 'consensus.ir_ra':{'$exists':True}, 'catalog_id':{'$gte':start}}).sort('catalog_id', 1).batch_size(20):
print entry['catalog_id']
sdss_match = getSDSS(entry)
if sdss_match is not None:
z = sdss_match['spec_redshift'] if 'spec_redshift' in sdss_match else (sdss_match['photo_redshift'] if 'photo_redshift' in sdss_match else 0)
if z > 0:
radio = entry['radio']
radio_nested = {'radio':radio}
physical = getPhysical(z, radio_nested)
radio.update(physical)
print catalog.update({'_id':entry['_id']}, {'$set':{'radio':radio, 'SDSS':sdss_match}})
else:
print catalog.update({'_id':entry['_id']}, {'$set':{'SDSS':sdss_match}})
def vienna(data_in=False, data_out=False):
infile = '/home/garon/Downloads/vienna.csv'
outfile = '/home/garon/Downloads/vienna_matches.csv'
if data_in:
ix, ra, dec, z = [], [], [], []
with open(infile, 'r') as f:
r = csv.reader(f)
r.next()
for row in r:
ix.append(row[0])
ra.append(row[1])
dec.append(row[2])
z.append(row[3])
ix = np.array(ix, dtype=int)
ra = np.array(ra, dtype=float)
dec = np.array(dec, dtype=float)
z = np.array(z, dtype=float)
if data_out:
with open(outfile, 'w') as f:
print >> f, 'source#,radeg,decdeg,z,sep_mpc,sep_r500'
for i in range(len(ix)):
loc = coord.SkyCoord(ra[i], dec[i], unit=(u.deg,u.deg))
w = get_whl(loc, z[i], 0, 15, 0.04*(1+z[i]))
if w is not None:
loc2 = coord.SkyCoord(w['RAdeg'], w['DEdeg'], unit=(u.deg,u.deg))
sep_deg = loc2.separation(loc)
sep_mpc = float(cosmo.angular_diameter_distance(w['zspec'] if 'zspec' in w else w['zphot'])/u.Mpc * sep_deg.to(u.rad)/u.rad)
sep_r500 = sep_mpc / w['r500']
else:
sep_mpc, sep_r500 = 99., 99.
print >> f, '%i,%f,%f,%f,%f,%f' % (ix[i], ra[i], dec[i], z[i], sep_mpc, sep_r500)
def plot_annotations(source, peak_count=None):
entry = catalog.find_one({'catalog_id':source['RGZ']['RGZ_id']})
subject = subjects.find_one({'zooniverse_id':entry['zooniverse_id']})
fid = subject['metadata']['source']
fits_loc = pathdict[fid]
w = wcs.WCS(fits.getheader(fits_loc, 0))
ir = coord.SkyCoord(source['SDSS']['ra'], source['SDSS']['dec'], unit=(u.deg,u.deg), frame='icrs') if 'SDSS' in source else coord.SkyCoord(source['AllWISE']['ra'], source['AllWISE']['dec'], unit=(u.deg,u.deg), frame='icrs')
ir_pos = w.wcs_world2pix(np.array([[ir.ra.deg,ir.dec.deg]]), 1)
z, z_err = get_z(source)
peaks = entry['radio']['peaks']
peak_pos = w.wcs_world2pix(np.array([ [peak['ra'],peak['dec']] for peak in peaks ]), 1)
data = get_data(subject)
if peak_count is None:
if len(entry['radio']['peaks'])==2:
peak_count = 2
elif len(entry['radio']['peaks'])==3 or len(entry['radio']['components'])==3:
peak_count = 3
else:
print 'Not enough info for peak counting'
return
contour_tree = get_contours(w, ir_pos, peak_pos, data, peak_count)
contour_tree.print_contours()
plt.scatter(*ir_pos.T, marker='x')
x = [peak_pos.T[0][0], ir_pos[0][0], peak_pos.T[0][1]]
y = [peak_pos.T[-1][0], ir_pos[0][-1], peak_pos.T[-1][1]]
plt.plot(x, y)
for i in data['contours']:
for j in i:
print j['level']
def scale_region_file(infile):
scale_factor = 500./132
outfile = infile.split('.')
outfile[-2] += '_scaled'
outfile = '.'.join(outfile)
with open(infile, 'r') as f:
lines = f.readlines()
with open(outfile, 'w') as f:
for line in lines:
if line[:7]=='polygon':
scaled = []
for val in line[8:-2].split(','):
scaled.append(scale_factor*float(val))
print >> f, 'polygon(%s)' % ','.join(np.array(scaled, dtype=str))
else:
print >> f, line[:-1]
def update_dz():
for source in bending_15.find({'best':{'$exists':True}}):
z1 = source['best']['redshift']
z2 = source['WHL']['zbest']
dz = (z1-z2)/(1.+z1)
bending_15.update({'_id':source['_id']}, {'$set':{'WHL.dz':dz}})
def correlation(x_param, y_param, pop=None, bin_by=None, bin_count=0):
'''Significance of correlation between two parameters'''
assert pop in [None, 'BCG', 'inner', 'outer', 'separate'], "pop must be 'BCG', 'inner', 'outer', or 'separate'"
if bin_by is not None:
assert type(bin_count) is int and bin_count>0, 'bin_count must be positive int'
params = total_cuts.copy()
if pop == 'separate':
for pop2 in ['BCG', 'inner', 'outer']:
correlation(x_param, y_param, pop2, bin_by, bin_count)
return
elif pop in ['BCG', 'inner', 'outer']:
params['WHL.population'] = pop
if bin_by is not None:
bin_by_list = bin_by.split('.')
bins = np.arange(bin_count+1) * 100. / bin_count
vals = []
for i in bending_15.find(params):
vals.append(i[bin_by_list[0]][bin_by_list[1]])
samples = np.percentile(vals, bins)
print 'Pop: %s, binned by %s' % (pop, bin_by)
for i in range(len(samples)-1):
params[bin_by] = {'$gte':samples[i], '$lt':samples[i+1]}
_, x, _, y, _ = get_trends(params, x_param, y_param, bending_15, False, False)
rho = stats.spearmanr(x,y)
print ' Bin: %f-%f, corr: %f, sigma: %f' % (samples[i], samples[i+1], rho.correlation, z_score(rho.pvalue))
else:
_, x, _, y, _ = get_trends(params, x_param, y_param, bending_15, False, False)
rho = stats.spearmanr(x,y)
print 'Pop: %s, corr: %f, sigma: %f' % (pop, rho.correlation, z_score(rho.pvalue))
def size_comp(bin_count=1):
pop, z, size = [], [], []
for source in bending_15.find(total_cuts):
pop.append(source['WHL']['population'])
z.append(source['best']['redshift'])
size.append(source['RGZ']['size_kpc'])
pop = np.array(pop)
z = np.array(z)
size = np.array(size)
bins = np.arange(bin_count+1) * 100. / bin_count
samples = np.percentile(z, bins)
for i in range(len(samples)-1):
mask = np.logical_and(z>=samples[i], z<samples[i+1])
inner = np.logical_and(mask, pop=='inner')
outer = np.logical_and(mask, pop=='outer')
p = stats.anderson_ksamp([size[inner], size[outer]]).significance_level
print 'z = %.2f-%.2f' % (samples[i], samples[i+1])
print ' Inner: %.0f +- %.0f kpc (n=%i)' % (np.mean(size[inner]), np.std(size[inner]), len(size[inner]))
print ' Outer: %.0f +- %.0f kpc (n=%i)' % (np.mean(size[outer]), np.std(size[outer]), len(size[outer]))
print ' Same pop: %.2f sigma' % z_score(p)
def get_headtails():
params = total_cuts.copy()
params['RGZ.morphology'] = 'double'
near, far = [], []
count = 0
tot = bending_15.find(params).count()
for source in bending_15.find(params).batch_size(100):
count += 1
print '%i/%i' % (count, tot)
ir = coord.SkyCoord(source['best']['ra'], source['best']['dec'], unit=(u.deg,u.deg), frame='icrs')
rad0 = coord.SkyCoord(source['RGZ']['peaks'][0]['ra'], source['RGZ']['peaks'][0]['dec'], unit=(u.deg,u.deg), frame='icrs')
rad1 = coord.SkyCoord(source['RGZ']['peaks'][1]['ra'], source['RGZ']['peaks'][1]['dec'], unit=(u.deg,u.deg), frame='icrs')
sep0 = ir.separation(rad0).arcsec
sep1 = ir.separation(rad1).arcsec
near.append(min(sep0, sep1))
far.append(max(sep0, sep1))
near = np.array(near)
far = np.array(far)
return near, far
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians. https://stackoverflow.com/a/6802723
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
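# e.g. np.dot(rotation_matrix([0, 0, 1], np.pi/2), [1, 0, 0]) is approximately
# [0, 1, 0]: a quarter turn about the z-axis carries the x-axis onto the
# y-axis (Euler-Rodrigues formula).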
def projection_effects(theta=None, plot=False):
if theta is None:
theta = np.random.rand() * 90.
M = np.array([[-1, 0, 0], [0, 0, 0], [np.cos(theta*np.pi/180), np.sin(theta*np.pi/180), 0]])
proj = []
for i in np.arange(0, 360, 2)*np.pi/180:
rotx = rotation_matrix([1,0,0], i)
for j in np.arange(0, 360, 2)*np.pi/180:
roty = rotation_matrix([0,1,0], j)
Mprime = np.matmul(np.matmul(M, rotx), roty)
thetaprime = math.atan(Mprime[2,1] / Mprime[2,0])
proj.append(thetaprime)
proj = np.array(proj)
if plot:
plt.figure()
plt.hist(np.abs(proj), 180)
plt.axvline(theta*np.pi/180, color='k', label='True $\\theta$')
plt.axvline(np.median(np.abs(proj)), color='r', label='Median observed $\\theta$')
plt.legend()
plt.xlabel('Observed bending angle (rad)')
plt.ylabel('Count')
plt.tight_layout()
return theta, np.median(np.abs(proj))
def total_projection(plot=True):
theta = []
true = np.arange(0, 180, 2)
for i in true:
theta.append(projection_effects(i)[1])
observed = np.array(theta)*180/np.pi
if plot:
plt.figure()
plt.plot(true[:45], true[:45], label='True $\\theta$')
plt.plot(true, observed, label='Median observed $\\theta$')
plt.legend()
plt.xlabel('True bending angle (deg)')
plt.ylabel('Median observed bending angle (deg)')
plt.tight_layout()
plt.figure()
plt.plot(true, observed-true, label='Observed-true')
plt.fill_between(true, -1*get_median_bend(), get_median_bend(), alpha=.5, label='Uncertainty')
plt.legend(loc='lower left')
plt.xlabel('True bending angle (deg)')
plt.ylabel('Error (deg)')
plt.tight_layout()
return true, observed
if __name__ == '__main__':
logging.basicConfig(filename='%s/bending.log' % rgz_path, level=logging.DEBUG, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.captureWarnings(True)
logging.info('Bending run from command line')
# Run options
calculate_bending = True
match_to_clusters = True
control = False
# Generate the collection of bent sources
if calculate_bending:
done = False
while not done:
try:
if not bent_sources.count():
bent_sources.create_index('RGZ.RGZ_id', unique=True)
output('Processing sources from RGZ')
make_bent_sources()
output('%i double sources processed' % bent_sources.find({'RGZ.morphology':'double'}).count())
output('%i triple sources processed' % bent_sources.find({'RGZ.morphology':'triple'}).count())
done = True
except pymongo.errors.CursorNotFound as c:
time.sleep(10)
output('Cursor timed out; starting again.')
except BaseException as e:
logging.exception(e)
raise
with open(completed_file, 'w'): pass
# Match the sources in bent_sources to the cluster catalogs
if match_to_clusters:
done = False
while not done:
try:
if not bending_15.count():
bending_15.create_index('RGZ.RGZ_id', unique=True)
output('Matching sources to WHL')
make_catalog()
output('%i double sources matched to WHL' % bending_15.find({'RGZ.morphology':'double'}).count())
output('%i triple sources matched to WHL' % bending_15.find({'RGZ.morphology':'triple'}).count())
to_file('%s/csv/bending_catalog_15.csv' % rgz_path, bending_15)
done = True
except pymongo.errors.CursorNotFound as c:
time.sleep(10)
output('Cursor timed out; starting again.')
except BaseException as e:
logging.exception(e)
raise
# Generate a control sample by shuffling the positions of the sources
if control:
try:
if not bending_control.count():
bending_control.create_index('RGZ.RGZ_id', unique=True)
output('Generating control sources')
random_control()
output('%i double sources matched to clusters' % bending_control.find({'RGZ.morphology':'double'}).count())
output('%i triple sources matched to clusters' % bending_control.find({'RGZ.morphology':'triple'}).count())
to_file('%s/csv/bending_control_15.csv' % rgz_path, bending_control)
except BaseException as e:
logging.exception(e)
raise
|
{
"content_hash": "40f1e0e1b0658bc100ad4020868446e8",
"timestamp": "",
"source": "github",
"line_count": 3120,
"max_line_length": 723,
"avg_line_length": 40.38621794871795,
"alnum_prop": 0.6446728304432364,
"repo_name": "afgaron/rgz-analysis",
"id": "25088cc76d03bd607e6d63a5b1b6f8bdfe8710b3",
"size": "126005",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/bending_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "147317"
},
{
"name": "Python",
"bytes": "691021"
},
{
"name": "Ruby",
"bytes": "3598"
},
{
"name": "Shell",
"bytes": "6723"
},
{
"name": "TeX",
"bytes": "40897"
}
],
"symlink_target": ""
}
|
"""Deals with loading & saving .size and .sizediff files.
The .size file is written in the following format. There are no section
delimiters, instead the end of a section is usually determined by a row count
on the first line of a section, followed by that number of rows. In other
cases, the sections have a known size.
Header
------
4 lines long.
Line 0 of the file is a header comment.
Line 1 is the serialization version of the file.
Line 2 is the number of characters in the header fields string.
Line 3 is the header fields string, a stringified JSON object.
Path list
---------
A list of paths. The first line is the size of the list,
and the next N lines that follow are items in the list. Each item is a tuple
of (object_path, source_path) where the two parts are tab separated.
Component list
--------------
A list of components. The first line is the size of the list,
and the next N lines that follow are items in the list. Each item is a unique
COMPONENT which is referenced later.
This section is only present if 'has_components' is True in header fields.
Symbol counts
-------------
2 lines long.
The first line is a tab separated list of section names.
The second line is a tab separated list of symbol group lengths, in the same
order as the previous line.
Numeric values
--------------
In each section, the number of rows is the same as the number of section names
in Symbol counts. The values on a row are space separated, in the order of the
symbols in each group.
Addresses
~~~~~~~~~~
Symbol start addresses which are delta-encoded.
Sizes
~~~~~
The number of bytes this symbol takes up.
Padding
~~~~~~~
The number of padding bytes this symbol has.
This section is only present if 'has_padding' is True in header fields.
Path indices
~~~~~~~~~~~~~
Indices that reference paths in the prior Path list section. Delta-encoded.
Component indices
~~~~~~~~~~~~~~~~~~
Indices that reference components in the prior Component list section.
Delta-encoded.
This section is only present if 'has_components' is True in header fields.
Symbols
-------
The final section contains details info on each symbol. Each line represents
a single symbol. Values are tab separated and follow this format:
symbol.full_name, symbol.num_aliases, symbol.flags
|num_aliases| will be omitted if the aliases of the symbol are the same as the
previous line. |flags| will be omitted if there are no flags.
The .sizediff file stores a sparse representation of a difference between .size
files. Each .sizediff file stores two sparse .size files, before and after,
containing only symbols that differed between "before" and "after". They can
be rendered via the Tiger viewer. .sizediff files use the following format:
Header
------
3 lines long.
Line 0 of the file is a header comment.
Line 1 is the number of characters in the header fields string.
Line 2 is the header fields string, a stringified JSON object. This currently
contains two fields, 'before_length' (the length in bytes of the 'before'
section) and 'version', which is always 1.
Before
------
The next |header.before_length| bytes are a valid gzipped sparse .size file
containing the "before" snapshot.
After
-----
All remaining bytes are a valid gzipped sparse .size file containing the
"after" snapshot.
"""
import contextlib
import gzip
import io
import itertools
import json
import logging
import os
import shutil
import sys
import models
import parallel
# File format version for .size files.
_SERIALIZATION_VERSION = 'Size File Format v1'
# Header for .sizediff files
_SIZEDIFF_HEADER = '# Created by //tools/binary_size\nDIFF\n'
class _Writer:
"""Helper to format and write data to a file object."""
def __init__(self, file_obj):
self.file_obj_ = file_obj
def WriteBytes(self, b):
# Direct write of raw bytes.
self.file_obj_.write(b)
def WriteString(self, s):
self.file_obj_.write(s.encode('ascii'))
def WriteLine(self, s):
self.file_obj_.write(s.encode('ascii'))
self.file_obj_.write(b'\n')
def WriteNumberList(self, gen):
"""Writes numbers from |gen| separated by space, in one line."""
sep = b''
for num in gen:
self.WriteBytes(sep)
self.WriteString(str(num))
sep = b' '
self.WriteBytes(b'\n')
def LogSize(self, desc):
self.file_obj_.flush()
size = self.file_obj_.tell()
logging.debug('File size with %s: %d' % (desc, size))
def SortSymbols(raw_symbols):
logging.debug('Sorting %d symbols', len(raw_symbols))
# TODO(agrieve): Either change this sort so that it's only sorting by section
# (and not using .sort()), or have it specify a total ordering (which must
# also include putting padding-only symbols before others of the same
# address). Note: The sort as-is takes ~1.5 seconds.
raw_symbols.sort(
key=lambda s: (s.IsPak(), s.IsBss(), s.section_name, s.address))
logging.info('Processed %d symbols', len(raw_symbols))
def CalculatePadding(raw_symbols):
"""Populates the |padding| field based on symbol addresses. """
logging.info('Calculating padding')
# Padding not really required, but it is useful to check for large padding and
# log a warning.
seen_sections = set()
for i, symbol in enumerate(raw_symbols[1:]):
prev_symbol = raw_symbols[i]
if symbol.IsOverhead():
# Overhead symbols are not actionable so should be padding-only.
symbol.padding = symbol.size
if prev_symbol.section_name != symbol.section_name:
assert symbol.section_name not in seen_sections, (
'Input symbols must be sorted by section, then address.')
seen_sections.add(symbol.section_name)
continue
if (symbol.address <= 0 or prev_symbol.address <= 0
or not symbol.IsNative() or not prev_symbol.IsNative()):
continue
if symbol.address == prev_symbol.address:
if symbol.aliases and symbol.aliases is prev_symbol.aliases:
symbol.padding = prev_symbol.padding
symbol.size = prev_symbol.size
continue
# Padding-only symbols happen for ** symbol gaps.
assert prev_symbol.size_without_padding == 0, (
'Found duplicate symbols:\n%r\n%r' % (prev_symbol, symbol))
padding = symbol.address - prev_symbol.end_address
symbol.padding = padding
symbol.size += padding
assert symbol.size >= 0, (
'Symbol has negative size (likely not sorted properly): '
'%r\nprev symbol: %r' % (symbol, prev_symbol))
def _ExpandSparseSymbols(sparse_symbols):
"""Expands a symbol list with all aliases of all symbols in the list.
Args:
sparse_symbols: A list or SymbolGroup to expand.
"""
representative_symbols = set()
raw_symbols = set()
logging.debug('Expanding sparse_symbols with aliases of included symbols')
for sym in sparse_symbols:
if sym.aliases:
representative_symbols.add(sym.aliases[0])
else:
raw_symbols.add(sym)
for sym in representative_symbols:
raw_symbols.update(set(sym.aliases))
raw_symbols = list(raw_symbols)
SortSymbols(raw_symbols)
logging.debug('Done expanding sparse_symbols')
return models.SymbolGroup(raw_symbols)
def _SaveSizeInfoToFile(size_info,
file_obj,
include_padding=False,
sparse_symbols=None):
"""Saves size info to a .size file.
Args:
size_info: Data to write to the file
file_obj: File opened for writing.
include_padding: Whether to save padding data, useful if adding a subset of
symbols.
sparse_symbols: If present, only save these symbols to the file.
"""
if sparse_symbols is not None:
# Any aliases of sparse symbols must also be included, or else file
# parsing will attribute symbols that happen to follow an incomplete alias
# group to that alias group.
raw_symbols = _ExpandSparseSymbols(sparse_symbols)
else:
raw_symbols = size_info.raw_symbols
w = _Writer(file_obj)
# Created by supersize header
w.WriteLine('# Created by //tools/binary_size')
w.WriteLine(_SERIALIZATION_VERSION)
# JSON header fields
fields = {
'metadata': size_info.metadata,
'section_sizes': size_info.section_sizes,
'has_components': True,
'has_padding': include_padding,
}
fields_str = json.dumps(fields, indent=2, sort_keys=True)
w.WriteLine(str(len(fields_str)))
w.WriteLine(fields_str)
w.LogSize('header') # For libchrome: 570 bytes.
# Store a single copy of all paths and have them referenced by index.
unique_path_tuples = sorted(
set((s.object_path, s.source_path) for s in raw_symbols))
path_tuples = {tup: i for i, tup in enumerate(unique_path_tuples)}
w.WriteLine(str(len(unique_path_tuples)))
for pair in unique_path_tuples:
w.WriteLine('%s\t%s' % pair)
w.LogSize('paths') # For libchrome, adds 200kb.
# Store a single copy of all components and have them referenced by index.
unique_components = sorted(set(s.component for s in raw_symbols))
components = {comp: i for i, comp in enumerate(unique_components)}
w.WriteLine(str(len(unique_components)))
for comp in unique_components:
w.WriteLine(comp)
w.LogSize('components')
# Symbol counts by section.
symbol_group_by_section = raw_symbols.GroupedBySectionName()
w.WriteLine('\t'.join(g.name for g in symbol_group_by_section))
w.WriteLine('\t'.join(str(len(g)) for g in symbol_group_by_section))
def gen_delta(gen, prev_value=0):
"""Adapts a generator of numbers to deltas."""
for value in gen:
yield value - prev_value
prev_value = value
def write_groups(func, delta=False):
"""Write func(symbol) for each symbol in each symbol group.
Each line written represents one symbol group in |symbol_group_by_section|.
The values in each line are space separated and are the result of calling
|func| with the Nth symbol in the group.
If |delta| is True, the differences in values are written instead."""
for group in symbol_group_by_section:
gen = map(func, group)
w.WriteNumberList(gen_delta(gen) if delta else gen)
write_groups(lambda s: s.address, delta=True)
w.LogSize('addresses') # For libchrome, adds 300kb.
write_groups(lambda s: s.size if s.IsOverhead() else s.size_without_padding)
w.LogSize('sizes') # For libchrome, adds 300kb
# Padding for non-padding-only symbols is recalculated from addresses on
# load, so we only need to write it if we're writing a subset of symbols.
if include_padding:
write_groups(lambda s: s.padding)
w.LogSize('paddings') # For libchrome, adds 300kb
write_groups(
lambda s: path_tuples[(s.object_path, s.source_path)], delta=True)
w.LogSize('path indices') # For libchrome: adds 125kb.
write_groups(lambda s: components[s.component], delta=True)
w.LogSize('component indices')
prev_aliases = None
for group in symbol_group_by_section:
for symbol in group:
w.WriteString(symbol.full_name)
if symbol.aliases and symbol.aliases is not prev_aliases:
w.WriteString('\t0%x' % symbol.num_aliases)
prev_aliases = symbol.aliases
if symbol.flags:
w.WriteString('\t%x' % symbol.flags)
w.WriteBytes(b'\n')
w.LogSize('names (final)') # For libchrome: adds 3.5mb.
def _ReadLine(file_iter):
"""Read a line from a file object iterator and remove the newline character.
Args:
file_iter: File object iterator
Returns:
String
"""
# str[:-1] removes the last character from a string, specifically the newline
return next(file_iter)[:-1]
def _ReadValuesFromLine(file_iter, split):
"""Read a list of values from a line in a file object iterator.
Args:
file_iter: File object iterator
split: Splits the line with the given string
Returns:
List of string values
"""
return _ReadLine(file_iter).split(split)
def _LoadSizeInfoFromFile(file_obj, size_path):
"""Loads a size_info from the given file.
See _SaveSizeInfoToFile() for details on the .size file format.
Args:
file_obj: File to read, should be a GzipFile
"""
# Split lines on '\n', since '\r' can appear in some lines!
lines = io.TextIOWrapper(file_obj, newline='\n')
_ReadLine(lines) # Line 0: Created by supersize header
actual_version = _ReadLine(lines)
assert actual_version == _SERIALIZATION_VERSION, (
'Version mismatch. Need to write some upgrade code.')
# JSON header fields
json_len = int(_ReadLine(lines))
json_str = lines.read(json_len)
fields = json.loads(json_str)
section_sizes = fields['section_sizes']
metadata = fields.get('metadata')
has_components = fields.get('has_components', False)
has_padding = fields.get('has_padding', False)
# Eat empty line.
_ReadLine(lines)
# Path list
num_path_tuples = int(_ReadLine(lines)) # Line 4 - number of paths in list
# Read the path list values and store for later
path_tuples = [
_ReadValuesFromLine(lines, split='\t') for _ in range(num_path_tuples)
]
# Component list
if has_components:
num_components = int(_ReadLine(lines)) # number of components in list
components = [_ReadLine(lines) for _ in range(num_components)]
# Symbol counts by section.
section_names = _ReadValuesFromLine(lines, split='\t')
section_counts = [int(c) for c in _ReadValuesFromLine(lines, split='\t')]
# Addresses, sizes, paddings, path indices, component indices
def read_numeric(delta=False):
"""Read numeric values, where each line corresponds to a symbol group.
The values in each line are space separated.
If |delta| is True, the numbers are read as a value to add to the sum of the
prior values in the line, or as the amount to change by.
"""
ret = []
delta_multiplier = int(delta)
for _ in section_counts:
value = 0
fields = []
for f in _ReadValuesFromLine(lines, split=' '):
value = value * delta_multiplier + int(f)
fields.append(value)
ret.append(fields)
return ret
addresses = read_numeric(delta=True)
sizes = read_numeric(delta=False)
if has_padding:
paddings = read_numeric(delta=False)
else:
paddings = [None] * len(section_names)
path_indices = read_numeric(delta=True)
if has_components:
component_indices = read_numeric(delta=True)
else:
component_indices = [None] * len(section_names)
raw_symbols = [None] * sum(section_counts)
symbol_idx = 0
for (cur_section_name, cur_section_count, cur_addresses, cur_sizes,
cur_paddings, cur_path_indices, cur_component_indices) in zip(
section_names, section_counts, addresses, sizes, paddings,
path_indices, component_indices):
alias_counter = 0
for i in range(cur_section_count):
parts = _ReadValuesFromLine(lines, split='\t')
full_name = parts[0]
flags_part = None
aliases_part = None
# aliases_part or flags_part may have been omitted.
if len(parts) == 3:
# full_name aliases_part flags_part
aliases_part = parts[1]
flags_part = parts[2]
elif len(parts) == 2:
if parts[1][0] == '0':
# full_name aliases_part
aliases_part = parts[1]
else:
# full_name flags_part
flags_part = parts[1]
# Use a bit less RAM by using the same instance for this common string.
if full_name == models.STRING_LITERAL_NAME:
full_name = models.STRING_LITERAL_NAME
flags = int(flags_part, 16) if flags_part else 0
num_aliases = int(aliases_part, 16) if aliases_part else 0
# Skip the constructor to avoid default value checks
new_sym = models.Symbol.__new__(models.Symbol)
new_sym.section_name = cur_section_name
new_sym.full_name = full_name
new_sym.address = cur_addresses[i]
new_sym.size = cur_sizes[i]
paths = path_tuples[cur_path_indices[i]]
new_sym.object_path, new_sym.source_path = paths
component = components[cur_component_indices[i]] if has_components else ''
new_sym.component = component
new_sym.flags = flags
# Derived
if cur_paddings:
new_sym.padding = cur_paddings[i]
new_sym.size += new_sym.padding
else:
# This will be computed during CreateSizeInfo()
new_sym.padding = 0
new_sym.template_name = ''
new_sym.name = ''
if num_aliases:
assert alias_counter == 0
new_sym.aliases = [new_sym]
alias_counter = num_aliases - 1
elif alias_counter > 0:
new_sym.aliases = raw_symbols[symbol_idx - 1].aliases
new_sym.aliases.append(new_sym)
alias_counter -= 1
else:
new_sym.aliases = None
raw_symbols[symbol_idx] = new_sym
symbol_idx += 1
if not has_padding:
CalculatePadding(raw_symbols)
return models.SizeInfo(section_sizes, raw_symbols, metadata=metadata,
size_path=size_path)
@contextlib.contextmanager
def _OpenGzipForWrite(path, file_obj=None):
# Open in a way that doesn't set any gzip header fields.
if file_obj:
with gzip.GzipFile(filename='', mode='wb', fileobj=file_obj, mtime=0) as fz:
yield fz
else:
with open(path, 'wb') as f:
with gzip.GzipFile(filename='', mode='wb', fileobj=f, mtime=0) as fz:
yield fz
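# Note added for clarity: passing filename='' and mtime=0 above keeps the gzip
# header free of a file name and timestamp, so saving identical size data
# twice yields byte-identical .size files.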
def SaveSizeInfo(size_info,
path,
file_obj=None,
include_padding=False,
sparse_symbols=None):
"""Saves |size_info| to |path|."""
if os.environ.get('SUPERSIZE_MEASURE_GZIP') == '1':
# Doing serialization and Gzip together.
with _OpenGzipForWrite(path, file_obj=file_obj) as f:
_SaveSizeInfoToFile(
size_info,
f,
include_padding=include_padding,
sparse_symbols=sparse_symbols)
else:
    # Doing serialization and Gzip separately.
# This turns out to be faster. On Python 3: 40s -> 14s.
bytesio = io.BytesIO()
_SaveSizeInfoToFile(
size_info,
bytesio,
include_padding=include_padding,
sparse_symbols=sparse_symbols)
logging.debug('Serialization complete. Gzipping...')
bytesio.seek(0)
with _OpenGzipForWrite(path, file_obj=file_obj) as f:
f.write(bytesio.read())
def LoadSizeInfo(filename, file_obj=None):
"""Returns a SizeInfo loaded from |filename|."""
with gzip.GzipFile(filename=filename, fileobj=file_obj) as f:
return _LoadSizeInfoFromFile(f, filename)
def SaveDeltaSizeInfo(delta_size_info, path, file_obj=None):
"""Saves |delta_size_info| to |path|."""
changed_symbols = delta_size_info.raw_symbols \
.WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
before_symbols = models.SymbolGroup(
[sym.before_symbol for sym in changed_symbols if sym.before_symbol])
after_symbols = models.SymbolGroup(
[sym.after_symbol for sym in changed_symbols if sym.after_symbol])
before_size_file = io.BytesIO()
after_size_file = io.BytesIO()
after_promise = parallel.CallOnThread(
SaveSizeInfo,
delta_size_info.after,
'',
file_obj=after_size_file,
include_padding=True,
sparse_symbols=after_symbols)
SaveSizeInfo(
delta_size_info.before,
'',
file_obj=before_size_file,
include_padding=True,
sparse_symbols=before_symbols)
with file_obj or open(path, 'wb') as output_file:
w = _Writer(output_file)
    # |_SIZEDIFF_HEADER| is multi-line with a trailing newline, so use
# WriteString() instead of WriteLine().
w.WriteString(_SIZEDIFF_HEADER)
# JSON header fields
fields = {
'version': 1,
'before_length': before_size_file.tell(),
}
fields_str = json.dumps(fields, indent=2, sort_keys=True)
w.WriteLine(str(len(fields_str)))
w.WriteLine(fields_str)
before_size_file.seek(0)
shutil.copyfileobj(before_size_file, output_file)
after_promise.get()
after_size_file.seek(0)
shutil.copyfileobj(after_size_file, output_file)
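# Resulting .sizediff layout (as written above): the multi-line _SIZEDIFF_HEADER,
# a line giving the length of the JSON header, the JSON fields themselves
# ({"version": 1, "before_length": ...}), then the complete "before" .size
# payload followed immediately by the "after" payload, with before_length
# recording the size of the "before" payload.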
|
{
"content_hash": "ddbcc1a0d1b8e24770e51f6061d582b5",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 80,
"avg_line_length": 33.05298013245033,
"alnum_prop": 0.6775195351632939,
"repo_name": "endlessm/chromium-browser",
"id": "20b5ec11383ab677b3da8e1a5de5ca2dd45359ce",
"size": "20127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/binary_size/libsupersize/file_format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Tests for AddSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.contrib.opt.python.training import addsign
from tensorflow.contrib.opt.python.training import sign_decay
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def py_linear_decay_fn(decay_steps):
def linear_decay(step):
step = min(step, decay_steps)
return float(decay_steps - step) / decay_steps
return linear_decay
def addsign_update_numpy(params,
g_t,
m,
lr,
alpha=1.0,
beta=0.9,
py_sign_decay_fn=None,
t=None):
m_t = beta * m + (1 - beta) * g_t
if py_sign_decay_fn is None:
sign_decayed = 1.0
else:
sign_decayed = py_sign_decay_fn(t-1)
multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)
params_t = params - lr * multiplier * g_t
return params_t, m_t
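# Worked example (numbers chosen for illustration): with params=1.0, g_t=0.1,
# m=0.0, lr=0.1, alpha=1.0, beta=0.9 and no sign decay, m_t = 0.9*0.0 + 0.1*0.1
# = 0.01, the multiplier is 1.0 + sign(0.1)*sign(0.01) = 2.0, and the update
# gives params_t = 1.0 - 0.1*2.0*0.1 = 0.98.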
class AddSignTest(xla_test.XLATestCase):
def _testDense(self,
learning_rate=0.1,
sign_decay_fn=None,
py_sign_decay_fn=None,
alpha=1.0,
beta=0.9):
for dtype in self.float_types:
with self.cached_session(), self.test_scope():
# Initialize variables for numpy implementation.
m0, m1 = 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
global_step = resource_variable_ops.ResourceVariable(0, trainable=False)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = addsign.AddSignOptimizer(
learning_rate=learning_rate,
alpha=alpha,
beta=beta,
sign_decay_fn=sign_decay_fn,
)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
global_step=global_step)
neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
global_step=global_step)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of AddSign
# first 4 steps with positive gradient
# last 3 steps with negative gradient (sign(gm) should be -1)
for t in range(1, 8):
if t < 5:
update.run()
else:
neg_update.run()
var0_np, m0 = addsign_update_numpy(
var0_np,
grads0_np if t < 5 else -grads0_np,
m0,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
var1_np, m1 = addsign_update_numpy(
var1_np,
grads1_np if t < 5 else -grads1_np,
m1,
learning_rate,
alpha=alpha,
beta=beta,
py_sign_decay_fn=py_sign_decay_fn,
t=t,
)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), half_rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10
sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)
py_sign_decay_fn = py_linear_decay_fn(decay_steps)
self._testDense()
self._testDense(learning_rate=0.01, alpha=0.1, beta=0.8)
self._testDense(
sign_decay_fn=sign_decay_fn, py_sign_decay_fn=py_sign_decay_fn)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "c6728bf70d19dac1e95047d014af8a4c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 80,
"avg_line_length": 33.796875,
"alnum_prop": 0.5654184003698567,
"repo_name": "apark263/tensorflow",
"id": "a37c97e6d374440aeb860b9d02f2d5dd95c91f62",
"size": "5015",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/addsign_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_ADD_MEMBER_URL as add_member_url
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_CREATE_URL as create_url
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_INDEX_URL as index_url
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_INDEX_VIEW_TEMPLATE
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_MANAGE_URL as manage_url
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_MANAGE_VIEW_TEMPLATE
from openstack_dashboard.dashboards.admin.groups.constants \
import GROUPS_UPDATE_URL as update_url
GROUPS_INDEX_URL = reverse(index_url)
GROUP_CREATE_URL = reverse(create_url)
GROUP_UPDATE_URL = reverse(update_url, args=[1])
GROUP_MANAGE_URL = reverse(manage_url, args=[1])
GROUP_ADD_MEMBER_URL = reverse(add_member_url, args=[1])
class GroupsViewTests(test.BaseAdminViewTests):
def _get_domain_id(self):
return self.request.session.get('domain_context', None)
def _get_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('group_list',)})
def test_index(self):
domain_id = self._get_domain_id()
groups = self._get_groups(domain_id)
api.keystone.group_list(IgnoreArg()).AndReturn(groups)
self.mox.ReplayAll()
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, GROUPS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, groups)
if domain_id:
for group in res.context['table'].data:
self.assertItemsEqual(group.domain_id, domain_id)
self.assertContains(res, 'Create Group')
self.assertContains(res, 'Edit')
self.assertContains(res, 'Delete Group')
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('group_list',
'keystone_can_edit_group')})
def test_index_with_keystone_can_edit_group_false(self):
domain_id = self._get_domain_id()
groups = self._get_groups(domain_id)
api.keystone.group_list(IgnoreArg()).AndReturn(groups)
api.keystone.keystone_can_edit_group() \
.MultipleTimes().AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, GROUPS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, groups)
self.assertNotContains(res, 'Create Group')
self.assertNotContains(res, 'Edit')
self.assertNotContains(res, 'Delete Group')
@test.create_stubs({api.keystone: ('group_create', )})
def test_create(self):
domain_id = self._get_domain_id()
group = self.groups.get(id="1")
api.keystone.group_create(IsA(http.HttpRequest),
description=group.description,
domain_id=domain_id,
name=group.name).AndReturn(group)
self.mox.ReplayAll()
formData = {'method': 'CreateGroupForm',
'name': group.name,
'description': group.description}
res = self.client.post(GROUP_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_stubs({api.keystone: ('group_get',
'group_update')})
def test_update(self):
group = self.groups.get(id="1")
test_description = 'updated description'
api.keystone.group_get(IsA(http.HttpRequest), '1').AndReturn(group)
api.keystone.group_update(IsA(http.HttpRequest),
description=test_description,
group_id=group.id,
name=group.name).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateGroupForm',
'group_id': group.id,
'name': group.name,
'description': test_description}
res = self.client.post(GROUP_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('group_list',
'group_delete')})
def test_delete_group(self):
group = self.groups.get(id="2")
api.keystone.group_list(IgnoreArg()).AndReturn(self.groups.list())
api.keystone.group_delete(IgnoreArg(), group.id)
self.mox.ReplayAll()
formData = {'action': 'groups__delete__%s' % group.id}
res = self.client.post(GROUPS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, GROUPS_INDEX_URL)
@test.create_stubs({api.keystone: ('group_get',
'user_list',)})
def test_manage(self):
group = self.groups.get(id="1")
group_members = self.users.list()
api.keystone.group_get(IsA(http.HttpRequest), group.id).\
AndReturn(group)
api.keystone.user_list(IgnoreArg(),
group=group.id).\
AndReturn(group_members)
self.mox.ReplayAll()
res = self.client.get(GROUP_MANAGE_URL)
self.assertTemplateUsed(res, GROUPS_MANAGE_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, group_members)
@test.create_stubs({api.keystone: ('user_list',
'remove_group_user')})
def test_remove_user(self):
group = self.groups.get(id="1")
user = self.users.get(id="2")
api.keystone.user_list(IgnoreArg(),
group=group.id).\
AndReturn(self.users.list())
api.keystone.remove_group_user(IgnoreArg(),
group_id=group.id,
user_id=user.id)
self.mox.ReplayAll()
formData = {'action': 'group_members__removeGroupMember__%s' % user.id}
res = self.client.post(GROUP_MANAGE_URL, formData)
self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('group_get',
'user_list',
'add_group_user')})
def test_add_user(self):
group = self.groups.get(id="1")
user = self.users.get(id="2")
api.keystone.group_get(IsA(http.HttpRequest), group.id).\
AndReturn(group)
api.keystone.user_list(IgnoreArg(),
domain=group.domain_id).\
AndReturn(self.users.list())
api.keystone.user_list(IgnoreArg(),
group=group.id).\
AndReturn(self.users.list()[2:])
api.keystone.add_group_user(IgnoreArg(),
group_id=group.id,
user_id=user.id)
self.mox.ReplayAll()
formData = {'action': 'group_non_members__addMember__%s' % user.id}
res = self.client.post(GROUP_ADD_MEMBER_URL, formData)
self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
self.assertMessageCount(success=1)
|
{
"content_hash": "c211e168396a6b5b44b2c1fc6c28591b",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 37.04524886877828,
"alnum_prop": 0.5891046781482838,
"repo_name": "rackerlabs/horizon",
"id": "2bb59105bd8fe0046edc7b514586cd560e04a01c",
"size": "8866",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/groups/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from logscan.match import Matcher
from logscan.watch import Watcher
from logscan.schedule import Schedule
if __name__ == '__main__':
import sys
sched = Schedule()
try:
sched.add_watcher(Watcher(sys.argv[1], Matcher('#123#')))
sched.add_watcher(Watcher(sys.argv[2], Matcher('#123#')))
sched.join()
except KeyboardInterrupt:
sched.remove_watcher(sys.argv[1])
sched.remove_watcher(sys.argv[2])
sched.join()
|
{
"content_hash": "7f786003c0f14b57b0ceca6723378a36",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 31,
"alnum_prop": 0.6430107526881721,
"repo_name": "magedu/logscan",
"id": "8c3433a21009993b9c4e9523567382e9d8ecdb18",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19569"
}
],
"symlink_target": ""
}
|
from base import Cron
__all__ = ['Cron']
|
{
"content_hash": "5082721800aae28bf4eaa59921491805",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 21,
"avg_line_length": 20,
"alnum_prop": 0.625,
"repo_name": "divio/django-cronjobs",
"id": "15c903dd48321cbae2645a83dec89f059a013085",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cronjobs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23797"
}
],
"symlink_target": ""
}
|
"""Cluster creation related checks"""
from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
conductor = c.API
data_source_type = {
"type": "string",
"enum": ["swift", "hdfs", "maprfs", "manila"]
}
job_configs = {
"type": "object",
"properties": {
"configs": {
"type": "simple_config",
},
"params": {
"type": "simple_config",
},
"args": {
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": False,
}
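# Example payload accepted by the job_configs schema above (values are
# hypothetical):
#   {"configs": {"mapred.reduce.tasks": "1"},
#    "params": {"INPUT": "swift://container/input"},
#    "args": ["arg1", "arg2"]}
# Any other top-level key is rejected because additionalProperties is False.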
def check_data_source_unique_name(name):
if name in [ds.name for ds in conductor.data_source_get_all(
context.ctx())]:
raise ex.NameAlreadyExistsException(_("Data source with name '%s' "
"already exists") % name)
def check_data_source_exists(data_source_id):
if not conductor.data_source_get(context.ctx(), data_source_id):
raise ex.InvalidReferenceException(
_("DataSource with id '%s' doesn't exist") % data_source_id)
def check_job_unique_name(name):
if name in [j.name for j in conductor.job_get_all(context.ctx())]:
raise ex.NameAlreadyExistsException(_("Job with name '%s' "
"already exists") % name)
def check_job_binary_internal_exists(jbi_id):
if not conductor.job_binary_internal_get(context.ctx(), jbi_id):
raise ex.InvalidReferenceException(
_("JobBinaryInternal with id '%s' doesn't exist") % jbi_id)
def check_data_sources_are_different(data_source_1_id, data_source_2_id):
ds1 = conductor.data_source_get(context.ctx(), data_source_1_id)
ds2 = conductor.data_source_get(context.ctx(), data_source_2_id)
if ds1.type == ds2.type and ds1.url == ds2.url:
raise ex.InvalidDataException(_('Provided input and output '
'DataSources reference the same '
'location: %s') % ds1.url)
|
{
"content_hash": "068536b672c8a760cd4d6d0d9c8ed142",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 75,
"avg_line_length": 31.567164179104477,
"alnum_prop": 0.566903073286052,
"repo_name": "crobby/sahara",
"id": "f664f2995020e0f86d9e5d7232e8d0360282d9a5",
"size": "2698",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara/service/validations/edp/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "33627"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3355980"
},
{
"name": "Shell",
"bytes": "61693"
}
],
"symlink_target": ""
}
|
""" Provide the Either property.
The Either property is used to construct properties that accept any of
multiple possible types.
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Type, TypeVar
# Bokeh imports
from ...util.strings import nice_join
from ._sphinx import property_link, register_type_link, type_link
from .bases import (
Init,
ParameterizedProperty,
Property,
TypeOrInst,
)
from .singletons import Intrinsic
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Either',
)
T = TypeVar("T")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Either(ParameterizedProperty[Any]):
""" Accept values according to a sequence of other property types.
Example:
.. code-block:: python
>>> class EitherModel(HasProps):
... prop = Either(Bool, Int, Auto)
...
>>> m = EitherModel()
>>> m.prop = True
>>> m.prop = 10
>>> m.prop = "auto"
>>> m.prop = 10.3 # ValueError !!
>>> m.prop = "foo" # ValueError !!
"""
def __init__(self, type_param0: TypeOrInst[Property[Any]], *type_params: TypeOrInst[Property[Any]],
default: Init[T] = Intrinsic, help: str | None = None) -> None:
super().__init__(type_param0, *type_params, default=default, help=help)
for tp in self.type_params:
self.alternatives.extend(tp.alternatives)
def transform(self, value: Any) -> Any:
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError(f"Could not transform {value!r}")
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
if any(param.is_valid(value) for param in self.type_params):
return
msg = "" if not detail else f"expected an element of either {nice_join([ str(param) for param in self.type_params ])}, got {value!r}"
raise ValueError(msg)
def wrap(self, value):
for tp in self.type_params:
value = tp.wrap(value)
return value
def replace(self, old: Type[Property[Any]], new: Property[Any]) -> Property[Any]:
if self.__class__ == old:
return new
else:
params = [ type_param.replace(old, new) for type_param in self.type_params ]
return Either(*params)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(Either)
def _sphinx_type_link(obj: Either[Any]):
subtypes = ", ".join(type_link(x) for x in obj.type_params)
return f"{property_link(obj)}({subtypes})"
|
{
"content_hash": "82d348924aafa734db2db27b5c2b6e89",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 141,
"avg_line_length": 31.233870967741936,
"alnum_prop": 0.4265427317325071,
"repo_name": "bokeh/bokeh",
"id": "ebea711dc4faf1e55f374b98ca481e0aa8243d98",
"size": "4204",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "src/bokeh/core/property/either.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
"""Settings for the Au-to-do app."""
TEMPLATE_BASE_PATH = 'templates/'
# Prediction API credentials keyname.
CREDENTIALS_KEYNAME = 'prediction_credentials'
# OAuth 2.0 related constant.
CLIENT_ID = (
'your_client_id'
)
CLIENT_SECRET = 'your_client_secret'
# TODO(user): Make sure that all the scopes are included.
SCOPES = ['https://www.googleapis.com/auth/prediction']
USER_AGENT = 'au-to-do'
DOMAIN = 'anonymous'
# Whether or not to use memcache for caching of JSON models.
USE_MEMCACHE_FOR_JSON_MODELS = True
MEMCACHE_VERSION_PREFIX = '1-'
# Google Storage Legacy Access
GS_LEGACY_ACCESS = 'your_legacy_access_key'
GS_LEGACY_SECRET = 'your_legacy_access_secret'
GS_BUCKET = 'autodo-predictionmodels'
|
{
"content_hash": "74d0f7cdb9bd7cf21f0e6def60f50207",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 60,
"avg_line_length": 25.535714285714285,
"alnum_prop": 0.7314685314685314,
"repo_name": "stigsfoot/support-autodo",
"id": "27cc437e9dc7919db517e81b3135c4cd9d2b2fb6",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "41708"
},
{
"name": "Python",
"bytes": "126839"
}
],
"symlink_target": ""
}
|
import collections
import json
from typing import Dict, Union
from TM1py.Objects.Subset import Subset, AnonymousSubset
from TM1py.Objects.TM1Object import TM1Object
from TM1py.Utils import format_url
class ViewAxisSelection(TM1Object):
""" Describes what is selected in a dimension on an axis. Can be a Registered Subset or an Anonymous Subset
"""
def __init__(self, dimension_name: str, subset: Union[Subset, AnonymousSubset]):
"""
:Parameters:
`dimension_name` : String
`subset` : Subset or AnonymousSubset
"""
self._subset = subset
self._dimension_name = dimension_name
self._hierarchy_name = dimension_name
@property
def subset(self) -> Union[Subset, AnonymousSubset]:
return self._subset
@property
def dimension_name(self) -> str:
return self._dimension_name
@property
def hierarchy_name(self) -> str:
return self._hierarchy_name
@property
def body(self) -> str:
return json.dumps(self._construct_body(), ensure_ascii=False)
@property
def body_as_dict(self) -> Dict:
return self._construct_body()
def _construct_body(self) -> Dict:
""" construct the ODATA conform JSON represenation for the ViewAxisSelection entity.
:return: dictionary
"""
body_as_dict = collections.OrderedDict()
if isinstance(self._subset, AnonymousSubset):
body_as_dict['Subset'] = json.loads(self._subset.body)
elif isinstance(self._subset, Subset):
subset_path = format_url(
"Dimensions('{}')/Hierarchies('{}')/Subsets('{}')",
self._dimension_name, self._hierarchy_name, self._subset.name)
body_as_dict['Subset@odata.bind'] = subset_path
return body_as_dict
class ViewTitleSelection:
""" Describes what is selected in a dimension on the view title.
Can be a Registered Subset or an Anonymous Subset
"""
def __init__(self, dimension_name: str, subset: Union[AnonymousSubset, Subset], selected: str):
self._dimension_name = dimension_name
self._hierarchy_name = dimension_name
self._subset = subset
self._selected = selected
@property
def subset(self) -> Union[Subset, AnonymousSubset]:
return self._subset
@property
def dimension_name(self) -> str:
return self._dimension_name
@property
def hierarchy_name(self) -> str:
return self._hierarchy_name
@property
def selected(self) -> str:
return self._selected
@property
def body(self) -> str:
return json.dumps(self._construct_body(), ensure_ascii=False)
def _construct_body(self) -> Dict:
""" construct the ODATA conform JSON represenation for the ViewTitleSelection entity.
:return: string, the valid JSON
"""
body_as_dict = collections.OrderedDict()
if isinstance(self._subset, AnonymousSubset):
body_as_dict['Subset'] = json.loads(self._subset.body)
elif isinstance(self._subset, Subset):
subset_path = format_url(
"Dimensions('{}')/Hierarchies('{}')/Subsets('{}')",
self._dimension_name, self._hierarchy_name, self._subset.name)
body_as_dict['Subset@odata.bind'] = subset_path
element_path = format_url(
"Dimensions('{}')/Hierarchies('{}')/Elements('{}')",
self._dimension_name, self._hierarchy_name, self._selected)
body_as_dict['Selected@odata.bind'] = element_path
return body_as_dict
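# Example (dimension, subset and element names are hypothetical): for a
# registered Subset 'MySubset' on dimension 'Region' with element 'Germany'
# selected in a title, _construct_body yields
#   {'Subset@odata.bind': "Dimensions('Region')/Hierarchies('Region')/Subsets('MySubset')",
#    'Selected@odata.bind': "Dimensions('Region')/Hierarchies('Region')/Elements('Germany')"}
# whereas an AnonymousSubset is embedded inline under the 'Subset' key instead.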
|
{
"content_hash": "6c615455c961d41169b74396dfcde287",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 111,
"avg_line_length": 33.127272727272725,
"alnum_prop": 0.6215697036223929,
"repo_name": "OLAPLINE/TM1py",
"id": "f6d50c9272f47fd2bd2ceb426794ee42f03699f7",
"size": "3669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TM1py/Objects/Axis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94131"
}
],
"symlink_target": ""
}
|
from bambou import NURESTFetcher
class NUOverlayAddressPoolsFetcher(NURESTFetcher):
""" Represents a NUOverlayAddressPools fetcher
Notes:
This fetcher enables to fetch NUOverlayAddressPool objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUOverlayAddressPool class that is managed.
Returns:
.NUOverlayAddressPool: the managed class
"""
from .. import NUOverlayAddressPool
return NUOverlayAddressPool
|
{
"content_hash": "59cc92241cdcaab3f8d831956ab9ac36",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 22.64,
"alnum_prop": 0.6431095406360424,
"repo_name": "nuagenetworks/vspk-python",
"id": "d4ab37c67047d8752150cdeb4f7237d67b538031",
"size": "2177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vspk/v5_0/fetchers/nuoverlayaddresspools_fetcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
package_name = 'grift'
setup(name=package_name,
version='0.7.1',
description='A clean approach to app configuration',
keywords='app config configuration schema python',
maintainer_email='grift-maintainer@kensho.com',
url='https://github.com/kensho-technologies/grift',
packages=find_packages(),
package_data={package_name + '.tests': ['*.json']},
install_requires=[
'schematics==1.1.1',
'requests>=2.13.0',
'six>=1.11.0,<2',
],
)
|
{
"content_hash": "1b7ab8db4774ca658c07c10c17f5412d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 31.444444444444443,
"alnum_prop": 0.6113074204946997,
"repo_name": "kensho-technologies/grift",
"id": "2745f720d7c3c7013bcbe92e190d1ba09ec306c2",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34715"
}
],
"symlink_target": ""
}
|
import sys
import os
import inspect
import jsonpickle
from raygun4py import __version__
try:
import multiprocessing
USE_MULTIPROCESSING = True
except ImportError:
USE_MULTIPROCESSING = False
import platform
from datetime import datetime
from raygun4py import http_utilities
class RaygunMessageBuilder(object):
def __init__(self, options):
self.raygunMessage = RaygunMessage()
self.options = options
def new(self):
return RaygunMessageBuilder(self.options)
def build(self):
return self.raygunMessage
def set_machine_name(self, name):
self.raygunMessage.details['machineName'] = name
return self
def set_environment_details(self, extra_environment_data):
self.raygunMessage.details['environment'] = {
"environmentVariables": dict(os.environ.data) if hasattr(os.environ, 'data') else None,
"runtimeLocation": sys.executable,
"runtimeVersion": 'Python ' + sys.version
}
if self.options.get('transmit_environment_variables', True) is False:
self.raygunMessage.details['environment']['environmentVariables'] = None
# Wrap these so we gracefully fail if we cannot access the system details for any reason
try:
self.raygunMessage.details['environment']["processorCount"] = (
multiprocessing.cpu_count() if USE_MULTIPROCESSING else "n/a"
)
except Exception: # pragma: no cover
pass
try:
self.raygunMessage.details['environment']["architecture"] = platform.architecture()[0]
except Exception: # pragma: no cover
pass
try:
self.raygunMessage.details['environment']["cpu"] = platform.processor()
except Exception: # pragma: no cover
pass
try:
self.raygunMessage.details['environment']["oSVersion"] = "%s %s" % \
(platform.system(), platform.release())
except Exception: # pragma: no cover
pass
if extra_environment_data is not None:
merged = extra_environment_data.copy()
merged.update(self.raygunMessage.details['environment'])
self.raygunMessage.details['environment'] = merged
return self
def set_exception_details(self, raygunExceptionMessage):
self.raygunMessage.details['error'] = raygunExceptionMessage
return self
def set_client_details(self):
self.raygunMessage.details['client'] = {
"name": "raygun4py",
"version": __version__,
"clientUrl": "https://github.com/MindscapeHQ/raygun4py"
}
return self
def set_customdata(self, user_custom_data):
if type(user_custom_data) is dict:
self.raygunMessage.details['userCustomData'] = user_custom_data
return self
def set_tags(self, tags):
if type(tags) is list:
if not self.raygunMessage.details.get('tags'):
self.raygunMessage.details['tags'] = []
self.raygunMessage.details['tags'] += tags
return self
def set_request_details(self, request):
if not request:
return self
rg_request_details = http_utilities.build_wsgi_compliant_request(request)
self.raygunMessage.details['request'] = rg_request_details
return self
def set_version(self, version):
self.raygunMessage.details['version'] = version
return self
def set_user(self, user):
if user is not None:
self.raygunMessage.details['user'] = user
return self
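# Typical use is a fluent chain, since every setter returns the builder
# (argument values here are illustrative):
#   message = RaygunMessageBuilder(options).new() \
#       .set_machine_name('web-01') \
#       .set_client_details() \
#       .set_version('1.2.3') \
#       .build()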
class RaygunMessage(object):
def __init__(self):
self.occurredOn = datetime.utcnow()
self.details = {}
def get_error(self):
return self.details['error']
def get_details(self):
return self.details
class RaygunErrorMessage(object):
def __init__(self, exc_type, exc_value, exc_traceback, options):
self.className = exc_type.__name__
self.message = "%s: %s" % (exc_type.__name__, exc_value)
self.stackTrace = []
frames = None
try:
frames = inspect.getinnerframes(exc_traceback)
if frames:
for frame in frames:
localVariables = None
if 'transmitLocalVariables' in options and options['transmitLocalVariables'] is True:
localVariables = self._get_locals(frame[0])
self.stackTrace.append({
'lineNumber': frame[2],
'className': frame[3],
'fileName': frame[1],
'methodName': frame[4][0] if frame[4] is not None else None,
'localVariables': localVariables
})
if 'transmitGlobalVariables' in options and options['transmitGlobalVariables'] is True and len(frames) > 0:
self.globalVariables = frames[-1][0].f_globals
finally:
del frames
self.data = ""
try:
jsonpickle.encode(self, unpicklable=False)
except Exception as e:
if self.globalVariables:
self.globalVariables = None
try:
jsonpickle.encode(self, unpicklable=False)
except Exception as e:
for frame in self.stackTrace:
if 'localVariables' in frame:
frame['localVariables'] = None
def get_classname(self):
return self.className
def _get_locals(self, frame):
result = {}
localVars = getattr(frame, 'f_locals', {})
if '__traceback_hide__' not in localVars:
for key in localVars:
try:
# Note that str() *can* fail; thus protect against it as much as we can.
if type(localVars[key]) is unicode:
result[key] = localVars[key]
else:
result[key] = str(localVars[key])
except Exception as e:
try:
r = repr(localVars[key])
except Exception as re:
r = "Couldn't convert to repr due to {0}".format(re)
result[key] = "!!! Couldn't convert {0!r} (repr: {1}) due to {2!r} !!!".format(key, r, e)
return result
|
{
"content_hash": "56121f6c27b475f313efa9eb6730408c",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 123,
"avg_line_length": 33.30612244897959,
"alnum_prop": 0.5681678921568627,
"repo_name": "MindscapeHQ/raygun4py",
"id": "a36dca408968906da2498bb6e432ff5397ef4daa",
"size": "6528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/raygun4py/raygunmsgs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105314"
}
],
"symlink_target": ""
}
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import sys
import tarfile
import shutil
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import IN, FILE_IN, FILE_OUT
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.api import barrier, compss_wait_on, compss_open, compss_delete_file
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import IN, FILE_IN, FILE_OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task, constraint # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import barrier, compss_wait_on, compss_open, compss_delete_file # pylint: disable=ungrouped-imports
from basic_modules.tool import Tool
from basic_modules.metadata import Metadata
from tool.fastq_splitter import fastq_splitter
from tool.aligner_utils import alignerUtils
from tool.bam_utils import bamUtilsTask
# ------------------------------------------------------------------------------
class bwaAlignerTool(Tool): # pylint: disable=invalid-name
"""
Tool for aligning sequence reads to a genome using BWA
"""
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("BWA ALN Aligner")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
@task(returns=bool, genome_file_name=IN, genome_idx=FILE_IN,
amb_file=FILE_OUT, ann_file=FILE_OUT, bwt_file=FILE_OUT,
pac_file=FILE_OUT, sa_file=FILE_OUT)
def untar_index( # pylint: disable=too-many-locals,too-many-arguments
self, genome_file_name, genome_idx,
amb_file, ann_file, bwt_file, pac_file, sa_file):
"""
Extracts the BWA index files from the genome index tar file.
Parameters
----------
genome_file_name : str
Location string of the genome fasta file
genome_idx : str
Location of the BWA index file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
Returns
-------
bool
Boolean indicating if the task was successful
"""
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
return True
gfl = genome_file_name.split("/")
au_handle = alignerUtils()
au_handle.bwa_untar_index(
gfl[-1], genome_idx, amb_file, ann_file, bwt_file, pac_file, sa_file)
return True
@constraint(ComputingUnits="4")
@task(returns=bool, genome_file_loc=FILE_IN, read_file_loc=FILE_IN,
bam_loc=FILE_OUT, amb_file=FILE_IN, ann_file=FILE_IN, bwt_file=FILE_IN,
pac_file=FILE_IN, sa_file=FILE_IN, aln_params=IN, isModifier=False)
def bwa_aligner_single( # pylint: disable=too-many-arguments, no-self-use
self, genome_file_loc, read_file_loc, bam_loc,
amb_file, ann_file, bwt_file, pac_file, sa_file, # pylint: disable=unused-argument
aln_params):
"""
BWA ALN Aligner - Single Ended
Parameters
----------
genome_file_loc : str
Location of the genomic fasta
read_file_loc : str
Location of the FASTQ file
bam_loc : str
Location of the output aligned bam file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
aln_params : dict
Alignment parameters
Returns
-------
bam_loc : str
Location of the output file
"""
if (
os.path.isfile(read_file_loc) is False or
os.path.getsize(read_file_loc) == 0):
return False
out_bam = read_file_loc + '.out.bam'
au_handle = alignerUtils()
logger.info(
"BWA FINISHED: " + str(au_handle.bwa_aln_align_reads_single(
genome_file_loc, read_file_loc, out_bam, aln_params))
)
try:
with open(bam_loc, "wb") as f_out:
with open(out_bam, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("SINGLE ALIGNER: I/O error({0}): {1}".format(error.errno, error.strerror))
return False
os.remove(out_bam)
return True
@constraint(ComputingUnits="4")
@task(returns=bool, genome_file_loc=FILE_IN, read_file_loc1=FILE_IN,
read_file_loc2=FILE_IN, bam_loc=FILE_OUT,
amb_file=FILE_IN, ann_file=FILE_IN, bwt_file=FILE_IN,
pac_file=FILE_IN, sa_file=FILE_IN, aln_params=IN, isModifier=False)
def bwa_aligner_paired( # pylint: disable=too-many-arguments, no-self-use, too-many-locals
self, genome_file_loc, read_file_loc1, read_file_loc2, bam_loc,
amb_file, ann_file, bwt_file, pac_file, sa_file, aln_params): # pylint: disable=unused-argument
"""
BWA ALN Aligner - Paired End
Parameters
----------
genome_file_loc : str
Location of the genomic fasta
read_file_loc1 : str
Location of the FASTQ file
read_file_loc2 : str
Location of the FASTQ file
bam_loc : str
Location of the output aligned bam file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
aln_params : dict
Alignment parameters
Returns
-------
bam_loc : str
Location of the output file
"""
out_bam = read_file_loc1 + '.out.bam'
au_handle = alignerUtils()
logger.info(
"BWA FINISHED: " + str(au_handle.bwa_aln_align_reads_paired(
genome_file_loc, read_file_loc1, read_file_loc2, out_bam, aln_params))
)
try:
with open(bam_loc, "wb") as f_out:
with open(out_bam, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("PARIED ALIGNER: I/O error({0}): {1}".format(error.errno, error.strerror))
return False
os.remove(out_bam)
return True
@staticmethod
def get_aln_params(params):
"""
        Function to handle the extraction of commandline parameters and formatting
them for use in the aligner for BWA ALN
Parameters
----------
params : dict
Returns
-------
list
"""
command_parameters = {
"bwa_edit_dist_param": ["-n", True],
"bwa_max_gap_open_param": ["-o", True],
"bwa_max_gap_ext_param": ["-e", True],
"bwa_dis_long_del_range_param": ["-d", True],
"bwa_dis_indel_range_param": ["-i", True],
"bwa_n_subseq_seed_param": ["-l", True],
"bwa_max_edit_dist_param": ["-k", True],
"bwa_mismatch_penalty_param": ["-M", True],
"bwa_gap_open_penalty_param": ["-O", True],
"bwa_gap_ext_penalty_param": ["-E", True],
"bwa_use_subopt_threshold_param": ["-R", True],
"bwa_reverse_query_param": ["-c", False],
"bwa_dis_iter_search_param": ["-N", False],
"bwa_read_trim_param": ["-q", True],
"bwa_barcode_len_param": ["-B", True]
}
command_params = []
for param in params:
if param in command_parameters:
if command_parameters[param][1] and params[param] != "":
command_params = command_params + [command_parameters[param][0], params[param]]
else:
if command_parameters[param][0] and params[param] is not False:
command_params.append(command_parameters[param][0])
return command_params
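    # Example (parameter values are hypothetical): a configuration containing
    # {"bwa_edit_dist_param": "0.04", "bwa_reverse_query_param": True} yields
    # ["-n", "0.04", "-c"]; flags that take a value are skipped when the value
    # is "" and boolean flags are skipped when False.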
def run(self, input_files, input_metadata, output_files): # pylint: disable=too-many-branches,too-many-locals,too-many-statements
"""
        The main function to align FASTQ reads to a genome using BWA
Parameters
----------
input_files : dict
File 0 is the genome file location, file 1 is the FASTQ file
metadata : dict
output_files : dict
Returns
-------
output_files : dict
First element is a list of output_bam_files, second element is the
matching meta data
output_metadata : dict
"""
tasks_done = 0
task_count = 6
untar_idx = True
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
untar_idx = False
task_count = 5
index_files = {
"amb": input_files["genome"] + ".amb",
"ann": input_files["genome"] + ".ann",
"bwt": input_files["genome"] + ".bwt",
"pac": input_files["genome"] + ".pac",
"sa": input_files["genome"] + ".sa"
}
if untar_idx:
logger.progress("Untar Index", task_id=tasks_done, total=task_count)
self.untar_index(
input_files["genome"],
input_files["index"],
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"]
)
tasks_done += 1
logger.progress("Untar Index", task_id=tasks_done, total=task_count)
sources = [input_files["genome"]]
fqs = fastq_splitter()
fastq1 = input_files["loc"]
sources.append(input_files["loc"])
logger.progress("FASTQ Splitter", task_id=tasks_done, total=task_count)
fastq_file_gz = os.path.join(
self.configuration["execution"], os.path.split(fastq1)[1] + ".tar.gz")
if "fastq2" in input_files:
fastq2 = input_files["fastq2"]
sources.append(input_files["fastq2"])
fastq_file_list = fqs.paired_splitter(
fastq1, fastq2, fastq_file_gz
)
else:
fastq_file_list = fqs.single_splitter(
fastq1, fastq_file_gz
)
# Required to prevent iterating over the future objects
fastq_file_list = compss_wait_on(fastq_file_list)
# compss_delete_file(fastq1)
# if "fastq2" in input_files:
# compss_delete_file(fastq2)
if not fastq_file_list:
logger.fatal("FASTQ SPLITTER: run failed")
return {}, {}
if hasattr(sys, '_run_from_cmdl') is True:
pass
else:
logger.info("Getting the tar file")
with compss_open(fastq_file_gz, "rb") as f_in:
with open(fastq_file_gz, "wb") as f_out:
f_out.write(f_in.read())
gz_data_path = os.path.split(fastq_file_gz)[0]
try:
tar = tarfile.open(fastq_file_gz)
tar.extractall(path=gz_data_path)
tar.close()
os.remove(fastq_file_gz)
compss_delete_file(fastq_file_gz)
except tarfile.TarError:
logger.fatal("Split FASTQ files: Malformed tar file")
return {}, {}
tasks_done += 1
logger.progress("FASTQ Splitter", task_id=tasks_done, total=task_count)
# input and output share most metadata
output_metadata = {}
output_bam_file = output_files["output"]
# output_bai_file = output_files["bai"]
logger.info("BWA ALIGNER: Aligning sequence reads to the genome")
logger.progress("ALIGNER - jobs = " + str(len(fastq_file_list)),
task_id=tasks_done, total=task_count)
output_bam_list = []
for fastq_file_pair in fastq_file_list:
if "fastq2" in input_files:
tmp_fq1 = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
tmp_fq2 = os.path.join(gz_data_path, "tmp", fastq_file_pair[1])
output_bam_file_tmp = tmp_fq1 + ".bam"
output_bam_list.append(output_bam_file_tmp)
logger.info("BWA ALN FILES: " + tmp_fq1 + " - " + tmp_fq2)
self.bwa_aligner_paired(
str(input_files["genome"]), tmp_fq1, tmp_fq2, output_bam_file_tmp,
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"],
self.get_aln_params(self.configuration)
)
else:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
output_bam_file_tmp = tmp_fq + ".bam"
output_bam_list.append(output_bam_file_tmp)
logger.info("BWA ALN FILES: " + tmp_fq)
self.bwa_aligner_single(
str(input_files["genome"]), tmp_fq, output_bam_file_tmp,
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"],
self.get_aln_params(self.configuration)
)
barrier()
# Remove all tmp fastq files now that the reads have been aligned
if untar_idx:
for idx_file in index_files:
compss_delete_file(index_files[idx_file])
if hasattr(sys, '_run_from_cmdl') is True:
pass
else:
for fastq_file_pair in fastq_file_list:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
compss_delete_file(tmp_fq)
try:
os.remove(tmp_fq)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
if "fastq2" in input_files:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[1])
compss_delete_file(tmp_fq)
try:
os.remove(tmp_fq)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
tasks_done += 1
logger.progress("ALIGNER", task_id=tasks_done, total=task_count)
bam_handle = bamUtilsTask()
logger.progress("Merging bam files", task_id=tasks_done, total=task_count)
bam_handle.bam_merge(output_bam_list)
tasks_done += 1
logger.progress("Merging bam files", task_id=tasks_done, total=task_count)
# Remove all bam files that are not the final file
for i in output_bam_list[1:len(output_bam_list)]:
try:
compss_delete_file(i)
os.remove(i)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
logger.progress("Sorting merged bam file", task_id=tasks_done, total=task_count)
bam_handle.bam_sort(output_bam_list[0])
tasks_done += 1
logger.progress("Sorting merged bam file", task_id=tasks_done, total=task_count)
logger.progress("Copying bam file into the output file",
task_id=tasks_done, total=task_count)
bam_handle.bam_copy(output_bam_list[0], output_bam_file)
tasks_done += 1
logger.progress("Copying bam file into the output file",
task_id=tasks_done, total=task_count)
compss_delete_file(output_bam_list[0])
bam_handle.bam_index(output_bam_file, output_files["bai"])
logger.info("BWA ALIGNER: Alignments complete")
barrier()
try:
shutil.rmtree(gz_data_path + "/tmp")
except (OSError, IOError) as msg:
logger.warn(
"Already tidy I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
output_metadata = {
"bam": Metadata(
data_type=input_metadata['loc'].data_type,
file_type="BAM",
file_path=output_files["output"],
sources=sources,
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "bwa_aligner",
"parameters": self.get_aln_params(self.configuration),
"associated_files": [output_files["bai"]]
}
),
"bai": Metadata(
data_type=input_metadata['loc'].data_type,
file_type="BAI",
file_path=output_files["bai"],
sources=sources,
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "bs_seeker_aligner",
"associated_master": output_bam_file
}
)
}
return (
{"bam": output_files["output"], "bai": output_files["bai"]},
output_metadata
)
# ------------------------------------------------------------------------------
|
{
"content_hash": "86a11c05549c7ed24a210030d5fa664a",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 134,
"avg_line_length": 36.1651376146789,
"alnum_prop": 0.5331811263318113,
"repo_name": "Multiscale-Genomics/mg-process-fastq",
"id": "e43cb310160f3ea3614f9fd337af1482392538ba",
"size": "19710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool/bwa_aligner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "831267"
},
{
"name": "R",
"bytes": "5100"
},
{
"name": "Shell",
"bytes": "24651"
}
],
"symlink_target": ""
}
|
import os
import glob
import sys
import platform
import subprocess
import difflib
import filecmp
import shutil
from optparse import OptionParser
#
# Get standard testsuite test arguments: srcdir exepath
#
srcdir = "."
tmpdir = "."
path = "../.."
# Options for the command line
parser = OptionParser()
parser.add_option("-p", "--path", help="add to executable path",
action="store", type="string", dest="path", default="")
parser.add_option("--devenv-config", help="use a MS Visual Studio configuration",
action="store", type="string", dest="devenv_config", default="")
parser.add_option("--solution-path", help="MS Visual Studio solution path",
action="store", type="string", dest="solution_path", default="")
(options, args) = parser.parse_args()
if args and len(args) > 0 :
srcdir = args[0]
srcdir = os.path.abspath (srcdir) + "/"
os.chdir (srcdir)
if args and len(args) > 1 :
path = args[1]
path = os.path.normpath (path)
tmpdir = "."
tmpdir = os.path.abspath (tmpdir)
refdir = "ref/"
refdirlist = [ refdir ]
parent = "../../../../../"
test_source_dir = "../../../../testsuite/" + os.path.basename(os.path.abspath(srcdir))
command = ""
outputs = [ "out.txt" ] # default
failureok = 0
failthresh = 0.004
hardfail = 0.012
failpercent = 0.02
image_extensions = [ ".tif", ".tx", ".exr", ".jpg", ".png", ".rla", ".dpx" ]
# print ("srcdir = " + srcdir)
# print ("tmpdir = " + tmpdir)
# print ("path = " + path)
# print ("refdir = " + refdir)
# print ("test source dir = " + test_source_dir)
if platform.system() == 'Windows' :
if not os.path.exists("./ref") :
shutil.copytree (os.path.join (test_source_dir, "ref"), "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
shutil.copytree (os.path.join (test_source_dir, "src"), "./src")
# if not os.path.exists("../common") :
# shutil.copytree ("../../../testsuite/common", "..")
else :
if not os.path.exists("./ref") :
os.symlink (os.path.join (test_source_dir, "ref"), "./ref")
if os.path.exists (os.path.join (test_source_dir, "src")) and not os.path.exists("./src") :
os.symlink (os.path.join (test_source_dir, "src"), "./src")
if not os.path.exists("../common") :
os.symlink ("../../../testsuite/common", "../common")
###########################################################################
# Handy functions...
# Compare two text files. Returns 0 if they are equal, otherwise returns
# a non-zero value and writes the differences to "diff_file".
# Based on the command-line interface to difflib example from the Python
# documentation
def text_diff (fromfile, tofile, diff_file=None):
import time
try:
fromdate = time.ctime (os.stat (fromfile).st_mtime)
todate = time.ctime (os.stat (tofile).st_mtime)
fromlines = open (fromfile, 'rU').readlines()
tolines = open (tofile, 'rU').readlines()
except:
print ("Unexpected error:", sys.exc_info()[0])
return -1
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile,
fromdate, todate)
# Diff is a generator, but since we need a way to tell if it is
# empty we just store all the text in advance
diff_lines = [l for l in diff]
if not diff_lines:
return 0
if diff_file:
try:
open (diff_file, 'w').writelines (diff_lines)
print ("Diff " + fromfile + " vs " + tofile + " was:\n-------")
# print (diff)
print ("".join(diff_lines))
except:
print ("Unexpected error:", sys.exc_info()[0])
return 1
def oiio_relpath (path, start=os.curdir):
"Wrapper around os.path.relpath which always uses '/' as the separator."
p = os.path.relpath (path, start)
return p if sys.platform != "win32" else p.replace ('\\', '/')
def oiio_app (app):
# When we use Visual Studio, built applications are stored
# in the app/$(OutDir)/ directory, e.g., Release or Debug.
if (platform.system () != 'Windows' or options.devenv_config == ""):
return os.path.join (path, "src", app, app) + " "
else:
return os.path.join (path, "src", app, options.devenv_config, app) + " "
# Construct a command that will print info for an image, appending output to
# the file "out.txt". If 'safematch' is nonzero, it will exclude printing
# of fields that tend to change from run to run or release to release.
def info_command (file, extraargs="", safematch=0, hash=True) :
if safematch :
extraargs += " --no-metamatch \"DateTime|Software|OriginatingProgram|ImageHistory\""
if hash :
extraargs += " --hash"
return (oiio_app("oiiotool") + "--info -v -a " + extraargs
+ " " + oiio_relpath(file,tmpdir) + " >> out.txt ;\n")
# Construct a command that will compare two images, appending output to
# the file "out.txt". We allow a small number of pixels to have up to
# 1 LSB (8 bit) error, it's very hard to make different platforms and
# compilers always match to every last floating point bit.
def diff_command (fileA, fileB, extraargs="", silent=False, concat=True) :
command = (oiio_app("idiff") + "-a"
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " -warnpercent " + str(failpercent)
+ " " + extraargs + " " + oiio_relpath(fileA,tmpdir)
+ " " + oiio_relpath(fileB,tmpdir))
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
return command
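# With the module defaults above (failthresh=0.004, failpercent=0.02,
# hardfail=0.012), the generated string looks roughly like (file names are
# placeholders):
#   idiff -a -fail 0.004 -failpercent 0.02 -hardfail 0.012 -warn 0.008
#       -warnpercent 0.02 imgA.tif imgB.tif >> out.txt ;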
# Construct a command that will create a texture, appending console
# output to the file "out.txt".
def maketx_command (infile, outfile, extraargs="",
showinfo=False, showinfo_extra="",
silent=False, concat=True) :
command = (oiio_app("maketx")
+ " " + oiio_relpath(infile,tmpdir)
+ " " + extraargs
+ " -o " + oiio_relpath(outfile,tmpdir) )
if not silent :
command += " >> out.txt"
if concat:
command += " ;\n"
if showinfo:
command += info_command (outfile, extraargs=showinfo_extra, safematch=1)
return command
# Construct a command that will test the basic ability to read and write
# an image, appending output to the file "out.txt". First, iinfo the
# file, including a hash (VERY unlikely not to match if we've read
# correctly). If testwrite is nonzero, also iconvert the file to make a
# copy (tests writing that format), and then idiff to make sure it
# matches the original.
def rw_command (dir, filename, testwrite=1, use_oiiotool=0, extraargs="",
preargs="", idiffextraargs="") :
fn = oiio_relpath (dir + "/" + filename, tmpdir)
cmd = (oiio_app("oiiotool") + " --info -v -a --hash " + fn
+ " >> out.txt ;\n")
if testwrite :
if use_oiiotool :
cmd = (cmd + oiio_app("oiiotool") + preargs + " " + fn
+ " " + extraargs + " -o " + filename + " >> out.txt ;\n")
else :
cmd = (cmd + oiio_app("iconvert") + preargs + " " + fn
+ " " + extraargs + " " + filename + " >> out.txt ;\n")
cmd = (cmd + oiio_app("idiff") + " -a " + fn
+ " -fail " + str(failthresh)
+ " -failpercent " + str(failpercent)
+ " -hardfail " + str(hardfail)
+ " -warn " + str(2*failthresh)
+ " " + idiffextraargs + " " + filename + " >> out.txt ;\n")
return cmd
# Construct a command that will testtex
def testtex_command (file, extraargs="") :
cmd = (oiio_app("testtex") + " " + file + " " + extraargs + " " +
" >> out.txt ;\n")
return cmd
# Construct a command that will run oiiotool and append its output to out.txt
def oiiotool (args, silent=False, concat=True) :
cmd = (oiio_app("oiiotool") + " " + args)
if not silent :
cmd += " >> out.txt"
if concat:
cmd += " ;\n"
return cmd
# Check one output file against reference images in a list of reference
# directories. For each directory, it will first check for a match under
# the identical name, and if that fails, it will look for alternatives of
# the form "basename-*.ext".
def checkref (name, refdirlist) :
# Break the output into prefix+extension
(prefix, extension) = os.path.splitext(name)
ok = 0
for ref in refdirlist :
# We will first compare name to ref/name, and if that fails, we will
# compare it to everything else that matches ref/prefix-*.extension.
# That allows us to have multiple matching variants for different
# platforms, etc.
defaulttest = os.path.join(ref,name)
for testfile in ([defaulttest] + glob.glob (os.path.join (ref, prefix+"-*"+extension))) :
if not os.path.exists(testfile) :
continue
# print ("comparing " + name + " to " + testfile)
if extension in image_extensions :
# images -- use idiff
cmpcommand = diff_command (name, testfile, concat=False, silent=True)
cmpresult = os.system (cmpcommand)
elif extension == ".txt" :
cmpresult = text_diff (name, testfile, name + ".diff")
else :
# anything else
cmpresult = 0
if os.path.exists(testfile) and filecmp.cmp (name, testfile) :
cmpresult = 0
else :
cmpresult = 1
if cmpresult == 0 :
return (True, testfile) # we're done
return (False, defaulttest)
# Run 'command'. For each file in 'outputs', compare it to the copy
# in 'ref/'. If all outputs match their reference copies, return 0
# to pass. If any outputs do not match their references return 1 to
# fail.
def runtest (command, outputs, failureok=0) :
err = 0
# print ("working dir = " + tmpdir)
os.chdir (srcdir)
open ("out.txt", "w").close() # truncate out.txt
if options.path != "" :
sys.path = [options.path] + sys.path
print ("command = " + command)
test_environ = None
if (platform.system () == 'Windows') and (options.solution_path != "") and \
(os.path.isdir (options.solution_path)):
test_environ = os.environ
libOIIO_args = [options.solution_path, "libOpenImageIO"]
if options.devenv_config != "":
libOIIO_args.append (options.devenv_config)
libOIIO_path = os.path.normpath (os.path.join (*libOIIO_args))
test_environ["PATH"] = libOIIO_path + ';' + test_environ["PATH"]
for sub_command in [c.strip() for c in command.split(';') if c.strip()]:
cmdret = subprocess.call (sub_command, shell=True, env=test_environ)
if cmdret != 0 and failureok == 0 :
print ("#### Error: this command failed: ", sub_command)
print ("FAIL")
err = 1
for out in outputs :
(prefix, extension) = os.path.splitext(out)
(ok, testfile) = checkref (out, refdirlist)
if ok :
if extension in image_extensions :
# If we got a match for an image, save the idiff results
os.system (diff_command (out, testfile, silent=False))
print ("PASS: " + out + " matches " + testfile)
else :
err = 1
print ("NO MATCH for " + out)
print ("FAIL " + out)
if extension == ".txt" :
# If we failed to get a match for a text file, print the
# file and the diff, for easy debugging.
print ("-----" + out + "----->")
print (open(out,'r').read() + "<----------")
print ("Diff was:\n-------")
print (open (out+".diff", 'rU').read())
if extension in image_extensions :
# If we failed to get a match for an image, send the idiff
# results to the console
os.system (diff_command (out, testfile, silent=False))
return (err)
##########################################################################
#
# Read the individual run.py file for this test, which will define
# command and outputs.
#
with open(os.path.join(test_source_dir,"run.py")) as f:
code = compile(f.read(), "run.py", 'exec')
exec (code)
# Allow a little more slop for slight pixel differences when in DEBUG
# mode or when running on remote Travis-CI or Appveyor machines.
if (("TRAVIS" in os.environ and os.environ["TRAVIS"]) or
("APPVEYOR" in os.environ and os.environ["APPVEYOR"]) or
("DEBUG" in os.environ and os.environ["DEBUG"])) :
failthresh *= 2.0
hardfail *= 2.0
failpercent *= 2.0
# Run the test and check the outputs
ret = runtest (command, outputs, failureok=failureok)
sys.exit (ret)
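# Illustrative sketch (not part of the harness): a hypothetical run.py for one
# test case. The harness exec()s that file and then reads the module-level
# names `command` and `outputs`; the oiiotool arguments and file names below
# are made up for illustration.
command = oiiotool ("--create 64x64 3 -o black.tif")
command += info_command ("black.tif", safematch=1)
outputs = [ "out.txt" ]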
|
{
"content_hash": "557aa9a6cbde75a14356ba5307ddeefd",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 97,
"avg_line_length": 37.60344827586207,
"alnum_prop": 0.5732080085587651,
"repo_name": "bdeluca/oiio",
"id": "68889a9beb6631b80bb90c8bbee81fadc94e834f",
"size": "13109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testsuite/runtest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "61734"
},
{
"name": "C++",
"bytes": "5265697"
},
{
"name": "CMake",
"bytes": "136695"
},
{
"name": "Makefile",
"bytes": "19247"
},
{
"name": "Python",
"bytes": "183345"
},
{
"name": "Shell",
"bytes": "6788"
},
{
"name": "TeX",
"bytes": "821211"
}
],
"symlink_target": ""
}
|
from contextlib import contextmanager
import numpy as np
from matplotlib.colors import ColorConverter
from vispy import scene
from vispy.scene.visuals import Arrow
class MultiColorScatter(scene.visuals.Markers):
"""
This is a helper class to make it easier to show multiple markers at
specific positions and control exactly which marker should be on top of
which.
"""
def __init__(self, *args, **kwargs):
self.layers = {}
self._combined_data = None
self._skip_update = False
self._error_vector_widget = None
super(MultiColorScatter, self).__init__(*args, **kwargs)
@contextmanager
def delay_update(self):
self._skip_update = True
yield
self._skip_update = False
def allocate(self, label):
if label in self.layers:
raise ValueError("Layer {0} already exists".format(label))
else:
self.layers[label] = {'data': None,
'mask': None,
'errors': None,
'vectors': None,
'draw_arrows': False,
'color': np.asarray((1., 1., 1.)),
'alpha': 1.,
'zorder': lambda: 0,
'size': 10,
'visible': True}
def deallocate(self, label):
self.layers.pop(label)
self._update()
def set_data_values(self, label, x, y, z):
"""
Set the position of the datapoints
"""
# TODO: avoid re-allocating an array every time
self.layers[label]['data'] = np.array([x, y, z]).transpose()
self._update()
def set_visible(self, label, visible):
self.layers[label]['visible'] = visible
self._update()
def set_mask(self, label, mask):
self.layers[label]['mask'] = mask
self._update()
def set_errors(self, label, error_lines):
self.layers[label]['errors'] = error_lines
self._update()
def set_vectors(self, label, vectors):
self.layers[label]['vectors'] = vectors
self._update()
def set_draw_arrows(self, label, draw_arrows):
self.layers[label]['draw_arrows'] = draw_arrows
self._update()
def set_size(self, label, size):
if not np.isscalar(size) and size.ndim > 1:
raise Exception("size should be a 1-d array")
self.layers[label]['size'] = size
self._update()
def set_color(self, label, rgb):
if isinstance(rgb, str):
rgb = ColorConverter().to_rgb(rgb)
self.layers[label]['color'] = np.asarray(rgb)
self._update()
def set_alpha(self, label, alpha):
self.layers[label]['alpha'] = alpha
self._update()
def set_zorder(self, label, zorder):
self.layers[label]['zorder'] = zorder
self._update()
def update_line_width(self, width):
if self._error_vector_widget:
self._error_vector_widget.set_data(width=width)
def _update(self):
if self._skip_update:
return
data = []
colors = []
sizes = []
lines = []
line_colors = []
arrows = []
arrow_colors = []
for label in sorted(self.layers, key=lambda x: self.layers[x]['zorder']()):
layer = self.layers[label]
if not layer['visible'] or layer['data'] is None:
continue
input_points = layer['data'].shape[0]
if layer['mask'] is None:
n_points = input_points
else:
n_points = np.sum(layer['mask'])
if input_points > 0 and n_points > 0:
# Data
if layer['mask'] is None:
data.append(layer['data'])
else:
data.append(layer['data'][layer['mask'], :])
# Colors
if layer['color'].ndim == 1:
rgba = np.hstack([layer['color'], 1])
rgba = np.repeat(rgba, n_points).reshape(4, -1).transpose()
else:
if layer['mask'] is None:
rgba = layer['color'].copy()
else:
rgba = layer['color'][layer['mask']]
rgba[:, 3] *= layer['alpha']
colors.append(rgba)
# Sizes
if np.isscalar(layer['size']):
size = np.repeat(layer['size'], n_points)
else:
if layer['mask'] is None:
size = layer['size']
else:
size = layer['size'][layer['mask']]
sizes.append(size)
# Error bar and colors
if layer['errors'] is not None:
for error_set in layer['errors']:
if layer['mask'] is None:
out = error_set
else:
out = error_set[layer['mask']]
out = out.reshape((-1, 3))
lines.append(out)
line_colors.append(np.repeat(rgba, 2, axis=0))
if layer['vectors'] is not None:
if layer['mask'] is None:
out = layer['vectors']
else:
out = layer['vectors'][layer['mask']]
lines.append(out.reshape((-1, 3)))
line_colors.append(np.repeat(rgba, 2, axis=0))
if layer['draw_arrows']:
arrows.append(out)
arrow_colors.append(rgba)
if len(data) == 0:
self.visible = False
return
else:
self.visible = True
data = np.vstack(data)
colors = np.vstack(colors)
sizes = np.hstack(sizes)
self.set_data(data, edge_color=colors, face_color=colors, size=sizes)
if len(lines) == 0:
if self._error_vector_widget is not None:
self._error_vector_widget.visible = False
return
else:
if self._error_vector_widget is None:
widget = Arrow(parent=self, connect="segments")
widget.set_gl_state(depth_test=False, blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha'))
self._error_vector_widget = widget
self._error_vector_widget.visible = True
lines = np.vstack(lines)
line_colors = np.vstack(line_colors)
self._error_vector_widget.set_data(pos=lines, color=line_colors)
arrows = np.vstack(arrows) if len(arrows) > 0 else np.array([])
arrow_colors = np.vstack(arrow_colors) if len(arrow_colors) else np.array([])
self._error_vector_widget.set_data(arrows=arrows)
self._error_vector_widget.arrow_color = arrow_colors
def draw(self, *args, **kwargs):
if len(self.layers) == 0:
return
else:
try:
super(MultiColorScatter, self).draw(*args, **kwargs)
except Exception:
pass
if __name__ == "__main__": # pragma: nocover
from vispy import app, scene
canvas = scene.SceneCanvas(keys='interactive')
view = canvas.central_widget.add_view()
view.camera = scene.TurntableCamera(up='z', fov=60)
x = np.random.random(20)
y = np.random.random(20)
z = np.random.random(20)
multi_scat = MultiColorScatter(parent=view.scene)
multi_scat.allocate('data')
multi_scat.set_zorder('data', lambda: 0)
multi_scat.set_data_values('data', x, y, z)
multi_scat.allocate('subset1')
multi_scat.set_mask('subset1', np.random.random(20) > 0.5)
multi_scat.set_color('subset1', 'red')
multi_scat.set_zorder('subset1', lambda: 1)
multi_scat.allocate('subset2')
multi_scat.set_mask('subset2', np.random.random(20) > 0.5)
multi_scat.set_color('subset2', 'green')
multi_scat.set_zorder('subset2', lambda: 2)
multi_scat.set_alpha('subset2', 0.5)
multi_scat.set_size('subset2', 20)
axis = scene.visuals.XYZAxis(parent=view.scene)
canvas.show()
app.run()
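# Illustrative sketch: attaching an error-bar set to the 'data' layer (this
# would go before canvas.show() in the demo above). The (n_points, 2, 3)
# layout, one start and one end point per marker, is an assumption inferred
# from the reshape((-1, 3)) in _update(), not a documented API.
z_err = 0.05
starts = np.column_stack([x, y, z - z_err])
ends = np.column_stack([x, y, z + z_err])
multi_scat.set_errors('data', [np.stack([starts, ends], axis=1)])
multi_scat.update_line_width(2)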
|
{
"content_hash": "d4dd0f1f93d8dcbd0e17527710430d32",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 85,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.5054502369668247,
"repo_name": "glue-viz/glue-3d-viewer",
"id": "66dc7a0199aa8727637acc4fd2535a57b9cc1e22",
"size": "8440",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glue_vispy_viewers/scatter/multi_scatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "100142"
}
],
"symlink_target": ""
}
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from panda3d.core import Vec4
from rpcore.render_stage import RenderStage
from rpcore.globals import Globals
from rpcore.image import Image
class AutoExposureStage(RenderStage):
required_pipes = ["ShadedScene"]
required_inputs = []
@property
def produced_pipes(self):
return {"ShadedScene": self.target_apply.color_tex,
"Exposure": self.tex_exposure}
def create(self):
# Create the target which converts the scene color to a luminance
self.target_lum = self.create_target("GetLuminance")
self.target_lum.size = -4
self.target_lum.add_color_attachment(bits=(16, 0, 0, 0))
self.target_lum.prepare_buffer()
self.mip_targets = []
# Create the storage for the exposure; this stores the current and last
# frame's exposure
# XXX: We have to use F_r16 instead of F_r32 because of a weird nvidia
# driver bug! However, 16 bits should be enough for sure.
self.tex_exposure = Image.create_buffer("ExposureStorage", 1, "R16")
self.tex_exposure.set_clear_color(Vec4(0.5))
self.tex_exposure.clear_image()
# Create the target which extracts the exposure from the average brightness
self.target_analyze = self.create_target("AnalyzeBrightness")
self.target_analyze.size = 1, 1
self.target_analyze.prepare_buffer()
self.target_analyze.set_shader_input("ExposureStorage", self.tex_exposure)
# Create the target which applies the generated exposure to the scene
self.target_apply = self.create_target("ApplyExposure")
self.target_apply.add_color_attachment(bits=16)
self.target_apply.prepare_buffer()
self.target_apply.set_shader_input("Exposure", self.tex_exposure)
def set_dimensions(self):
for old_target in self.mip_targets:
self.remove_target(old_target)
wsize_x = (Globals.resolution.x + 3) // 4
wsize_y = (Globals.resolution.y + 3) // 4
# Create the targets which downscale the luminance mipmaps
self.mip_targets = []
last_tex = self.target_lum.color_tex
while wsize_x >= 4 or wsize_y >= 4:
wsize_x = (wsize_x + 3) // 4
wsize_y = (wsize_y + 3) // 4
mip_target = self.create_target("DScaleLum:S" + str(wsize_x))
mip_target.add_color_attachment(bits=(16, 0, 0, 0))
mip_target.size = wsize_x, wsize_y
mip_target.sort = self.target_lum.sort + len(self.mip_targets)
mip_target.prepare_buffer()
mip_target.set_shader_input("SourceTex", last_tex)
self.mip_targets.append(mip_target)
last_tex = mip_target.color_tex
self.target_analyze.set_shader_input("DownscaledTex", self.mip_targets[-1].color_tex)
# Shaders might not have been loaded at this point
if hasattr(self, "mip_shader"):
for target in self.mip_targets:
target.shader = self.mip_shader
def reload_shaders(self):
self.target_lum.shader = self.load_plugin_shader("generate_luminance.frag.glsl")
self.target_analyze.shader = self.load_plugin_shader("analyze_brightness.frag.glsl")
self.target_apply.shader = self.load_plugin_shader("apply_exposure.frag.glsl")
# Keep shader as reference, required when resizing
self.mip_shader = self.load_plugin_shader("downscale_luminance.frag.glsl")
for target in self.mip_targets:
target.shader = self.mip_shader
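# Illustrative sketch of the downscale chain set_dimensions() builds, assuming
# a hypothetical 1920x1080 resolution: start at quarter resolution, then keep
# dividing by 4 (rounding up) until both dimensions drop below 4.
w, h = (1920 + 3) // 4, (1080 + 3) // 4
chain = []
while w >= 4 or h >= 4:
    w, h = (w + 3) // 4, (h + 3) // 4
    chain.append((w, h))
# chain is now [(120, 68), (30, 17), (8, 5), (2, 2)]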
|
{
"content_hash": "88e91f9f08b9b2a3c08678dcaabd0666",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 93,
"avg_line_length": 41.13157894736842,
"alnum_prop": 0.6811686926850075,
"repo_name": "eswartz/RenderPipeline",
"id": "bd116c91a51c95a321c540a876c330ccffce2c89",
"size": "4689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpplugins/color_correction/auto_exposure_stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1241"
},
{
"name": "C",
"bytes": "21397"
},
{
"name": "C++",
"bytes": "160537"
},
{
"name": "GLSL",
"bytes": "712004"
},
{
"name": "Groff",
"bytes": "114"
},
{
"name": "Python",
"bytes": "1374140"
}
],
"symlink_target": ""
}
|
KUBERNETES_DATASOURCE = 'kubernetes'
class KubernetesProperties(object):
NAME = 'name'
METADATA = 'metadata'
NETWORK = 'network'
ADDRESS = 'address'
STATUS = 'status'
ADDRESSES = 'addresses'
TYPE = 'type'
INTERNALIP = 'internal_ip'
EXTERNALIP = 'external_ip'
UID = 'uid'
EXTERNALID = 'external_id'
PROVIDERID = 'provider_id'
PROVIDER_NAME = 'provider_name'
SPEC = 'spec'
CREATION_TIMESTAMP = 'creation_timestamp'
RESOURCES = 'resources'
|
{
"content_hash": "cd0acefbe61e20be4c43d49a03d41a8a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 45,
"avg_line_length": 25.15,
"alnum_prop": 0.6381709741550696,
"repo_name": "openstack/vitrage",
"id": "9062669f40fac4c59d4beac8527c38e62042ef06",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/datasources/kubernetes/properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
}
|
import logging
from unittest.mock import MagicMock
import numpy as np
from tf_bodypix.model import BodyPixModelWrapper
LOGGER = logging.getLogger(__name__)
ANY_INT_FACTOR_1 = 5
class TestBodyPixModelWrapper:
def test_should_be_able_to_padded_and_resized_image_matching_output_stride_plus_one(self):
predict_fn = MagicMock(name='predict_fn')
output_stride = 16
internal_resolution = 0.5
model = BodyPixModelWrapper(
predict_fn=predict_fn,
output_stride=output_stride,
internal_resolution=internal_resolution
)
default_tensor_names = {
'float_segments',
'float_part_heatmaps',
'float_heatmaps',
'float_short_offsets',
'float_long_offsets',
'float_part_offsets',
'displacement_fwd',
'displacement_bwd'
}
predict_fn.return_value = {
key: np.array([])
for key in default_tensor_names
}
resolution_matching_output_stride_plus_1 = int(
(output_stride * ANY_INT_FACTOR_1 + 1) / internal_resolution
)
LOGGER.debug(
'resolution_matching_output_stride_plus_1: %s',
resolution_matching_output_stride_plus_1
)
image = np.ones(
shape=(
resolution_matching_output_stride_plus_1,
resolution_matching_output_stride_plus_1,
3
)
)
model.predict_single(image)
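# Worked numbers for the image size used above (restating the test's own
# arithmetic): with output_stride=16, internal_resolution=0.5 and
# ANY_INT_FACTOR_1=5, the image is (16 * 5 + 1) / 0.5 = 162 pixels square, so
# the internally resized image is 81 pixels, one more than a multiple of the
# output stride.
assert int((16 * ANY_INT_FACTOR_1 + 1) / 0.5) == 162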
|
{
"content_hash": "fe15f46a3d479e07e95b4040176705f8",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 94,
"avg_line_length": 29.132075471698112,
"alnum_prop": 0.5699481865284974,
"repo_name": "de-code/python-tf-bodypix",
"id": "a7f35745922c2f784563056d48c98f0b2f186a26",
"size": "1544",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/model_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "820"
},
{
"name": "Makefile",
"bytes": "8075"
},
{
"name": "Python",
"bytes": "119244"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
}
|
"""add password
Revision ID: 578150ae72c8
Revises: a4ddb77c0941
Create Date: 2017-02-07 19:02:35.725606
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '578150ae72c8'
down_revision = 'a4ddb77c0941'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'password_hash')
# ### end Alembic commands ###
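# Illustrative only: applying or rolling back this revision programmatically.
# The "migrations/alembic.ini" path is an assumption about the project layout
# (Flask-Migrate's default); from the shell the equivalent is the usual
# "flask db upgrade" / "flask db downgrade".
from alembic.config import Config
from alembic import command
cfg = Config("migrations/alembic.ini")
command.upgrade(cfg, "578150ae72c8")
command.downgrade(cfg, "a4ddb77c0941")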
|
{
"content_hash": "730fe447fee66b26608c2371018c5289",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 92,
"avg_line_length": 24,
"alnum_prop": 0.6875,
"repo_name": "hedm0423/flaskdemo",
"id": "ac31178715bfd78014f281265ab4e090fc0a4fb6",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/578150ae72c8_add_password.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3711"
},
{
"name": "HTML",
"bytes": "14769"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "42226"
}
],
"symlink_target": ""
}
|
import graphene
from .mutations import TaxExemptionManage
class TaxMutations(graphene.ObjectType):
tax_exemption_manage = TaxExemptionManage.Field()
|
{
"content_hash": "ba062147e6892dd8826f032704f4a5be",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 22.285714285714285,
"alnum_prop": 0.8141025641025641,
"repo_name": "mociepka/saleor",
"id": "4f56cf5f7cc7b6e8f888de2d2e9f9206cb1973fc",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/tax/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
import os
import warnings
qt_api = os.environ.get('QT_API')
if qt_api is None:
try:
import PySide
qt_api = 'pyside'
except ImportError:
try:
import PyQt4
qt_api = 'pyqt'
except ImportError:
qt_api = None
# Note that we don't want to raise an error because that would
# cause the TravisCI build to fail.
warnings.warn("Could not import PyQt4: ImageViewer not available!")
if qt_api is not None:
os.environ['QT_API'] = qt_api
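# Illustrative sketch of downstream use: viewer modules can branch on qt_api
# (or the QT_API environment variable) to pick their imports. The exact
# imports below are an example, not taken from this package.
if qt_api == 'pyside':
    from PySide import QtGui, QtCore
elif qt_api == 'pyqt':
    from PyQt4 import QtGui, QtCore
else:
    QtGui = QtCore = None  # ImageViewer stays unavailable, as warned above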
|
{
"content_hash": "0e12dbf81ba6ecc472d000b57e0d3363",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 24.727272727272727,
"alnum_prop": 0.5753676470588235,
"repo_name": "almarklein/scikit-image",
"id": "8e7ab93952eccd09c61a59e2eb406ce6a175d1e1",
"size": "544",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "skimage/viewer/qt/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "69944"
},
{
"name": "CSS",
"bytes": "3629"
},
{
"name": "JavaScript",
"bytes": "777"
},
{
"name": "Python",
"bytes": "1944172"
},
{
"name": "Shell",
"bytes": "3346"
}
],
"symlink_target": ""
}
|
__author__ = 'cenk'
def check_vertical(cells):
for (i, j) in cells:
if (i, j + 1) in cells and (i, j + 2) in cells:
return True
return False
def check_horizontal(cells):
for (i, j) in cells:
if (i + 1, j) in cells and (i + 2, j) in cells:
return True
return False
def check_cross(cells):
for (i, j) in cells:
if (i + 1, j + 1) in cells and (i + 2, j + 2) in cells:
return True
if (i - 1, j + 1) in cells and (i - 2, j + 2) in cells:
return True
return False
def is_finish(cells):
if check_vertical(cells) or check_horizontal(cells) or check_cross(cells):
return True
return False
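# Usage sketch (not part of the game code): `cells` is assumed to be a
# collection of (column, row) tuples occupied by one player; three in a line
# in any direction finishes the game.
if __name__ == '__main__':
    diagonal = {(0, 0), (1, 1), (2, 2)}
    assert check_cross(diagonal) and is_finish(diagonal)
    assert not is_finish({(0, 0), (1, 0)})  # only two in a row horizontally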
|
{
"content_hash": "c4c5c557df4136013af39f5040b01eee",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 22.870967741935484,
"alnum_prop": 0.53737658674189,
"repo_name": "cenkbircanoglu/tic-tac-toe",
"id": "5f87a1745d8d11ac9c2894cd821dd60fd92a5e1b",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/algorithm/is_finish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "475"
},
{
"name": "HTML",
"bytes": "3312"
},
{
"name": "JavaScript",
"bytes": "5593"
},
{
"name": "Python",
"bytes": "36543"
}
],
"symlink_target": ""
}
|
"""
Text Sentiment Calculator w/ GUI
Authors: Chris Cintron, Matt Novak
Created April 2016
"""
from os import popen
import xlwt
import openpyxl
from tkinter import Button, Label, Grid, Tk
from tkinter.filedialog import askopenfilename, asksaveasfile, askdirectory
import re
def openmyfolder():
global rawInput
filename = askopenfilename(title= "Pick a .txt file to analyze")
InputFile = open(filename)
rawInput = InputFile.read()
InputFile.close()
return rawInput
def openmyfolder2():
filename = askopenfilename(title= "Select the DDods and Warriner Dictionaires")
global wb1
wb1 = openpyxl.load_workbook(filename)
def main():
#Inputs for Original Dict
#global totalwrdcount
global uniqwordcount
global modeValue
global modeKey
#Inputs for Dodds
global freqLabMTWords
global uniqLabMTWords
global modeLabMTWordsValue
global meanLabMTWords
global modeLabMTWords
#Inputs for Warriner
global freqWarriner
global uniqWarriner
global modeWarrinerValue
global meanWarriner
global modeWarriner
xlSheet1 = wb1.get_sheet_by_name('labMTwords-English')
xlSheet2 = wb1.get_sheet_by_name("Warriner-English")
#import happiness scales as dictionaries
dictLabMTWords = {}
dictWarriner = {}
tempRowNum = xlSheet1.max_row + 1
for i in range(1, tempRowNum):
Key = xlSheet1.cell(row=i, column=1).value #cell Ai
Value = xlSheet1.cell(row=i , column=2).value #cell Bi
dictLabMTWords.update({ Key : Value }) #Adds them to dictionary
tempRowNum = xlSheet2.max_row + 1
for i in range(1, tempRowNum):
Key = xlSheet2.cell(row=i, column=1).value #cell Ai
Value = xlSheet2.cell(row=i , column=2).value #cell Bi
dictWarriner.update({ Key : Value }) #Adds them to dictionary
#Remove everycharacter EXCEPT a-z, A-Z, and whitespace
pattern = re.compile('\w+')
results = re.sub(r'[^a-zA-Z\s]', '', rawInput)
new_rawInput = results.lower()
finalInput = pattern.findall(new_rawInput)
#Loops through each item in list and adds it into dictionary. Adds + 1.
#If already in dictionary, adds (the value) + 1
dictInput = {}
for character in finalInput:
dictInput.setdefault(character, 0)
dictInput[character] = dictInput[character] + 1
uniqwordcount = len(dictInput)
totalwordcount = 0
for i in dictInput:
totalwordcount += dictInput[i]
#find the mode
modeKey = []
modeValue = 0
for i in dictInput:
if dictInput[i] == modeValue:
modeKey.append(i)
elif dictInput[i] > modeValue:
modeKey[:] = []
modeValue = dictInput[i]
modeKey.append(i)
#Variables for Dodds
modeLabMTWords = []
modeLabMTWordsValue = 0
sumLabMTWords = 0
freqLabMTWords = 0
uniqLabMTWords = 0
dodds_orphanlist = []
for i in dictInput:
if i in dictLabMTWords.keys():
sumLabMTWords += dictLabMTWords[i] * dictInput[i]
freqLabMTWords += dictInput[i]
uniqLabMTWords += 1
if dictInput[i] == modeLabMTWordsValue:
modeLabMTWords.append(i)
elif dictInput[i] > modeLabMTWordsValue:
modeLabMTWords[:] = []
modeLabMTWordsValue = dictInput[i]
modeLabMTWords.append(i)
else: #Add to orphan list
dodds_orphanlist.append(i)
meanLabMTWords = sumLabMTWords / freqLabMTWords
#Variables for Warriner
modeWarriner = []
modeWarrinerValue = 0
sumWarriner = 0
freqWarriner = 0
uniqWarriner = 0
warriner_orphanlist = []
for i in dictInput:
if i in dictWarriner.keys():
sumWarriner += dictWarriner[i] * dictInput[i]
freqWarriner += dictInput[i]
uniqWarriner += 1
if dictInput[i] == modeWarrinerValue:
modeWarriner.append(i)
elif dictInput[i] > modeWarrinerValue:
modeWarriner[:] = []
modeWarrinerValue = dictInput[i]
modeWarriner.append(i)
else: #Add to orphan list
warriner_orphanlist.append(i)
meanWarriner = sumWarriner / freqWarriner
ResultsLabel = Label(root, text="Results", bg="blue", fg="white")
ResultsLabel.grid(row=5, column=0)
v51 = Label(root, text="Total", bg="white", fg="black")
v51.grid(row=5, column=1)
v52 = Label(root, text="Dodds", bg="white", fg="black")
v52.grid(row=5, column=2)
v53 = Label(root, text="Warriner", bg="white", fg="black")
v53.grid(row=5,column=3)
#Total Word Count Row
v60 = Label(root, text="Total Word Count", bg="white", fg="black")
v60.grid(row=6, column=0)
v61 = Label(root, text= totalwordcount, bg="white", fg="black")
v61.grid(row=6,column=1)
v62 = Label(root, text= freqLabMTWords, bg="white", fg="black")
v62.grid(row=6,column=2)
v63 = Label(root, text= freqWarriner, bg="white", fg="black")
v63.grid(row=6,column=3)
#Total Unique Words Row
v70 = Label(root, text="Total Unique Words", bg="white", fg="black")
v70.grid(row=7, column=0)
v71 = Label(root, text= uniqwordcount, bg="white", fg="black")
v71.grid(row=7, column=1)
v72 = Label(root, text= uniqLabMTWords, bg="white", fg="black")
v72.grid(row=7, column=2)
v73 = Label(root, text= uniqWarriner, bg="white", fg="black")
v73.grid(row=7, column=3)
#Mode Row
v80 = Label(root, text="Mode", bg="white", fg="black")
v80.grid(row=8, column=0)
v81 = Label(root, text= modeValue, bg="white", fg="black")
v81.grid(row=8, column=1)
v82 = Label(root, text= modeLabMTWordsValue, bg="white", fg="black")
v82.grid(row=8, column=2)
v83 = Label(root, text= modeWarrinerValue, bg="white", fg="black")
v83.grid(row=8, column=3)
#Mean Row
v90 = Label(root, text="Mean", bg="white", fg="black")
v90.grid(row=9, column= 0)
v91 = Label(root, text="N/a", bg="white", fg="black")
v91.grid(row=9, column=1)
v92 = Label(root, text= meanLabMTWords, bg="white", fg="black")
v92.grid(row=9, column=2)
v93 = Label(root, text= meanWarriner, bg="white", fg="black")
v93.grid(row=9, column=3)
#Mode Word
v10 = Label(root, text="Mode Word", bg="white", fg="black")
v10.grid(row=10, column=0)
v11 = Label(root, text= modeKey, bg="white", fg="black")
v11.grid(row=10, column=1)
v12 = Label(root, text= modeLabMTWords, bg="white", fg="black")
v12.grid(row=10, column=2)
v13 = Label(root, text= modeWarriner, bg="white", fg="black")
v13.grid(row=10, column=3)
wb = xlwt.Workbook()
ws = wb.add_sheet('Results')
#Dictionaries into excel
rownum = 2
for i in dictInput:
ws.write(rownum, 0, i)
ws.write(rownum, 1, dictInput[i])
rownum += 1
### Writes to Excel, probably will delete ###
#Headers
ws.write(0, 0, "Total Words")
ws.write(0, 1, "Frequency")
ws.write(0, 3, "Dodds Orphan Words")
ws.write(0, 4, "Warriner Orphan Words")
ws.write(2, 3, str(dodds_orphanlist))
ws.write(2, 4, str(warriner_orphanlist))
#Table for Key Indicator
#Rows
ws.write(0, 10, "Total")
ws.write(0, 11, "Dodds")
ws.write(0, 12, "Warriner")
#Collumns
ws.write(1, 9, "Total Word Count")
ws.write(2, 9, "Total Unique word count")
ws.write(3, 9, "Mode")
ws.write(4, 9, "Mean")
ws.write(4, 10, "N/a")
ws.write(5, 9, "Mode Word(s)")
#Inputs for Original Dict
ws.write(1, 10, totalwordcount)
ws.write(2, 10, uniqwordcount)
ws.write(3, 10, modeValue)
ws.write(5, 10, str(modeKey))
#inputs for Dodds
ws.write(1, 11, freqLabMTWords)
ws.write(2, 11, uniqLabMTWords)
ws.write(3, 11, modeLabMTWordsValue)
ws.write(4, 11, meanLabMTWords)
ws.write(5, 11, str(modeLabMTWords))
#inputs for Warriner
ws.write(1, 12, freqWarriner)
ws.write(2, 12, uniqWarriner)
ws.write(3, 12, modeWarrinerValue)
ws.write(4, 12, meanWarriner)
ws.write(5, 12, str(modeWarriner))
#Save into selected directory
your_dir = askdirectory(title= "Where would you like to save this?")
wb.save(str(your_dir) + "/word_sentiment_results.xls")
#Window size and title
root=Tk()
root.geometry("480x250+400+200")
root.title("Text Sentiment Analyzer")
#Buttons
readbutton = Button(root, text="Open .txt file", bg="white", fg="black", command= openmyfolder)
readbutton.grid(row=1) #Used to find .txt file
readbutton = Button(root, text="Open Excel dictionary", bg="white", fg="black", command= openmyfolder2)
readbutton.grid(row=1, column=2) #Used to find amels_dictionaries
runbutton = Button(root, text="Run", bg="white", fg="red", command= main)
runbutton.grid(row=1, column=3) #Invokes main() function to run code
#Random Labels
ResultsLabel = Label(root, text="Results", bg="blue", fg="white")
ResultsLabel.grid(row=5, column=0)
root.mainloop()
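# Toy illustration (separate from the GUI flow above) of the scoring math in
# main(): with a made-up two-word happiness dictionary, the weighted mean is
# sum(score * count) / sum(count) over the words found in the dictionary, and
# anything else would land on the orphan list.
toy_scores = {'happy': 8.0, 'sad': 2.0}           # hypothetical scores
toy_counts = {'happy': 3, 'sad': 1, 'zzz': 5}     # 'zzz' would be an orphan word
matched = {w: c for w, c in toy_counts.items() if w in toy_scores}
toy_mean = sum(toy_scores[w] * c for w, c in matched.items()) / sum(matched.values())
assert toy_mean == 6.5                            # (8*3 + 2*1) / 4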
|
{
"content_hash": "d14e70cef2eef146127386445671130f",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 103,
"avg_line_length": 29.75328947368421,
"alnum_prop": 0.6332780541735765,
"repo_name": "ChrisCintron/Text_Sentiment_Analyzer",
"id": "43b8852e4b68eeabd119ce814e33dd3bc88984fd",
"size": "9093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9093"
}
],
"symlink_target": ""
}
|
import ast, sys, time
import argparse
import numpy as np
from StrongCNN.IO.config_parser import parse_configfile
from StrongCNN.IO.load_images import load_data
from StrongCNN.IO.augment_data import augment_data
from StrongCNN.utils.model_info import roc_auc, roc_curve_plot, get_scores
from StrongCNN.utils.model_info import model_coeff_plot
from _tools import generate_X_y, load_model, get_false_predictions_list, get_filenames_in_threshold_range, generate_X_scores
'''
Get the score and false ids of the model on either the training set or the test set
'''
parser = argparse.ArgumentParser()
parser.add_argument('cfgdir')
parser.add_argument('set_name')
parser.add_argument('-p', '--roc_plot_filename', required = False)
parser.add_argument('-c', '--model_coeff_plot_filename', required = False)
parser.add_argument('-r', '--roc_data_filename', required = False)
parser.add_argument('-t', '--tpr_filename', required = False)
parser.add_argument('-s', '--filenames_scores', required = False )
parser.add_argument('-T', '--time', required = False )
args = vars(parser.parse_args())
cfgdir = args['cfgdir']
set_name = args['set_name']
cfg = parse_configfile(cfgdir)
if args['time'] is not None : start_time = time.time()
else :
print "Time is not on!"
sys.exit()
assert(set_name in ['test','train'])
# Collect testing data
X_test, y_test, filenames = load_data(cfg[set_name+'_filenames']['non_lens_glob'],
cfg[set_name+'_filenames']['lens_glob'])
if 'augment_'+set_name+'_data' in cfg.keys() :
X_test, y_test = augment_data( X_test, y_test,
cfg['augment_'+set_name+'_data']['method_label'],
**ast.literal_eval(cfg['augment_'+set_name+'_data']['method_kwargs']))
print "len(X_test) =", len(X_test)
print "len(y_test) =", len(y_test)
trained_model = load_model(cfgdir+'/'+cfg['model']['pklfile'])
print set_name+' filename glob', cfg[set_name+'_filenames']['non_lens_glob'], cfg[set_name+'_filenames']['lens_glob']
print ''
print 'Testing model parameter grid:'
for k,v in cfg['param_grid'].iteritems() :
print k, v
print ''
if cfg[set_name+'_filenames']['lens_glob'] != '' and cfg[set_name+'_filenames']['non_lens_glob'] != '' :
print 'False predictions: '
#print get_false_predictions_list(trained_model, X_test, y_test, filenames)
print ''
print 'AUC =', roc_auc(trained_model, X_test, y_test)
print ''
if args['tpr_filename'] is not None :
tpr_min, tpr_max = 0., 1.
fpr_min, fpr_max = 0., 1.
filenames_in_tpr, filenames_in_fpr = get_filenames_in_threshold_range(trained_model, X_test, y_test,
filenames, (tpr_min,tpr_max),
(fpr_min, fpr_max) )
np.savetxt(args['tpr_filename'],np.array(filenames_in_tpr),fmt='%s %s %s %s %s',
header="# filename score label tpr fpr")
if args['roc_plot_filename'] is not None :
roc_data = roc_curve_plot(trained_model, X_test, y_test,
args['roc_plot_filename'])
if args['roc_data_filename'] is not None :
np.savetxt(args['roc_data_filename'],
np.asarray(roc_data).transpose())
if args['model_coeff_plot_filename'] is not None :
model_coeff_plot(trained_model.steps[-1][1],
args['model_coeff_plot_filename'])
if args['filenames_scores'] is not None :
X_length = len(X_test)
assert( X_length == len(y_test) )
assert( X_length/4 == len(filenames) )
print "Length of X and y: ", X_length
for i in range(4) :
np.savetxt( args['filenames_scores'].split('.txt')[0]+'_'+set_name+str(i)+'.txt',
np.asarray(generate_X_scores( trained_model, X_test[i*X_length/4:(i+1)*X_length/4],
y_test[i*X_length/4:(i+1)*X_length/4], filenames )).transpose(),
fmt='%s %s %s', header='filename score label',comments='' )
if args['time'] is not None : print 'Tested '+set_name+' set. Time taken:', time.time() - start_time
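# Hypothetical invocation of this script (the paths are made up). Note that it
# exits early unless --time is supplied, per the check near the top.
import subprocess
subprocess.call(["python", "test_trained_model.py", "configs/lens_run", "test",
                 "--time", "1", "-p", "roc.png", "-r", "roc_data.txt"])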
|
{
"content_hash": "dea9c0396d60da7dc19dd50cee7b10c7",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 124,
"avg_line_length": 41.23300970873787,
"alnum_prop": 0.5959500824111137,
"repo_name": "cavestruz/StrongCNN",
"id": "491c52cb80382cc59a72240a232ccb70796673c7",
"size": "4247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/test_trained_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7822"
},
{
"name": "Python",
"bytes": "94637"
},
{
"name": "Shell",
"bytes": "4089"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unittest
from decimal import Decimal
from djmoney_rates.backends import BaseRateBackend
from djmoney_rates.exceptions import CurrencyConversionException
from djmoney_rates.models import RateSource, Rate
from djmoney_rates.settings import money_rates_settings
from djmoney_rates.utils import base_convert_money, convert_money
import moneyed
class TestMoneyConverter(unittest.TestCase):
def setUp(self):
class RateBackend(BaseRateBackend):
source_name = "fake-backend"
base_currency = "USD"
money_rates_settings.DEFAULT_BACKEND = RateBackend
def tearDown(self):
RateSource.objects.all().delete()
Rate.objects.all().delete()
def test_base_conversion_fail_when_source_does_not_exist(self):
with self.assertRaises(CurrencyConversionException) as cm:
base_convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for fake-backend source do not exists", str(cm.exception))
def test_base_conversion_fail_when_currency_from_does_not_exist(self):
RateSource.objects.create(name="fake-backend")
with self.assertRaises(CurrencyConversionException) as cm:
base_convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for PLN in fake-backend do not exists", str(cm.exception))
def test_base_conversion_fail_when_currency_to_does_not_exist(self):
source = RateSource.objects.create(name="fake-backend")
Rate.objects.create(source=source, currency="PLN", value=0.99999)
with self.assertRaises(CurrencyConversionException) as cm:
base_convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for EUR in fake-backend do not exists", str(cm.exception))
def test_base_conversion_works_from_base_currency(self):
source = RateSource.objects.create(name="fake-backend", base_currency="USD")
Rate.objects.create(source=source, currency="USD", value=1)
Rate.objects.create(source=source, currency="EUR", value=0.74)
amount = base_convert_money(1, "USD", "EUR")
self.assertEqual(amount, Decimal("0.74"))
def test_base_conversion_is_working_from_other_currency(self):
source = RateSource.objects.create(name="fake-backend", base_currency="USD")
Rate.objects.create(source=source, currency="PLN", value=3.07)
Rate.objects.create(source=source, currency="EUR", value=0.74)
amount = base_convert_money(10.0, "PLN", "EUR")
self.assertEqual(amount, Decimal("2.41"))
def test_conversion_fail_when_source_does_not_exist(self):
with self.assertRaises(CurrencyConversionException) as cm:
convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for fake-backend source do not exists", str(cm.exception))
def test_conversion_fail_when_currency_from_does_not_exist(self):
RateSource.objects.create(name="fake-backend")
with self.assertRaises(CurrencyConversionException) as cm:
convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for PLN in fake-backend do not exists", str(cm.exception))
def test_conversion_fail_when_currency_to_does_not_exist(self):
source = RateSource.objects.create(name="fake-backend")
Rate.objects.create(source=source, currency="PLN", value=0.99999)
with self.assertRaises(CurrencyConversionException) as cm:
convert_money(10.0, "PLN", "EUR")
self.assertIn("Rate for EUR in fake-backend do not exists", str(cm.exception))
def test_conversion_works_from_base_currency(self):
source = RateSource.objects.create(name="fake-backend", base_currency="USD")
Rate.objects.create(source=source, currency="USD", value=1)
Rate.objects.create(source=source, currency="EUR", value=0.74)
amount = convert_money(1, "USD", "EUR")
self.assertEqual(type(amount), moneyed.Money)
self.assertEqual(amount, moneyed.Money(Decimal("0.74"), "EUR"))
def test_conversion_is_working_from_other_currency(self):
source = RateSource.objects.create(name="fake-backend", base_currency="USD")
Rate.objects.create(source=source, currency="PLN", value=3.07)
Rate.objects.create(source=source, currency="EUR", value=0.74)
amount = convert_money(10.0, "PLN", "EUR")
self.assertEqual(amount, moneyed.Money(Decimal("2.41"), "EUR"))
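# The conversion arithmetic the tests above exercise, spelled out with the same
# hypothetical rates (base currency USD): amount / rate_from * rate_to. The
# two-decimal quantize stands in for whatever rounding the utility applies.
rate_pln, rate_eur = Decimal("3.07"), Decimal("0.74")
converted = Decimal("10.0") / rate_pln * rate_eur
assert converted.quantize(Decimal("0.01")) == Decimal("2.41")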
|
{
"content_hash": "95fda597ac4f7e3149b4c3366764a687",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 86,
"avg_line_length": 41.52336448598131,
"alnum_prop": 0.6851226648660814,
"repo_name": "iXioN/django-money-rates",
"id": "e67ef34c69520eea1602f58361ccc3851c0386f1",
"size": "4443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29029"
}
],
"symlink_target": ""
}
|
import os.path as op
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_allclose
from mne import Epochs, read_evokeds, pick_types
from mne.io.compensator import make_compensator, get_current_comp
from mne.io import Raw
from mne.utils import _TempDir, requires_mne, run_subprocess
base_dir = op.join(op.dirname(__file__), 'data')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
tempdir = _TempDir()
def test_compensation():
"""Test compensation
"""
raw = Raw(ctf_comp_fname, compensation=None)
comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
assert_true(comp1.shape == (340, 340))
comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
assert_true(comp2.shape == (311, 340))
# make sure that changing the comp doesn't modify the original data
raw2 = Raw(ctf_comp_fname, compensation=2)
assert_true(get_current_comp(raw2.info) == 2)
fname = op.join(tempdir, 'ctf-raw.fif')
raw2.save(fname)
raw2 = Raw(fname, compensation=None)
data, _ = raw[:, :]
data2, _ = raw2[:, :]
assert_allclose(data, data2, rtol=1e-9, atol=1e-20)
for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']):
assert_true(ch1['coil_type'] == ch2['coil_type'])
@requires_mne
def test_compensation_mne():
"""Test comensation by comparing with MNE
"""
def make_evoked(fname, comp):
raw = Raw(fname, compensation=comp)
picks = pick_types(raw.info, meg=True, ref_meg=True)
events = np.array([[0, 0, 1]], dtype=np.int)
evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks).average()
return evoked
def compensate_mne(fname, comp):
tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
cmd = ['mne_compensate_data', '--in', fname,
'--out', tmp_fname, '--grad', str(comp)]
run_subprocess(cmd)
return read_evokeds(tmp_fname)[0]
# save evoked response with default compensation
fname_default = op.join(tempdir, 'ctf_default-ave.fif')
make_evoked(ctf_comp_fname, None).save(fname_default)
for comp in [0, 1, 2, 3]:
evoked_py = make_evoked(ctf_comp_fname, comp)
evoked_c = compensate_mne(fname_default, comp)
picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
rtol=1e-3, atol=1e-17)
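# Hypothetical direct use of the compensator matrix (reusing ctf_comp_fname and
# the imports above): make_compensator(info, from, to) returns an
# n_channels x n_channels matrix, and this sketch assumes it is applied by
# left-multiplying the channel data.
raw = Raw(ctf_comp_fname, compensation=None)
comp = make_compensator(raw.info, 0, 3, exclude_comp_chs=False)
data, _ = raw[:, :100]
data_grade3 = np.dot(comp, data)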
|
{
"content_hash": "ae79a8c4495a106f2a3be39807c7686c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 37.492537313432834,
"alnum_prop": 0.6385350318471338,
"repo_name": "jaeilepp/eggie",
"id": "3620c86f65021ba012719849d04e48789ffc29ac",
"size": "2612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/io/tests/test_compensator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3357472"
}
],
"symlink_target": ""
}
|
import unittest
import uuid
import mock
from changes.utils.http import build_patch_uri
class TestBuildPatchUri(unittest.TestCase):
def test_internal(self):
patch_id = uuid.UUID('ee62b37b-bc3f-4efe-83bc-f75152d60405')
app = mock.Mock(config={'INTERNAL_BASE_URI': 'https://base_uri/'})
uri = build_patch_uri(patch_id, app)
assert uri == 'https://base_uri/api/0/patches/{0}/?raw=1'.format(
patch_id.hex)
def test_use_patch(self):
patch_id = uuid.UUID('ee62b37b-bc3f-4efe-83bc-f75152d60405')
app = mock.Mock(config={
'INTERNAL_BASE_URI': 'https://base_uri/',
'PATCH_BASE_URI': 'https://patch_uri/'
})
uri = build_patch_uri(patch_id, app)
assert uri == 'https://patch_uri/api/0/patches/{0}/?raw=1'.format(
patch_id.hex)
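# A sketch of the behaviour the two tests above pin down (inferred from the
# tests, not copied from the real implementation): prefer PATCH_BASE_URI when
# it is configured, otherwise fall back to INTERNAL_BASE_URI.
def build_patch_uri_sketch(patch_id, app):
    base = app.config.get('PATCH_BASE_URI') or app.config['INTERNAL_BASE_URI']
    return '{0}api/0/patches/{1}/?raw=1'.format(base, patch_id.hex)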
|
{
"content_hash": "c402a35bac6118d5755e68627ddf0dc6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 35.291666666666664,
"alnum_prop": 0.6080283353010626,
"repo_name": "dropbox/changes",
"id": "827f3de96bb9ef985dd6bfbda90e32d6d55628a9",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/changes/utils/test_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24837"
},
{
"name": "HTML",
"bytes": "21274"
},
{
"name": "JavaScript",
"bytes": "380548"
},
{
"name": "Makefile",
"bytes": "6148"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2189624"
},
{
"name": "Shell",
"bytes": "4150"
}
],
"symlink_target": ""
}
|
"""The module provides connection and connections pool interfaces.
These are intended to be used for implementing custom connection managers.
"""
import abc
import asyncio
from abc import ABC
__all__ = [
'AbcConnection',
'AbcPool',
'AbcChannel',
]
class AbcConnection(ABC):
"""Abstract connection interface."""
@abc.abstractmethod
def execute(self, command, *args, **kwargs):
"""Execute redis command."""
@abc.abstractmethod
def execute_pubsub(self, command, *args, **kwargs):
"""Execute Redis (p)subscribe/(p)unsubscribe commands."""
@abc.abstractmethod
def close(self):
"""Perform connection(s) close and resources cleanup."""
@asyncio.coroutine
@abc.abstractmethod
def wait_closed(self):
"""
Coroutine waiting until all resources are closed/released/cleaned up.
"""
@property
@abc.abstractmethod
def closed(self):
"""Flag indicating if connection is closing or already closed."""
@property
@abc.abstractmethod
def db(self):
"""Current selected DB index."""
@property
@abc.abstractmethod
def encoding(self):
"""Current set connection codec."""
@property
@abc.abstractmethod
def in_pubsub(self):
"""Returns number of subscribed channels.
Can be tested as bool indicating Pub/Sub mode state.
"""
@property
@abc.abstractmethod
def pubsub_channels(self):
"""Read-only channels dict."""
@property
@abc.abstractmethod
def pubsub_patterns(self):
"""Read-only patterns dict."""
@property
@abc.abstractmethod
def address(self):
"""Connection address."""
class AbcPool(AbcConnection):
"""Abstract connections pool interface.
Inherited from AbcConnection so both have common interface
for executing Redis commands.
"""
@abc.abstractmethod
def get_connection(self): # TODO: arguments
"""
Gets free connection from pool in a sync way.
If no connection is available, returns None.
"""
@asyncio.coroutine
@abc.abstractmethod
def acquire(self): # TODO: arguments
"""Acquires connection from pool."""
@abc.abstractmethod
def release(self, conn): # TODO: arguments
"""Releases connection to pool.
:param AbcConnection conn: Owned connection to be released.
"""
@property
@abc.abstractmethod
def address(self):
"""Connection address or None."""
class AbcChannel(ABC):
"""Abstract Pub/Sub Channel interface."""
@property
@abc.abstractmethod
def name(self):
"""Encoded channel name or pattern."""
@property
@abc.abstractmethod
def is_pattern(self):
"""Boolean flag indicating if channel is pattern channel."""
@property
@abc.abstractmethod
def is_active(self):
"""Flag indicating that channel has unreceived messages
and is not marked as closed."""
@asyncio.coroutine
@abc.abstractmethod
def get(self):
"""Wait and return new message.
Will raise ``ChannelClosedError`` if channel is not active.
"""
# wait_message is not required; details of implementation
# @abc.abstractmethod
# def wait_message(self):
# pass
@abc.abstractmethod
def put_nowait(self, data):
"""Send data to channel.
Called by RedisConnection when new message received.
For pattern subscriptions data will be a tuple of
channel name and message itself.
"""
@abc.abstractmethod
def close(self, exc=None):
"""Marks Channel as closed, no more messages will be sent to it.
Called by RedisConnection when channel is unsubscribed
or connection is closed.
"""
|
{
"content_hash": "9c9b4f6b114d29ef1aaf23a8b77430af",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 77,
"avg_line_length": 24.150943396226417,
"alnum_prop": 0.634375,
"repo_name": "ymap/aioredis",
"id": "0af13c65c9272f189615f3120b8c27604e8abdb2",
"size": "3842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aioredis/abc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2781"
},
{
"name": "Python",
"bytes": "556618"
}
],
"symlink_target": ""
}
|
import time
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core.urlresolvers import reverse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from tests.utils.factories import AdminUserFactory, ServiceFactory, LogFactory, NotificationFactory
from ftp_deploy.models import Service
class ViewsTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.PhantomJS()
self.browser.implicitly_wait(10)
self.wait = WebDriverWait(self.browser, 10)
def tearDown(self):
self.browser.quit()
def user_authenticate(self):
AdminUserFactory()
self.browser.get(self.live_server_url + reverse('ftpdeploy_login'))
self.wait.until(lambda browser: browser.find_element_by_tag_name('form'))
username_field = self.browser.find_element_by_name('username')
username_field.send_keys('admin')
password_field = self.browser.find_element_by_name('password')
password_field.send_keys('admin')
password_field.send_keys(Keys.RETURN)
self.wait_for_load()
time.sleep(1)
def wait_for_load(self):
return self.wait.until(lambda browser: browser.find_element_by_tag_name('header'))
def test_top_navigation(self):
self.user_authenticate()
# User clicks the 'Services' link and sees the 'Services Dashboard' header
navbar = self.browser.find_element_by_class_name('navbar')
navbar.find_element_by_link_text('Services').click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Services Dashboard', page_header)
# User clicks the 'Notifications' link and sees the 'Notifications Dashboard' header
navbar = self.browser.find_element_by_class_name('navbar')
navbar.find_element_by_link_text('Notifications').click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Notifications Dashboard', page_header)
# User clicks the 'Add Service' link and sees the service form
navbar = self.browser.find_element_by_class_name('navbar')
navbar.find_element_by_link_text('Add').click()
navbar.find_element_by_link_text('Add Service').click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Service Add', page_header)
# User clicks the 'Add Notification' link and sees the notification form
navbar = self.browser.find_element_by_class_name('navbar')
navbar.find_element_by_link_text('Add').click()
navbar.find_element_by_link_text('Add Notification').click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Notification Add', page_header)
def test_can_see_login_screen_and_log_in(self):
# User has installed the ftp deploy app and visits the app login screen in order to log in
# He notices the Login text on the page as well
self.browser.get(self.live_server_url + reverse('ftpdeploy_login'))
self.wait.until(lambda browser: browser.find_element_by_tag_name('form'))
body = self.browser.find_element_by_tag_name('body').text
self.assertIn('Login', body)
# He types data into username and password but makes a typo,
# which causes an error message box to come up
username_field = self.browser.find_element_by_name('username')
username_field.send_keys('admin')
password_field = self.browser.find_element_by_name('password')
password_field.send_keys('admin1')
password_field.send_keys(Keys.RETURN)
self.wait.until(lambda browser: browser.find_element_by_class_name('alert-danger'), 'Error message after fail login attempt')
# At the third attempt he makes it: he types the proper username and password, logs in
# and is redirected to the dashboard page
AdminUserFactory()
username_field = self.browser.find_element_by_name('username')
username_field.send_keys('admin')
password_field = self.browser.find_element_by_name('password')
password_field.send_keys('admin')
password_field.send_keys(Keys.RETURN)
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Services Dashboard', page_header)
# When he tries to visit the login page after a successful login, he is redirected to the dashboard page again
self.browser.get(self.live_server_url + reverse('ftpdeploy_login'))
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Services Dashboard', page_header)
def test_can_log_out(self):
self.user_authenticate()
# User clicks the logout link and is redirected to the login screen
self.browser.find_element_by_xpath("//a[@href='%s']" % reverse('ftpdeploy_logout')).click()
self.wait.until(lambda browser: browser.find_element_by_tag_name('form'))
# After visiting the login screen again he stays on the login page
self.browser.get(self.live_server_url + reverse('ftpdeploy_login'))
self.wait.until(lambda browser: browser.find_element_by_tag_name('form'))
body = self.browser.find_element_by_tag_name('body').text
self.assertIn('Login', body)
def test_can_see_notification_page_and_manage_notifications(self):
self.user_authenticate()
# User goes to the notification page by clicking the Notifications link on the left hand side menu
page = self.browser.find_element_by_id('page')
page.find_element_by_link_text("Notifications").click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
# He can see Notification Dashboard title
self.assertIn('Notifications Dashboard', page_header)
# He is welcomed with a message and sees a big 'Add Notification' button
page = self.browser.find_element_by_id('page')
page.find_element_by_class_name('well')
add_notification_btn = self.browser.find_element_by_class_name('btn-success')
self.assertIn('Add Notification', add_notification_btn.text)
# He clicks the Add Notification button and sees a form with an option to add a new email, plus two others: DEPLOY USER and COMMIT USER
add_notification_btn.click()
self.wait_for_load()
self.assertIn('Notification Add', self.browser.find_element_by_class_name('page-header').text)
form = self.browser.find_element_by_tag_name('form')
self.assertIn('DEPLOY USER', form.text)
self.assertIn('COMMIT USER', form.text)
# He name notification entry - Default.
name_field = form.find_element_by_name('name')
name_field.send_keys('Default')
# He adds an email but accidentally inputs an invalid email, putting the field in an error state. Next he adds three correct emails
email_input = form.find_element_by_id('email')
add_email_btn = form.find_element_by_link_text('Add Email')
email_input.send_keys('email1.emai.com')
add_email_btn.click()
self.wait_for_load()
email_input.clear()
email_input.send_keys('email1@emal.com')
add_email_btn.click()
email_input.send_keys('email2@emal.com')
add_email_btn.click()
email_input.send_keys('email3@emal.com')
add_email_btn.click()
# He can see all emails on the list
self.assertIn('email1@emal.com', form.text)
self.assertIn('email2@emal.com', form.text)
self.assertIn('email3@emal.com', form.text)
# He decides he doesn't need the last email and clicks the remove button next to the third email,
# and notices the email is not on the list any more
last_email_row = self.browser.find_element_by_xpath("//form/table/tbody/tr[3]")
last_email_row.find_element_by_class_name('remove').click()
self.assertNotIn('email3@emal.com', form.text)
# He unticks SUCCESS for the first email
first_email_row = self.browser.find_element_by_xpath("//form/table/tbody/tr[1]")
first_email_row.find_element_by_name("_success").click()
# He unticks SUCCESS and FAIL for the second email
second_email_row = self.browser.find_element_by_xpath("//form/table/tbody/tr[2]")
second_email_row.find_element_by_name("_success").click()
second_email_row.find_element_by_name("_fail").click()
# He ticks DEPLOY USER - SUCCESS, and COMMIT USER - FAIL and saves
form.find_element_by_id('id_deploy_user_1').click()
form.find_element_by_id('id_commit_user_2').click()
form.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
# He notices a success message after a successful submit
self.browser.find_element_by_class_name('alert-success')
# He can see Default notification title on the list
table = self.browser.find_element_by_class_name('table')
self.assertIn('Default', table.text)
# He clicks the edit notification button to confirm all is ok, and notices the second email hadn't been saved. He can see only the first email
# and he realizes he unticked SUCCESS and FAIL for the second email.
first_notification_row = self.browser.find_element_by_xpath("//table/tbody/tr[1]")
first_notification_row.find_element_by_link_text('Edit').click()
form = self.browser.find_element_by_tag_name('form')
self.assertIn('email1@emal.com', form.text)
self.assertNotIn('email2@emal.com', form.text)
        # He confirms all settings look correct and saves
first_email_row = self.browser.find_element_by_xpath("//form/table/tbody/tr[1]")
self.assertFalse(first_email_row.find_element_by_name('_success').is_selected())
self.assertTrue(first_email_row.find_element_by_name('_fail').is_selected())
self.assertTrue(form.find_element_by_id('id_deploy_user_1').is_selected())
self.assertFalse(form.find_element_by_id('id_deploy_user_2').is_selected())
self.assertTrue(form.find_element_by_id('id_commit_user_2').is_selected())
self.assertFalse(form.find_element_by_id('id_commit_user_1').is_selected())
form.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
# He can see another Add Notification button on the page
        # He adds a new notification without any information and names it 'No Notifications'
page = self.browser.find_element_by_id('page')
page.find_element_by_link_text('Add Notification').click()
self.assertIn('Notification Add', self.browser.find_element_by_tag_name('body').text)
form = self.browser.find_element_by_tag_name('form')
name_field = form.find_element_by_name('name')
name_field.send_keys('No Notifications')
form.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
        # He then notices he has two entries on the notification list.
table = self.browser.find_element_by_class_name('table')
self.assertIn('Default', table.text)
self.assertIn('No Notifications', table.text)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 2)
        # He realizes he doesn't need the second notification for now, so he deletes it
        # and notices a delete confirmation message
second_email_row = self.browser.find_element_by_xpath("//table/tbody/tr[2]")
second_email_row.find_element_by_class_name('dropdown-toggle').click()
second_email_row.find_element_by_link_text('Delete').click()
second_email_row.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
self.browser.find_element_by_class_name('alert-success')
# He can see only one notification on the list again.
table = self.browser.find_element_by_class_name('table')
self.assertIn('Default', table.text)
self.assertNotIn('No Notifications', table.text)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 1)
def test_can_see_and_use_log_page(self):
self.user_authenticate()
        # User goes to the log page by clicking the Log link in the top menu
nav = self.browser.find_element_by_xpath("//div[@role='navigation']")
nav.find_element_by_link_text("Log").click()
self.wait_for_load()
        # He notices the Log Dashboard header
        # along with an empty log table
page_header = self.browser.find_element_by_class_name('page-header').text
self.assertIn('Log', page_header)
table_first_row = self.browser.find_element_by_xpath("//table/tbody/tr[1]")
self.assertIn('No Results', table_first_row.text)
        # He creates 2 services with deploy logs: two for the first service
        # (one of them failed) and one for the second.
        service1 = ServiceFactory()
        service2 = ServiceFactory()
        log1 = LogFactory(service=service1)
        log2 = LogFactory(service=service1, status=False)
        log3 = LogFactory(service=service2)
self.browser.get(self.live_server_url + reverse('ftpdeploy_log'))
self.wait_for_load()
        # He notices there are log entries in the table
        # In addition he notices one deploy failed
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 3)
        # He tests the log filter:
        # he ticks the fail status and sees only one row in the table
form = self.browser.find_element_by_id('log-filter')
form.find_element_by_id('status').click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 1)
table_first_row = self.browser.find_element_by_xpath("//table/tbody/tr[1]")
        self.assertIn(log2.service.repo_name, table_first_row.text)
        # afterwards he unticks the fail-only checkbox and is able to see all 3 rows again
form.find_element_by_id('status').click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 3)
        # next he selects logs with service1 assigned to them,
        # and sees two rows in the table with the appropriate service repo name
form.find_element_by_tag_name('select')
options = form.find_elements_by_tag_name('option')
for option in options:
            if option.text == log1.service.repo_name:
option.click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 2)
table = self.browser.find_element_by_tag_name('table')
        self.assertIn(log1.service.repo_name, table.text)
        # afterwards he selects logs with service2 assigned to them
        # and sees only one row in the table with the appropriate service repo name
form.find_element_by_tag_name('select')
options = form.find_elements_by_tag_name('option')
for option in options:
            if option.text == log3.service.repo_name:
option.click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//table/tbody/tr")
self.assertEqual(len(table_rows), 1)
table = self.browser.find_element_by_tag_name('table')
        self.assertIn(log3.service.repo_name, table.text)
def test_can_see_service_dashboard_page_and_manage_services(self):
self.user_authenticate()
        # User goes to the services page by clicking the 'Services' link in the left menu
page = self.browser.find_element_by_id('page')
page.find_element_by_link_text("Services").click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header').text
        # He can see the Services Dashboard title
self.assertIn('Services Dashboard', page_header)
        # He is welcomed with a message and sees a big 'Add Service' button
page = self.browser.find_element_by_id('page')
page.find_element_by_class_name('well')
add_service_btn = self.browser.find_element_by_class_name('btn-success')
self.assertIn('Add Service', add_service_btn.text)
        # He clicks the Add Service button and goes to the service form page
add_service_btn.click()
self.wait_for_load()
self.assertIn('Service Add', self.browser.find_element_by_class_name('page-header').text)
page = self.browser.find_element_by_id('page')
page.find_element_by_id('service-form')
        # Checking the save is omitted intentionally because of the cost of the validation process;
        # services are added programmatically without the check process
service1 = ServiceFactory(repo_hook=False, status=False)
service2 = ServiceFactory(repo_hook=False, status=False)
service3 = ServiceFactory(repo_hook=False, status=False)
self.browser.get(self.live_server_url + reverse('ftpdeploy_dashboard'))
self.wait_for_load()
        # After adding 3 services, he can see all of them on the list
service_list = self.browser.find_element_by_id('service-list')
self.assertIn(service1.repo_name, service_list.text)
self.assertIn(service2.repo_name, service_list.text)
self.assertIn(service3.repo_name, service_list.text)
        # He uses the filter and confirms it works as expected
form = self.browser.find_element_by_id('service-filter')
options = form.find_elements_by_tag_name('option')
for option in options:
if option.text == service1.repo_name:
option.click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//tbody[@id='service-list']/tr")
self.assertEqual(len(table_rows), 1)
table = self.browser.find_element_by_id('service-list')
self.assertIn(service1.repo_name, table.text)
self.assertNotIn(service2.repo_name, table.text)
self.assertNotIn(service3.repo_name, table.text)
for option in options:
if option.text == service2.repo_name:
option.click()
time.sleep(0.1)
table_rows = self.browser.find_elements_by_xpath("//tbody[@id='service-list']/tr")
self.assertEqual(len(table_rows), 1)
table = self.browser.find_element_by_id('service-list')
self.assertIn(service2.repo_name, table.text)
self.assertNotIn(service1.repo_name, table.text)
self.assertNotIn(service3.repo_name, table.text)
        # He notices he can also add a new service via the 'Add Service' link when services already exist.
        # He clicks it and sees the add service form again.
page = self.browser.find_element_by_id('page')
page.find_element_by_link_text('Add Service').click()
self.wait_for_load()
page = self.browser.find_element_by_id('page')
page.find_element_by_id('service-form')
self.browser.get(self.live_server_url + reverse('ftpdeploy_dashboard'))
self.wait_for_load()
        # He clicks the 'Edit' link next to the service and goes to the edit form. He notices that the form is prepopulated with the saved values
first_service = self.browser.find_element_by_xpath("//tbody[@id='service-list']/tr[1]")
first_service.find_element_by_link_text('Edit').click()
self.wait_for_load()
form = self.browser.find_element_by_id('service-form')
repo_name = self.browser.find_element_by_id('id_repo_name')
secret_key = self.browser.find_element_by_id('id_secret_key')
self.assertIn(service1.repo_name, repo_name.get_attribute('value'))
self.assertIn(service1.secret_key, secret_key.get_attribute('value'))
        # He notices there is a 'Manage' link in the page header. He clicks it and goes to the manage page
page_header = self.browser.find_element_by_class_name('page-header')
page_header.find_element_by_link_text('Manage').click()
self.wait_for_load()
page_header = self.browser.find_element_by_class_name('page-header')
self.assertIn('%s Manage' % service1.repo_name, page_header.text)
self.browser.get(self.live_server_url + reverse('ftpdeploy_dashboard'))
self.wait_for_load()
        # He clicks the 'Manage' link next to the service and goes to the manage page
first_service = self.browser.find_element_by_xpath("//tbody[@id='service-list']/tr[1]")
first_service.find_element_by_link_text('Manage').click()
self.wait_for_load()
        # He can see the '<repo_name> Manage' header
page_header = self.browser.find_element_by_class_name('page-header')
self.assertIn('%s Manage' % service1.repo_name, page_header.text)
        # He notices the service status is failing because of an invalid hook, and sees an 'Add hook' link
page = self.browser.find_element_by_id('page')
self.assertIn('Add hook', page.text)
        # The 'Add hook' link test is omitted because the request is time consuming;
        # the check flag is changed programmatically instead
service = Service.objects.get(pk=service1.pk)
service.repo_hook = True
service.save()
self.browser.get(self.live_server_url + reverse('ftpdeploy_service_manage', args=(service.pk,)))
self.wait_for_load()
        # After he clicks 'Add hook' he notices that the link disappears
page = self.browser.find_element_by_id('page')
self.assertNotIn('Add hook', page.text)
        # After a few commits he can see the Recent Deploys table with the latest deploys
log1 = LogFactory(service=service1)
log2 = LogFactory(service=service1)
log3 = LogFactory(service=service1, status=False)
log4 = LogFactory(service=service1, status=False)
self.browser.get(self.live_server_url + reverse('ftpdeploy_service_manage', args=(service.pk,)))
self.wait_for_load()
        # He notices two of the deploys failed, and is able to see the Fail Deploys table along with a 'Restore Deploys' button.
fail_deploys = self.browser.find_element_by_id('fail-deploys')
fail_deploys.find_element_by_link_text('Restore Deploys')
restore_list_rows = self.browser.find_elements_by_xpath("//tbody[@id='restore-list']/tr")
self.assertEqual(len(restore_list_rows), 2)
        # He decides to skip the first of the failed deploys and clicks the 'Skip' button;
        # the skipped entry is then no longer in the 'Fail Deploys' table
first_fail_deploy_row = self.browser.find_element_by_xpath("//tbody[@id='restore-list']/tr[1]")
first_fail_deploy_row.find_element_by_link_text('Skip').click()
first_fail_deploy_row.find_element_by_link_text('Confirm').click()
self.browser.get(self.live_server_url + reverse('ftpdeploy_service_manage', args=(service.pk,)))
self.wait_for_load()
restore_list_rows = self.browser.find_elements_by_xpath("//tbody[@id='restore-list']/tr")
self.assertEqual(len(restore_list_rows), 1)
        # He clicks 'Restore Deploys' and sees a popup with the 'Restore Tree' title
fail_deploys = self.browser.find_element_by_id('fail-deploys')
fail_deploys.find_element_by_link_text('Restore Deploys').click()
self.wait.until(lambda browser: browser.find_element_by_id('restore-modal'))
time.sleep(1)
restore_modal = self.browser.find_element_by_id('restore-modal')
modal_title = restore_modal.find_element_by_class_name('modal-title')
self.assertIn('Restore Tree', restore_modal.text)
        # He is able to see 'New', 'Modified' and 'Removed' files in the restore information
        # along with commit information
self.assertIn('New', restore_modal.text)
self.assertIn('Modified', restore_modal.text)
self.assertIn('Removed', restore_modal.text)
self.assertIn('commit 1', restore_modal.text)
self.assertIn('commit 2', restore_modal.text)
        # He clicks the close button and closes the modal window
restore_modal.find_element_by_xpath("//button[@data-dismiss='modal']").click()
time.sleep(1)
body = self.browser.find_element_by_tag_name('body')
self.assertNotIn('Restore Tree', body.text)
        # He decides to change notifications for the service, so he clicks the 'Notification' link
notification = NotificationFactory(name='Default')
self.browser.find_element_by_id('notification').click()
self.wait.until(lambda browser: browser.find_element_by_id('notification-modal'))
time.sleep(1)
restore_modal = self.browser.find_element_by_id('notification-modal')
modal_title = restore_modal.find_element_by_class_name('modal-title')
self.assertIn('Notification', restore_modal.text)
        # In the popup he selects the 'Default' notification and clicks save
form = self.browser.find_element_by_id('notification-form')
options = form.find_elements_by_tag_name('option')
for option in options:
if option.text == notification.name:
option.click()
time.sleep(1)
form.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
        # He sees a success message for the saved notification
self.browser.find_element_by_class_name('alert-success')
        # He sees the notification name in the Statistics section
page = self.browser.find_element_by_id('page')
self.assertIn('Notifications: Default', page.text)
        # He notices the status icon is actually a link to refresh the service status
self.browser.find_element_by_xpath("//a[@id='service-manage-status']")
        # He clicks the 'Edit' link and sees the service edit form
page_header = self.browser.find_element_by_class_name('page-header')
page_header.find_element_by_link_text('Edit').click()
self.wait_for_load()
form = self.browser.find_element_by_id('service-form')
repo_name = self.browser.find_element_by_id('id_repo_name')
secret_key = self.browser.find_element_by_id('id_secret_key')
self.assertIn(service1.repo_name, repo_name.get_attribute('value'))
self.assertIn(service1.secret_key, secret_key.get_attribute('value'))
        # He decides to delete the service and clicks the 'Delete' link, then confirms the delete
page_header = self.browser.find_element_by_class_name('page-header')
page_header.find_element_by_class_name('dropdown-toggle').click()
page_header.find_element_by_link_text('Delete').click()
page_header.find_element_by_xpath("//button[@type='submit']").click()
self.wait_for_load()
self.browser.find_element_by_class_name('alert-success')
        # He is redirected to the service dashboard,
        # sees a success message and doesn't see the removed service on the list any more
self.browser.find_element_by_class_name('alert-success')
service_list = self.browser.find_element_by_id('service-list')
self.assertNotIn(service1.repo_name, service_list.text)
self.assertIn(service2.repo_name, service_list.text)
self.assertIn(service3.repo_name, service_list.text)
|
{
"content_hash": "2c1e36801c3afa9e1404b32573234691",
"timestamp": "",
"source": "github",
"line_count": 598,
"max_line_length": 137,
"avg_line_length": 45.979933110367895,
"alnum_prop": 0.6594413732906604,
"repo_name": "lpakula/django-ftp-deploy",
"id": "ceed142ea0136f7ce86959026148d0bd2506e7c8",
"size": "27496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration_tests/functionality_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23216"
},
{
"name": "JavaScript",
"bytes": "10202"
},
{
"name": "Perl",
"bytes": "6798"
},
{
"name": "Python",
"bytes": "190425"
},
{
"name": "Shell",
"bytes": "10626"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0011_remove_user_inlove'),
]
operations = [
migrations.AddField(
model_name='user',
name='last_starting_date',
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name='user',
name='starting_date',
field=models.DateTimeField(null=True),
),
]
|
{
"content_hash": "d54d94d6a744c1e1e6a7349a7553a6e2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 24.130434782608695,
"alnum_prop": 0.572972972972973,
"repo_name": "jeffminsungkim/datelikeus",
"id": "4b4b94c1fee7282a884d26e87aa2b5444c7b5879",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datelikeus/users/migrations/0012_auto_20171002_0016.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "148630"
},
{
"name": "HTML",
"bytes": "834894"
},
{
"name": "JavaScript",
"bytes": "122309"
},
{
"name": "Python",
"bytes": "52390"
},
{
"name": "Shell",
"bytes": "4240"
}
],
"symlink_target": ""
}
|
import datetime
import logging
from itertools import chain, product
from actstream.actions import follow, unfollow
from actstream.models import Follow
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField, CICharField
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.db.models.signals import post_delete, pre_delete
from django.db.transaction import on_commit
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.html import format_html
from guardian.shortcuts import assign_perm
from guardian.utils import get_anonymous_user
from machina.apps.forum.models import Forum
from machina.apps.forum_permission.models import (
ForumPermission,
GroupForumPermission,
UserForumPermission,
)
from stdimage import JPEGField
from tldextract import extract
from grandchallenge.anatomy.models import BodyStructure
from grandchallenge.challenges.emails import (
send_challenge_created_email,
send_external_challenge_created_email,
)
from grandchallenge.core.storage import (
get_banner_path,
get_logo_path,
get_social_image_path,
public_s3_storage,
)
from grandchallenge.evaluation.tasks import assign_evaluation_permissions
from grandchallenge.modalities.models import ImagingModality
from grandchallenge.organizations.models import Organization
from grandchallenge.pages.models import Page
from grandchallenge.publications.models import Publication
from grandchallenge.subdomains.utils import reverse
from grandchallenge.task_categories.models import TaskType
logger = logging.getLogger(__name__)
class ChallengeManager(models.Manager):
def non_hidden(self):
"""Filter the hidden challenge"""
return self.filter(hidden=False)
def validate_nounderscores(value):
if "_" in value:
raise ValidationError("Underscores (_) are not allowed.")
def validate_short_name(value):
if value.lower() in settings.DISALLOWED_CHALLENGE_NAMES:
raise ValidationError("That name is not allowed.")
class ChallengeSeries(models.Model):
name = CICharField(max_length=64, blank=False, unique=True)
url = models.URLField(blank=True)
class Meta:
ordering = ("name",)
verbose_name_plural = "Challenge Series"
def __str__(self):
return f"{self.name}"
@property
def badge(self):
return format_html(
(
'<span class="badge badge-info above-stretched-link" '
'title="Associated with {0}"><i class="fas fa-globe fa-fw">'
"</i> {0}</span>"
),
self.name,
)
class ChallengeBase(models.Model):
CHALLENGE_ACTIVE = "challenge_active"
CHALLENGE_INACTIVE = "challenge_inactive"
DATA_PUB = "data_pub"
creator = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
short_name = CICharField(
max_length=50,
blank=False,
help_text=(
"short name used in url, specific css, files etc. "
"No spaces allowed"
),
validators=[
validate_nounderscores,
validate_slug,
validate_short_name,
],
unique=True,
)
description = models.CharField(
max_length=1024,
default="",
blank=True,
help_text="Short summary of this project, max 1024 characters.",
)
title = models.CharField(
max_length=64,
blank=True,
default="",
help_text=(
"The name of the challenge that is displayed on the All Challenges"
" page. If this is blank the short name of the challenge will be "
"used."
),
)
logo = JPEGField(
upload_to=get_logo_path,
storage=public_s3_storage,
blank=True,
help_text="A logo for this challenge. Should be square with a resolution of 640x640 px or higher.",
variations=settings.STDIMAGE_LOGO_VARIATIONS,
)
social_image = JPEGField(
upload_to=get_social_image_path,
storage=public_s3_storage,
blank=True,
help_text="An image for this challenge which is displayed when you post the link on social media. Should have a resolution of 640x320 px (1280x640 px for best display).",
variations=settings.STDIMAGE_SOCIAL_VARIATIONS,
)
hidden = models.BooleanField(
default=True,
help_text="Do not display this Project in any public overview",
)
educational = models.BooleanField(
default=False, help_text="It is an educational challange"
)
workshop_date = models.DateField(
null=True,
blank=True,
help_text=(
"Date on which the workshop belonging to this project will be held"
),
)
event_name = models.CharField(
max_length=1024,
default="",
blank=True,
null=True,
help_text="The name of the event the workshop will be held at",
)
event_url = models.URLField(
blank=True,
null=True,
help_text="Website of the event which will host the workshop",
)
publications = models.ManyToManyField(
Publication,
blank=True,
help_text="Which publications are associated with this challenge?",
)
data_license_agreement = models.TextField(
blank=True,
help_text="What is the data license agreement for this challenge?",
)
task_types = models.ManyToManyField(
TaskType, blank=True, help_text="What type of task is this challenge?"
)
modalities = models.ManyToManyField(
ImagingModality,
blank=True,
help_text="What imaging modalities are used in this challenge?",
)
structures = models.ManyToManyField(
BodyStructure,
blank=True,
help_text="What structures are used in this challenge?",
)
series = models.ManyToManyField(
ChallengeSeries,
blank=True,
help_text="Which challenge series is this associated with?",
)
organizations = models.ManyToManyField(
Organization,
blank=True,
help_text="The organizations associated with this challenge",
related_name="%(class)ss",
)
number_of_training_cases = models.IntegerField(blank=True, null=True)
number_of_test_cases = models.IntegerField(blank=True, null=True)
filter_classes = ArrayField(
CICharField(max_length=32), default=list, editable=False
)
objects = ChallengeManager()
def __str__(self):
return self.short_name
@property
def public(self):
"""Helper property for consistency with other objects"""
return not self.hidden
def get_absolute_url(self):
raise NotImplementedError
@property
def is_self_hosted(self):
return True
@property
def year(self):
if self.workshop_date:
return self.workshop_date.year
else:
return self.created.year
@property
def upcoming_workshop_date(self):
if self.workshop_date and self.workshop_date > datetime.date.today():
return self.workshop_date
@property
def registered_domain(self):
"""
Copied from grandchallenge_tags
        Try to find out what framework this challenge is hosted on and
        return a string which can also be used as an id or class in HTML.
"""
return extract(self.get_absolute_url()).registered_domain
class Meta:
abstract = True
ordering = ("pk",)
class Challenge(ChallengeBase):
banner = JPEGField(
upload_to=get_banner_path,
storage=public_s3_storage,
blank=True,
help_text=(
"Image that gets displayed at the top of each page. "
"Recommended resolution 2200x440 px."
),
variations=settings.STDIMAGE_BANNER_VARIATIONS,
)
disclaimer = models.CharField(
max_length=2048,
default="",
blank=True,
null=True,
help_text=(
"Optional text to show on each page in the project. "
"For showing 'under construction' type messages"
),
)
require_participant_review = models.BooleanField(
default=False,
help_text=(
"If ticked, new participants need to be approved by project "
"admins before they can access restricted pages. If not ticked, "
"new users are allowed access immediately"
),
)
use_registration_page = models.BooleanField(
default=True,
help_text="If true, show a registration page on the challenge site.",
)
registration_page_text = models.TextField(
default="",
blank=True,
help_text=(
"The text to use on the registration page, you could include "
"a data usage agreement here. You can use HTML markup here."
),
)
use_workspaces = models.BooleanField(default=False)
use_evaluation = models.BooleanField(
default=True,
help_text=(
"If true, use the automated evaluation system. See the evaluation "
"page created in the Challenge site."
),
)
use_teams = models.BooleanField(
default=False,
help_text=(
"If true, users are able to form teams to participate in "
"this challenge together."
),
)
admins_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="admins_of_challenge",
)
participants_group = models.OneToOneField(
Group,
editable=False,
on_delete=models.PROTECT,
related_name="participants_of_challenge",
)
forum = models.OneToOneField(
Forum, editable=False, on_delete=models.PROTECT
)
display_forum_link = models.BooleanField(
default=False,
help_text="Display a link to the challenge forum in the nav bar.",
)
cached_num_participants = models.PositiveIntegerField(
editable=False, default=0
)
cached_num_results = models.PositiveIntegerField(editable=False, default=0)
cached_latest_result = models.DateTimeField(
editable=False, blank=True, null=True
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
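        # snapshot the initial visibility so save() can detect when the
        # "hidden" flag is toggled on an existing challenge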
self._hidden_orig = self.hidden
def save(self, *args, **kwargs):
adding = self._state.adding
if adding:
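            # the groups and the forum are non-nullable relations, so they
            # must exist before the challenge row is first inserted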
self.create_groups()
self.create_forum()
super().save(*args, **kwargs)
if adding:
if self.creator:
self.add_admin(user=self.creator)
self.update_permissions()
self.create_forum_permissions()
self.create_default_pages()
self.create_default_phases()
send_challenge_created_email(self)
if adding or self.hidden != self._hidden_orig:
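            # defer the permission fan-out until the transaction commits so
            # the async task sees the persisted challenge state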
on_commit(
lambda: assign_evaluation_permissions.apply_async(
kwargs={"challenge_pk": self.pk}
)
)
self.update_user_forum_permissions()
def update_permissions(self):
assign_perm("change_challenge", self.admins_group, self)
def create_forum_permissions(self):
participant_group_perms = {
"can_see_forum",
"can_read_forum",
"can_start_new_topics",
"can_reply_to_topics",
"can_delete_own_posts",
"can_edit_own_posts",
"can_post_without_approval",
"can_create_polls",
"can_vote_in_polls",
}
admin_group_perms = {
"can_lock_topics",
"can_edit_posts",
"can_delete_posts",
"can_approve_posts",
"can_reply_to_locked_topics",
"can_post_announcements",
"can_post_stickies",
*participant_group_perms,
}
permissions = ForumPermission.objects.filter(
codename__in=admin_group_perms
).values_list("codename", "pk")
permissions = {codename: pk for codename, pk in permissions}
GroupForumPermission.objects.bulk_create(
chain(
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.participants_group,
forum=self.forum,
has_perm=True,
)
for codename in participant_group_perms
),
(
GroupForumPermission(
permission_id=permissions[codename],
group=self.admins_group,
forum=self.forum,
has_perm=True,
)
for codename in admin_group_perms
),
)
)
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=permissions[codename],
**{user: True},
forum=self.forum,
has_perm=not self.hidden,
)
for codename, user in product(
["can_see_forum", "can_read_forum"],
["anonymous_user", "authenticated_user"],
)
)
def update_user_forum_permissions(self):
perms = UserForumPermission.objects.filter(
permission__codename__in=["can_see_forum", "can_read_forum"],
forum=self.forum,
)
for p in perms:
p.has_perm = not self.hidden
UserForumPermission.objects.bulk_update(perms, ["has_perm"])
def create_groups(self):
# Create the groups only on first save
admins_group = Group.objects.create(name=f"{self.short_name}_admins")
participants_group = Group.objects.create(
name=f"{self.short_name}_participants"
)
self.admins_group = admins_group
self.participants_group = participants_group
def create_forum(self):
f, created = Forum.objects.get_or_create(
name=settings.FORUMS_CHALLENGE_CATEGORY_NAME, type=Forum.FORUM_CAT,
)
if created:
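            # on first creation of the shared category forum, grant read
            # access to both the anonymous and the authenticated pseudo-users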
UserForumPermission.objects.bulk_create(
UserForumPermission(
permission_id=perm_id,
**{user: True},
forum=f,
has_perm=True,
)
for perm_id, user in product(
ForumPermission.objects.filter(
codename__in=["can_see_forum", "can_read_forum"]
).values_list("pk", flat=True),
["anonymous_user", "authenticated_user"],
)
)
self.forum = Forum.objects.create(
name=self.title if self.title else self.short_name,
parent=f,
type=Forum.FORUM_POST,
)
def create_default_pages(self):
Page.objects.create(
title=self.short_name,
html=render_to_string(
"pages/defaults/home.html", {"challenge": self}
),
challenge=self,
permission_level=Page.ALL,
)
Page.objects.create(
title="Contact",
html=render_to_string(
"pages/defaults/contact.html", {"challenge": self}
),
challenge=self,
permission_level=Page.REGISTERED_ONLY,
)
def create_default_phases(self):
self.phase_set.create(challenge=self)
def is_admin(self, user) -> bool:
"""Determines if this user is an admin of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.admins_group.pk).exists()
)
def is_participant(self, user) -> bool:
"""Determines if this user is a participant of this challenge."""
return (
user.is_superuser
or user.groups.filter(pk=self.participants_group.pk).exists()
)
def get_admins(self):
"""Return all admins of this challenge."""
return self.admins_group.user_set.all()
def get_participants(self):
"""Return all participants of this challenge."""
return self.participants_group.user_set.all()
def get_absolute_url(self):
return reverse(
"pages:home", kwargs={"challenge_short_name": self.short_name},
)
def add_participant(self, user):
if user != get_anonymous_user():
user.groups.add(self.participants_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_participant(self, user):
user.groups.remove(self.participants_group)
unfollow(user=user, obj=self.forum, send_action=False)
def add_admin(self, user):
if user != get_anonymous_user():
user.groups.add(self.admins_group)
follow(
user=user, obj=self.forum, actor_only=False, send_action=False
)
else:
raise ValueError("You cannot add the anonymous user to this group")
def remove_admin(self, user):
user.groups.remove(self.admins_group)
unfollow(user=user, obj=self.forum, send_action=False)
class Meta(ChallengeBase.Meta):
verbose_name = "challenge"
verbose_name_plural = "challenges"
@receiver(post_delete, sender=Challenge)
def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):
"""
Deletes the related groups.
We use a signal rather than overriding delete() to catch usages of
bulk_delete.
"""
try:
instance.admins_group.delete(using=using)
except ObjectDoesNotExist:
pass
try:
instance.participants_group.delete(using=using)
except ObjectDoesNotExist:
pass
class ExternalChallenge(ChallengeBase):
homepage = models.URLField(
blank=False, help_text=("What is the homepage for this challenge?")
)
data_stored = models.BooleanField(
default=False,
help_text=("Has the grand-challenge team stored the data?"),
)
def save(self, *args, **kwargs):
adding = self._state.adding
super().save(*args, **kwargs)
if adding:
send_external_challenge_created_email(self)
def get_absolute_url(self):
return self.homepage
@property
def is_self_hosted(self):
return False
@receiver(pre_delete, sender=Challenge)
@receiver(pre_delete, sender=ExternalChallenge)
def delete_challenge_follows(*_, instance: Challenge, **__):
ct = ContentType.objects.filter(
app_label=instance._meta.app_label, model=instance._meta.model_name
).get()
Follow.objects.filter(object_id=instance.pk, content_type=ct).delete()
|
{
"content_hash": "8665fd4df825b8feb2dfeba3a47ec878",
"timestamp": "",
"source": "github",
"line_count": 613,
"max_line_length": 178,
"avg_line_length": 31.843393148450243,
"alnum_prop": 0.602766393442623,
"repo_name": "comic/comic-django",
"id": "8f461aee85cbec5d2ff57ce0fab617a3a8a6cfb4",
"size": "19520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/challenges/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from celery.task import task, periodic_task
from celery.schedules import crontab
from django.contrib.auth.models import User
from django.utils import simplejson
from models import RawTrackingEvent
import logging
logger = logging.getLogger(__name__)
@task(name='user_analytics.set_warning')
def async_set_warning(**kwargs):
u = User.objects.get(pk=kwargs['user_id'])
msg = "%s | (%s) %s %s last_login: %s " % (kwargs['message'],
u.username or '',
u.first_name or '',
u.last_name or '',
u.last_login or '')
logger.warning(msg)
return
@task(name='user_analytics.register_event')
def async_register_event(**kwargs):
original_cookie = kwargs.get('cookie', None)
    # check that the cookie has not been tampered with
from tracking import verify_tracking_key
cookie = verify_tracking_key(original_cookie)
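    # verify_tracking_key returns None when the cookie signature check fails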
if cookie is None:
        if original_cookie is None:
original_cookie = ''
logger.warning('cookie %s has been tampered with' % (original_cookie))
return
event_name = kwargs.get('event_name', 'UNDEFINED')
event_time = kwargs.get('event_time', None)
event_data = kwargs.get('event_data', None)
raw_request = kwargs.get('request', None)
    # Do some filtering before we write to the db.
    # Skip favicon.ico requests. This case only happens if we are in debug mode
    # and we are serving the favicon.ico file using the debug server (since there is
    # no nginx or apache directive that would proxy that to a static file instead)
if kwargs['event_name'] == 'PAGE_VISITED':
if raw_request is not None:
if '/favicon.ico' in raw_request['PATH_INFO']:
return
if '/admin' in raw_request['PATH_INFO']:
return
raw_request_json = simplejson.dumps(raw_request)
#event_data_json = simplejson.dumps(event_data)
try:
tracking_event = RawTrackingEvent()
tracking_event.event_time = event_time
tracking_event.name = event_name
tracking_event.cookie = cookie
tracking_event.raw_request = raw_request_json
tracking_event.event_data = event_data
tracking_event.save()
except Exception, exc:
async_register_event.retry(exc=exc)
@periodic_task(name='user_analytics.combine_periodic_task', run_every=crontab(hour='0', minute='0', day_of_week='*'))
def async_combine_periodic_task():
return
|
{
"content_hash": "8030cad94de41c493151e8bfbb7747f7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 117,
"avg_line_length": 32.607594936708864,
"alnum_prop": 0.6242236024844721,
"repo_name": "rburhum/django-user-analytics",
"id": "c9aa1b4ec52def9061681fd5115a7dd490f13b91",
"size": "2576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_analytics/tasks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "40010"
},
{
"name": "Python",
"bytes": "27287"
}
],
"symlink_target": ""
}
|
"""Module with specification of all supported command line arguments."""
import argparse
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument('-dl', '--disable-liveness',
help='disable the service liveness checks',
action='store_true')
cli_parser.add_argument('-ds', '--disable-sla',
help='disable the SLA part',
action='store_true')
cli_parser.add_argument('-dq', '--disable-code-quality',
help='disable the code quality check',
action='store_true')
cli_parser.add_argument('-dj', '--disable-ci-jobs',
help='disable CI jobs table generation',
action='store_true')
cli_parser.add_argument('-c', '--clone-repositories',
help='make local clone of all repositories',
action='store_true')
cli_parser.add_argument('-d', '--cleanup-repositories',
help='cleanup the local clones of all repositories',
action='store_true')
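# Typical usage (illustrative): args = cli_parser.parse_args(), then gate
# behaviour on the flags, e.g. skip the SLA part when args.disable_sla is set.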
|
{
"content_hash": "d6128d5c36324603bdbef365135f0d85",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 38.6551724137931,
"alnum_prop": 0.5432649420160571,
"repo_name": "jpopelka/fabric8-analytics-common",
"id": "e76fd1534e9eb863cc9283820505fab8b61f327b",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/src/cliargs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "998"
},
{
"name": "Gherkin",
"bytes": "140658"
},
{
"name": "HTML",
"bytes": "25307"
},
{
"name": "Python",
"bytes": "354439"
},
{
"name": "Shell",
"bytes": "9619"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
from django.conf import settings
from django.contrib import admin
from django.test.utils import override_settings
from cms.models import Page
from cms.utils.compat.dj import is_installed
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_ADD
APPS_WITHOUT_REVERSION = [app for app in settings.INSTALLED_APPS if app != 'reversion']
class ContainerPluginTest(CMSTestCase):
def setUp(self):
self.admin_site = admin.sites.AdminSite()
self.user = self.get_superuser()
self.password = "top_secret"
self.user.set_password(self.password)
self.user.save()
self.client.login(username=self.user.username, password=self.password)
self.language = 'en'
self.site_id = settings.SITE_ID
# create page
response = self.client.post(
'/' + self.language + URL_CMS_PAGE_ADD[3:],
data={
'language': self.language,
'site': self.site_id,
'template': 'INHERIT',
'title': 'HOME',
'slug': 'home',
'_save': 'Save',
},
follow=True,
)
self.assertEqual(response.status_code, 200)
# get page and placeholder
self.page = Page.objects.get(publisher_is_draft=True, is_home=True)
self.placeholder = self.page.placeholders.get(slot='Main Content')
def _create_and_configure_a_container_plugin(self):
# create a plugin
response = self.client.post(
'/' + self.language + URL_CMS_PLUGIN_ADD[3:],
data={
'plugin_parent': '',
'plugin_type': 'BootstrapContainerPlugin',
'plugin_language': self.language,
'placeholder_id': self.placeholder.id,
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content.decode('utf-8'))
plugin_url = response_data['url']
# configure that plugin
response = self.client.post(
plugin_url,
data={
'_popup': '1',
'breakpoints': ['xs', 'lg'],
'_save': 'Save',
},
)
self.assertEqual(response.status_code, 200)
@override_settings(INSTALLED_APPS=APPS_WITHOUT_REVERSION)
def test_without_reversion(self):
self.assertFalse(is_installed('reversion'))
self._create_and_configure_a_container_plugin()
def test_with_reversion(self):
self.assertTrue(is_installed('reversion'))
self._create_and_configure_a_container_plugin()
|
{
"content_hash": "6a71523271ec123255c596fb15f79753",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 87,
"avg_line_length": 35.79220779220779,
"alnum_prop": 0.5870827285921626,
"repo_name": "schacki/djangocms-cascade",
"id": "727fe77723794f78e007ce6892a6dbf8d10f3794",
"size": "2780",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2562"
},
{
"name": "HTML",
"bytes": "12812"
},
{
"name": "JavaScript",
"bytes": "88398"
},
{
"name": "Python",
"bytes": "281313"
}
],
"symlink_target": ""
}
|
import cStringIO
import csv
import logging
import os.path
import pickle
import re
import sys
# for eval context:
import time
import openerp
import openerp.release
import openerp.workflow
from yaml_import import convert_yaml_import
import assertion_report
_logger = logging.getLogger(__name__)
try:
    import pytz
except ImportError:
    _logger.warning('could not find pytz library, please install it')
    class pytzclass(object):
        all_timezones = []
    pytz = pytzclass()
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
import misc
from config import config
from translate import _
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import unquote
from openerp import SUPERUSER_ID
# Import of XML records requires the unsafe eval as well,
# almost everywhere, which is ok because it supposedly comes
# from trusted data, but at least we make it obvious now.
unsafe_eval = eval
from safe_eval import safe_eval as eval
class ParseError(Exception):
def __init__(self, msg, text, filename, lineno):
self.msg = msg
self.text = text
self.filename = filename
self.lineno = lineno
def __str__(self):
return '"%s" while parsing %s:%s, near\n%s' \
% (self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
idref2 = dict(idref,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=openerp.release.major_version,
ref=_ref(self, cr),
pytz=pytz)
if len(model_str):
idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
    As a convention we'll surround multiple roots with a container "data" element, to be
ignored later when parsing.
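    For example, a field whose content is <p>a</p><p>b</p> is parsed as
    if it were <data><p>a</p><p>b</p></data>.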
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
        for child in list(node):  # snapshot, since appending moves children out of node
data_node.append(child)
node.append(data_node)
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model', '').encode('utf-8')
if node.get('search'):
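            # 'search' holds a domain to evaluate; 'use' selects which field
            # of the matching records supplies the value (defaults to 'id')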
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'id').encode('utf-8')
f_name = node.get("name",'').encode('utf-8')
idref2 = {}
if f_search:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
q = unsafe_eval(f_search, idref2)
ids = pool[f_model].search(cr, uid, q)
if f_use != 'id':
ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
_cols = pool[f_model]._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if a_eval:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logging.getLogger('openerp.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
raise
def _process(s, idref):
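            # resolves %(xml_id)d / %(xml_id)s placeholders to database ids,
            # e.g. "%(base.group_user)d" becomes "6" when base.group_user
            # maps to id 6 (the id here is illustrative)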
matches = re.finditer('[^%]%\((.*?)\)[ds]', s)
done = []
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.append(found)
id = m.groups()[0]
            if id not in idref:
idref[id] = self.id_get(cr, id)
s = s.replace(found, str(idref[id]))
        s = s.replace('%%', '%') # Quite weird, but it's for (somewhat) backward compatibility's sake
return s
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
if t == 'html':
return _process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
data = node.text
if node.get('file'):
with openerp.tools.file_open(node.get('file'), 'rb') as f:
data = f.read()
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'base64':
return data.encode('base64')
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
# FIXME: should probably be exclusive
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, x)
args = unsafe_eval(a_eval, idref)
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val is not None:
args.append(return_val)
model = pool[node.get('model', '')]
method = node.get('name')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
return node.text
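# Menu names use "/" as a path separator; "\/" escapes a literal slash
# (used by _tag_menuitem below).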
escape_re = re.compile(r'(?<!\\)/')
def escape(x):
return x.replace('\\/', '/')
class xml_import(object):
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
def isnoupdate(self, data_node=None):
return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
node_context = node.get("context",'').encode('utf8')
context = {}
for ctx in (data_node_context, node_context):
if ctx:
try:
ctx_res = unsafe_eval(ctx, eval_dict)
if isinstance(context, dict):
context.update(ctx_res)
else:
context = ctx_res
except NameError:
# Some contexts contain references that are only valid at runtime at
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if node_uid:
return self.id_get(cr, node_uid)
return uid
def _test_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
            assert '.' not in id, """The ID reference "%s" must contain
at most one dot. Dots are used to refer to IDs of other modules, in the
form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
            _logger.error('id: %s is too long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None, mode=None):
d_model = rec.get("model")
d_search = rec.get("search",'').encode('utf-8')
d_id = rec.get("id")
ids = []
if d_search:
idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
try:
ids = self.pool[d_model].search(cr, self.uid, unsafe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
pass
if d_id:
try:
ids.append(self.id_get(cr, d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
pass
if ids:
self.pool[d_model].unlink(cr, self.uid, ids)
def _remove_ir_values(self, cr, name, value, model):
ir_values_obj = self.pool['ir.values']
ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
if ir_value_ids:
ir_values_obj.unlink(cr, self.uid, ir_value_ids)
return True
def _tag_report(self, cr, rec, data_node=None, mode=None):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f,'').encode('utf8')
assert res[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
('report_type', 'report_type'), ('parser', 'parser')):
if rec.get(field):
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = eval(rec.get('auto','False'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
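            # many2many write commands: a leading '-' unlinks the group
            # ((3, id)), otherwise the group is linked ((4, id))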
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if not rec.get('menu') or eval(rec.get('menu','False')):
keyword = str(rec.get('keyword', 'client_print_multi'))
value = 'ir.actions.report.xml,'+str(id)
replace = rec.get('replace', True)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
elif self.mode=='update' and eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
value = 'ir.actions.report.xml,'+str(id)
self._remove_ir_values(cr, res['name'], value, res['model'])
return id
def _tag_function(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return
def _tag_url(self, cr, rec, data_node=None, mode=None):
url = rec.get("url",'').encode('utf8')
target = rec.get("target",'').encode('utf8')
name = rec.get("name",'').encode('utf8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
res = {'name': name, 'url': url, 'target':target}
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
def _tag_act_window(self, cr, rec, data_node=None, mode=None):
name = rec.get('name','').encode('utf-8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
view_id = False
if rec.get('view_id'):
view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
domain = rec.get('domain','').encode('utf-8') or '[]'
res_model = rec.get('res_model','').encode('utf-8')
src_model = rec.get('src_model','').encode('utf-8')
view_type = rec.get('view_type','').encode('utf-8') or 'form'
view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
usage = rec.get('usage','').encode('utf-8')
limit = rec.get('limit','').encode('utf-8')
auto_refresh = rec.get('auto_refresh','').encode('utf-8')
uid = self.uid
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
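        # e.g. repr({'default_id': active_id}) gives
        # "{'default_id': active_id}" with the variable left unquoted, so
        # the stored context still refers to the runtime value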
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
def ref(str_id):
return self.id_get(cr, str_id)
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': type,
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
'uid' : uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
'ref' : ref,
}
context = self.get_context(data_node, rec, eval_context)
try:
domain = unsafe_eval(domain, eval_context)
except NameError:
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if rec.get('multi'):
res['multi'] = eval(rec.get('multi', 'False'))
id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if src_model:
#keyword = 'client_action_relate'
keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
value = 'ir.actions.act_window,'+str(id)
replace = rec.get('replace','') or True
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
if self.mode != 'init':
return
res = {}
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
def _tag_workflow(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = rec.get('model').encode('ascii')
w_ref = rec.get('ref')
if w_ref:
id = self.id_get(cr, w_ref)
else:
number_children = len(rec)
assert number_children > 0,\
                "You must define a child node if you don't give a ref"
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
openerp.workflow.trg_validate(
uid, model, id, rec.get('action').encode('ascii'), cr)
#
# Support two types of notation:
# name="Inventory Control/Sending Goods"
# or
# action="action_id"
# parent="parent_id"
#
def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
m_l = map(escape, escape_re.split(rec.get("name",'').encode('utf8')))
values = {'parent_id': False}
if rec.get('parent', False) is False and len(m_l) > 1:
# No parent attribute specified and the menu name has several menu components,
# try to determine the ID of the parent according to menu path
pid = False
res = None
values['name'] = m_l[-1]
m_l = m_l[:-1] # last part is our name, not a parent
for idx, menu_elem in enumerate(m_l):
if pid:
cr.execute('select id from ir_ui_menu where parent_id=%s and name=%s', (pid, menu_elem))
else:
cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (menu_elem,))
res = cr.fetchone()
if res:
pid = res[0]
else:
                    # the menuitem doesn't exist but we are in a branch (not a leaf)
_logger.warning('Warning no ID for submenu %s of menu %s !', menu_elem, str(m_l))
pid = self.pool['ir.ui.menu'].create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
if rec.get('parent'):
menu_parent_id = self.id_get(cr, rec.get('parent',''))
else:
# we get here with <menuitem parent="">, explicit clear of parent, or
# if no parent attribute at all but menu name is not a menu path
menu_parent_id = False
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, rec.get('id','')) ]
except:
res = None
if rec.get('action'):
a_action = rec.get('action','').encode('utf8')
# determine the type of action
action_type, action_id = self.model_id_get(cr, a_action)
action_type = action_type.split('.')[-1] # keep only type part
if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
a_table = 'ir_act_%s' % action_type.replace('act_', '')
cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
resw = cr.fetchone()
if resw:
values['name'] = resw[0]
if not values.get('name'):
# ensure menu has a name
values['name'] = rec_id or '?'
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
if rec_id and pid:
self.idref[rec_id] = int(pid)
if rec.get('action') and pid:
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec)
def _tag_assert(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
rec_model = rec.get("model",'').encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count")
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
ids = None
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if rec_id:
ids = [self.id_get(cr, rec_id)]
elif rec_src:
q = unsafe_eval(rec_src, eval_dict)
ids = self.pool[rec_model].search(cr, uid, q, context=context)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n' \
% (rec_string, count, len(ids))
_logger.error(msg)
return
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
class d(dict):
def __getitem__(self2, key):
if key in brrec:
return brrec[key]
return dict.__getitem__(self2, key)
globals_dict = d()
globals_dict['floatEqual'] = self._assert_equals
globals_dict['ref'] = ref
globals_dict['_ref'] = ref
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = unsafe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
_logger.error(msg)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _tag_record(self, cr, rec, data_node=None, mode=None):
rec_model = rec.get("model").encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
rec_context = rec.get("context", None)
if rec_context:
rec_context = unsafe_eval(rec_context)
self._test_xml_id(rec_id)
        # in update mode, the record won't be updated if the data node explicitly
        # opts out using @noupdate="1". A second check will be performed in
        # ir.model.data#_update() using the record's ir.model.data `noupdate` field.
if self.isnoupdate(data_node) and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
if '.' in rec_id:
module,rec_id2 = rec_id.split('.')
else:
module = self.module
rec_id2 = rec_id
id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
if id:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = int(id)
return None
elif not self.nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name").encode('utf-8')
f_ref = field.get("ref",'').encode('utf-8')
f_search = field.get("search",'').encode('utf-8')
f_model = field.get("model",'').encode('utf-8')
if not f_model and model._all_columns.get(f_name):
f_model = model._all_columns[f_name].column._obj
f_use = field.get("use",'').encode('utf-8') or 'id'
f_val = False
if f_search:
q = unsafe_eval(f_search, self.idref)
assert f_model, 'Define an attribute model="..." in your .XML file !'
f_obj = self.pool[f_model]
# browse the objects searched
s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
# column definitions of the "local" object
_cols = self.pool[rec_model]._all_columns
# if the current field is many2many
if (f_name in _cols) and _cols[f_name].column._type=='many2many':
f_val = [(6, 0, map(lambda x: x[f_use], s))]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._all_columns \
and model._all_columns[f_name].column._type == 'reference':
val = self.model_id_get(cr, f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(cr, f_ref)
else:
f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
if f_name in model._all_columns:
import openerp.osv as osv
if isinstance(model._all_columns[f_name].column, osv.fields.integer):
f_val = int(f_val)
res[f_name] = f_val
id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
if rec_id:
self.idref[rec_id] = int(id)
if config.get('import_partial'):
cr.commit()
return rec_model, id
def _tag_template(self, cr, el, data_node=None, mode=None):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
record_attrs = {
'id': tpl_id,
'model': 'ir.ui.view',
}
for att in ['forcecreate', 'context']:
if att in el.keys():
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field("qweb", name='type'))
record.append(Field(el.get('priority', "16"), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if el.get('active') in ("True", "False") and mode != "update":
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
if el.attrib.pop('page', None) == 'True':
record.append(Field(name="page", eval="True"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(cr, record, data_node)
def id_get(self, cr, id_str):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(cr, id_str)
if res and len(res)>1: res = res[1]
return res
def model_id_get(self, cr, id_str):
model_data_obj = self.pool['ir.model.data']
mod = self.module
if '.' in id_str:
mod,id_str = id_str.split('.')
return model_data_obj.get_object_reference(cr, self.uid, mod, id_str)
def parse(self, de, mode=None):
if de.tag != 'openerp':
raise Exception("Mismatch xml format: root tag must be `openerp`.")
for n in de.findall('./data'):
for rec in n:
if rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, n, mode=mode)
except Exception, e:
self.cr.rollback()
exc_info = sys.exc_info()
raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False):
self.mode = mode
self.module = module
self.cr = cr
self.idref = idref
self.pool = openerp.registry(cr.dbname)
self.uid = 1
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'workflow': self._tag_workflow,
'report': self._tag_report,
'ir_set': self._tag_ir_set,
'act_window': self._tag_act_window,
'url': self._tag_url,
'assert': self._tag_assert,
}
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
fp = misc.file_open(pathname)
ext = os.path.splitext(filename)[1].lower()
try:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.yml':
convert_yaml_import(cr, module, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
def convert_sql_import(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
'''Import csv file :
quote: "
delimiter: ,
encoding: utf-8'''
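    # Illustrative layout only (hypothetical model and values): a file named
    # e.g. res_partner.csv is expected to start with a header row of field
    # names, followed by one row per record:
    #   "id","name","email"
    #   "module.partner_demo","Demo Partner","demo@example.com"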
if not idref:
idref={}
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
input = cStringIO.StringIO(csvcontent) #FIXME
reader = csv.reader(input, quotechar='"', delimiter=',')
fields = reader.next()
fname_partial = ""
if config.get('import_partial'):
fname_partial = module + '/'+ fname
if not os.path.isfile(config.get('import_partial')):
pickle.dump({}, file(config.get('import_partial'),'w+'))
else:
data = pickle.load(file(config.get('import_partial')))
if fname_partial in data:
if not data[fname_partial]:
return
else:
for i in range(data[fname_partial]):
reader.next()
if not (mode == 'init' or 'id' in fields):
        _logger.error("Import specification does not contain 'id' and we are not in init mode, cannot continue.")
return
uid = 1
datas = []
for line in reader:
if not (line and any(line)):
continue
try:
datas.append(map(misc.ustr, line))
except:
_logger.error("Cannot import the line: %s", line)
registry = openerp.registry(cr.dbname)
result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
data[fname_partial] = 0
pickle.dump(data, file(config.get('import_partial'),'wb'))
cr.commit()
#
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception:
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
idref={}
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate)
obj.parse(doc.getroot(), mode=mode)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "a30d1c8259a35513842318c3ff9fda73",
"timestamp": "",
"source": "github",
"line_count": 968,
"max_line_length": 219,
"avg_line_length": 41.52789256198347,
"alnum_prop": 0.5278738277071569,
"repo_name": "diogocs1/comps",
"id": "7f56322dc94059b4a924150595ea4bbbaa492cb1",
"size": "41178",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "web/openerp/tools/convert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
from itertools import chain
from django import forms
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
class SortedCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
class Media:
js = (
'admin/js/jquery.init.js',
'sortedm2m/widget.js',
'sortedm2m/jquery-ui.js',
)
css = {'screen': (
'sortedm2m/widget.css',
)}
def build_attrs(self, attrs=None, **kwargs): # pylint: disable=arguments-differ
attrs = dict(attrs or {}, **kwargs)
attrs = super().build_attrs(attrs)
classes = attrs.setdefault('class', '').split()
classes.append('sortedm2m')
attrs['class'] = ' '.join(classes)
return attrs
def render(self, name, value, attrs=None, choices=(), renderer=None): # pylint: disable=arguments-differ
if value is None:
value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
# Normalize to strings
str_values = [force_str(v) for v in value]
selected = []
unselected = []
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = ' for="%s"' % conditional_escape(final_attrs['id'])
else:
label_for = ''
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_str(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_str(option_label))
item = {
'label_for': label_for,
'rendered_cb': rendered_cb,
'option_label': option_label,
'option_value': option_value
}
if option_value in str_values:
selected.append(item)
else:
unselected.append(item)
        # Reorder `selected` according to str_values, which lists the `option_value`s
        # in the order they should be shown on screen
ordered = []
for s in str_values:
for select in selected:
if s == select['option_value']:
ordered.append(select)
selected = ordered
html = render_to_string(
'sortedm2m/sorted_checkbox_select_multiple_widget.html',
{'selected': selected, 'unselected': unselected})
return mark_safe(html)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
if isinstance(value, (str,)):
return [v for v in value.split(',') if v]
return value
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
widget = SortedCheckboxSelectMultiple
def clean(self, value):
queryset = super().clean(value)
if value is None or not hasattr(queryset, '__iter__'):
return queryset
key = self.to_field_name or 'pk'
objects = dict((force_str(getattr(o, key)), o) for o in queryset)
return [objects[force_str(val)] for val in value]
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_str(value) for value in self.prepare_value(initial)]
data_set = [force_str(value) for value in data]
return data_set != initial_set
|
{
"content_hash": "3030f7c710615435db39f8f764095a76",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 109,
"avg_line_length": 37.056603773584904,
"alnum_prop": 0.5789205702647657,
"repo_name": "gregmuellegger/django-sortedm2m",
"id": "04e65b8eb6402fba32ec20a142c51fd86b125563",
"size": "3928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sortedm2m/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "902"
},
{
"name": "HTML",
"bytes": "1258"
},
{
"name": "JavaScript",
"bytes": "5219"
},
{
"name": "Makefile",
"bytes": "132"
},
{
"name": "Python",
"bytes": "87585"
}
],
"symlink_target": ""
}
|
"""
sentry.interfaces.user
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('User',)
from sentry.interfaces.base import Interface
from sentry.utils.safe import trim, trim_dict
from sentry.web.helpers import render_to_string
from ipaddr import IPAddress
def validate_ip(value, required=True):
if not required and not value:
return
# will raise a ValueError
IPAddress(value)
return value
class User(Interface):
"""
An interface which describes the authenticated User for a request.
You should provide **at least** either an `id` (a unique identifier for
an authenticated user) or `ip_address` (their IP address).
    All other data is optional.
>>> {
>>> "id": "unique_id",
>>> "username": "my_user",
    >>> "email": "foo@example.com",
>>> "ip_address": "127.0.0.1"
>>> }
"""
@classmethod
def to_python(cls, data):
data = data.copy()
extra_data = data.pop('data', data)
if not isinstance(extra_data, dict):
extra_data = {}
kwargs = {
'id': trim(data.pop('id', None), 128),
'email': trim(data.pop('email', None), 128),
'username': trim(data.pop('username', None), 128),
'ip_address': validate_ip(data.pop('ip_address', None), False),
}
kwargs['data'] = trim_dict(extra_data)
return cls(**kwargs)
def get_path(self):
return 'sentry.interfaces.User'
def get_hash(self):
return []
def get_context(self):
return {
'user_ip_address': self.ip_address,
'user_id': self.id,
'user_username': self.username,
'user_email': self.email,
'user_data': self.data,
}
def to_html(self, event, is_public=False, **kwargs):
if is_public:
return ''
context = self.get_context()
context.update({
'is_public': is_public,
'event': event,
})
return render_to_string('sentry/partial/interfaces/user.html', context)
def to_email_html(self, event, **kwargs):
context = self.get_context()
return render_to_string('sentry/partial/interfaces/user_email.html', context)
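# Illustrative construction (not part of the original module); the field values
# below are made up. Keys other than id/username/email/ip_address end up in the
# trimmed `data` dict, as implemented in `to_python` above.
def _example_user():
    return User.to_python({
        'id': '42',
        'email': 'foo@example.com',
        'ip_address': '127.0.0.1',
        'subscription': 'basic',
    })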
|
{
"content_hash": "a7fb6635a89da0afc4b9db710772f047",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 85,
"avg_line_length": 26.98876404494382,
"alnum_prop": 0.5732722731057452,
"repo_name": "jokey2k/sentry",
"id": "f11e72e61f791e2c7fd19cf657177228ac8ba7f5",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/interfaces/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "580459"
},
{
"name": "Gettext Catalog",
"bytes": "2933595"
},
{
"name": "HTML",
"bytes": "292821"
},
{
"name": "JavaScript",
"bytes": "608760"
},
{
"name": "Makefile",
"bytes": "2710"
},
{
"name": "Python",
"bytes": "5105385"
}
],
"symlink_target": ""
}
|
"""Collects and renders Python and Flask application evironment and configuration."""
try:
from cgi import escape
except ImportError:
from html import escape
import os
import platform
import socket
import sys
from . import app
optional_modules_list = [
"Cookie",
"mod_wsgi",
"psycopg2",
"zlib",
"gzip",
"bz2",
"zipfile",
"tarfile",
"ldap",
"socket",
"audioop",
"curses",
"imageop",
"aifc",
"sunau",
"wave",
"chunk",
"colorsys",
"rgbimg",
"imghdr",
"sndhdr",
"ossaudiodev",
"sunaudiodev",
"adodbapi",
"cx_Oracle",
"ibm_db",
"mxODBC",
"MySQLdb",
"pgdb",
"PyDO",
"sapdbapi",
"sqlite3",
]
def _info():
for i in optional_modules_list:
try:
module = __import__(i)
sys.modules[i] = module
globals()[i] = module
except Exception:
pass
info = {}
info["python_version"] = platform.python_version()
info["system_info"] = get_system_info()
info["py_internals"] = get_py_internals()
info["os_internals"] = get_os_internals()
info["envvars"] = get_envvars()
info["database_info"] = get_database_info()
info["compression_info"] = get_compression_info()
info["socket_info"] = get_socket_info()
info["multimedia_info"] = get_multimedia_info()
info["app_config"] = app.config
return info
def get_system_info(): # noqa: D103
system_info = []
if sys.version_info.minor < 8:
distname = platform.linux_distribution()[0]
version = platform.linux_distribution()[1]
else:
import distro
distname = distro.name()
version = distro.version()
if distname != "" and version != "":
os_version = "%s %s (%s %s)" % (platform.system(), platform.release(), distname, version)
else:
os_version = "%s %s" % (platform.system(), platform.release())
system_info.append(("OS Version", os_version))
if hasattr(os, "path"):
system_info.append(("OS Path", os.environ["PATH"]))
if hasattr(sys, "version"):
system_info.append(("Python Version", sys.version))
if hasattr(sys, "subversion"):
system_info.append(("Python Subversion", sys.subversion[0]))
if hasattr(sys, "prefix"):
system_info.append(("Python Prefix", sys.prefix))
if hasattr(sys, "path"):
system_info.append(("Python Path", sys.path))
if hasattr(sys, "executable"):
system_info.append(("Python Executable", sys.executable))
if hasattr(sys, "api_version"):
system_info.append(("Python API", sys.api_version))
system_info.append(("Build Date", platform.python_build()[1]))
system_info.append(("Compiler", platform.python_compiler()))
return system_info
def get_py_internals(): # noqa: D103
py_internals = []
if hasattr(sys, "builtin_module_names"):
py_internals.append(("Built-in Modules", ", ".join(sys.builtin_module_names)))
py_internals.append(("Byte Order", sys.byteorder + " endian"))
if hasattr(sys, "getcheckinterval"):
py_internals.append(("Check Interval", sys.getcheckinterval()))
if hasattr(sys, "getfilesystemencoding"):
py_internals.append(("File System Encoding", sys.getfilesystemencoding()))
max_integer_size = str(sys.maxsize) + " (%s)" % hex(sys.maxsize).upper()
py_internals.append(("Maximum Integer Size", max_integer_size))
if hasattr(sys, "getrecursionlimit"):
py_internals.append(("Maximum Recursion Depth", sys.getrecursionlimit()))
if hasattr(sys, "tracebacklimit"):
traceback_limit = sys.tracebacklimit
else:
traceback_limit = 1000
py_internals.append(("Maximum Traceback Limit", traceback_limit))
py_internals.append(("Maximum Code Point", sys.maxunicode))
return py_internals
def get_os_internals(): # noqa: D103
os_internals = []
if hasattr(os, "getcwd"):
os_internals.append(("Current Working Directory", os.getcwd()))
if hasattr(os, "getegid"):
os_internals.append(("Effective Group ID", os.getegid()))
if hasattr(os, "geteuid"):
os_internals.append(("Effective User ID", os.geteuid()))
if hasattr(os, "getgid"):
os_internals.append(("Group ID", os.getgid()))
if hasattr(os, "getuid"):
os_internals.append(("User ID", os.getuid()))
if hasattr(os, "getgroups"):
os_internals.append(("Group Membership", ", ".join(map(str, os.getgroups()))))
if hasattr(os, "linesep"):
        os_internals.append(("Line Separator", repr(os.linesep)[1:-1]))
if hasattr(os, "pathsep"):
        os_internals.append(("Path Separator", os.pathsep))
if hasattr(os, "getloadavg"):
os_internals.append(
            ("Load Average", ", ".join([str(round(x, 2)) for x in os.getloadavg()]))
)
return os_internals
def get_envvars(): # noqa: D103
return [(k, escape(v, quote=True)) for k, v in list(os.environ.items())]
def get_database_info(): # noqa: D103
database_info = []
database_info.append(("DB2/Informix (ibm_db)", is_imported("ibm_db")))
database_info.append(("MSSQL (adodbapi)", is_imported("adodbapi")))
database_info.append(("MySQL (MySQL-Python)", is_imported("MySQLdb")))
database_info.append(("ODBC (mxODBC)", is_imported("mxODBC")))
database_info.append(("Oracle (cx_Oracle)", is_imported("cx_Oracle")))
database_info.append(("PostgreSQL (PyGreSQL)", is_imported("pgdb")))
database_info.append(("PostgreSQL (Psycopg)", is_imported("psycopg2")))
database_info.append(("Python Data Objects (PyDO)", is_imported("PyDO")))
database_info.append(("SAP DB (sapdbapi)", is_imported("sapdbapi")))
database_info.append(("SQLite3", is_imported("sqlite3")))
return database_info
def get_compression_info(): # noqa: D103
compression_info = []
compression_info.append(("Bzip2 Support", is_imported("bz2")))
compression_info.append(("Gzip Support", is_imported("gzip")))
compression_info.append(("Tar Support", is_imported("tarfile")))
compression_info.append(("Zip Support", is_imported("zipfile")))
compression_info.append(("Zlib Support", is_imported("zlib")))
return compression_info
def get_socket_info(): # noqa: D103
socket_info = []
socket_info.append(("Hostname", socket.gethostname()))
socket_info.append(
("Hostname (fully qualified)", socket.gethostbyaddr(socket.gethostname())[0])
)
try:
socket_info.append(("IP Address", socket.gethostbyname(socket.gethostname())))
except Exception:
pass
socket_info.append(("IPv6 Support", getattr(socket, "has_ipv6", False)))
socket_info.append(("SSL Support", hasattr(socket, "ssl")))
return socket_info
def get_multimedia_info(): # noqa: D103
multimedia_info = []
multimedia_info.append(("AIFF Support", is_imported("aifc")))
multimedia_info.append(("Color System Conversion Support", is_imported("colorsys")))
multimedia_info.append(("curses Support", is_imported("curses")))
multimedia_info.append(("IFF Chunk Support", is_imported("chunk")))
multimedia_info.append(("Image Header Support", is_imported("imghdr")))
multimedia_info.append(("OSS Audio Device Support", is_imported("ossaudiodev")))
multimedia_info.append(("Raw Audio Support", is_imported("audioop")))
multimedia_info.append(("Raw Image Support", is_imported("imageop")))
multimedia_info.append(("SGI RGB Support", is_imported("rgbimg")))
multimedia_info.append(("Sound Header Support", is_imported("sndhdr")))
multimedia_info.append(("Sun Audio Device Support", is_imported("sunaudiodev")))
multimedia_info.append(("Sun AU Support", is_imported("sunau")))
multimedia_info.append(("Wave Support", is_imported("wave")))
return multimedia_info
def is_imported(module): # noqa: D103
if module in sys.modules:
return "enabled"
return "disabled"
info = _info()
|
{
"content_hash": "ecf9698eae7773a9995337bb34c53e8c",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 97,
"avg_line_length": 32.28225806451613,
"alnum_prop": 0.6324006994753935,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "358ab0389cfaebbd1172e439486f6d0ae48c177d",
"size": "8051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_hub/pyinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
from stellar_sdk import Asset
from stellar_sdk.call_builder.call_builder_async import ClaimableBalancesCallBuilder
from tests.call_builder.call_builder_async import client, horizon_url
class TestClaimableBalancesCallBuilder:
def test_init(self):
builder = ClaimableBalancesCallBuilder(horizon_url, client)
assert builder.endpoint == "claimable_balances"
assert builder.params == {}
def test_claimable_balance(self):
claimable_balance_id = (
"0000000043d380c38a2f2cac46ab63674064c56fdce6b977fdef1a278ad50e1a7e6a5e18"
)
builder = ClaimableBalancesCallBuilder(horizon_url, client).claimable_balance(
claimable_balance_id
)
assert builder.endpoint == f"claimable_balances/{claimable_balance_id}"
assert builder.params == {}
def test_for_claimant(self):
claimant = "GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH"
builder = ClaimableBalancesCallBuilder(horizon_url, client).for_claimant(
claimant
)
assert builder.endpoint == "claimable_balances"
assert builder.params == {"claimant": claimant}
def test_for_asset(self):
asset = Asset("BTC", "GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH")
builder = ClaimableBalancesCallBuilder(horizon_url, client).for_asset(asset)
assert builder.endpoint == "claimable_balances"
assert builder.params == {
"asset": "BTC:GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH"
}
def test_for_sponsor(self):
sponsor = "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V"
builder = ClaimableBalancesCallBuilder(horizon_url, client).for_sponsor(sponsor)
assert builder.endpoint == "claimable_balances"
assert builder.params == {"sponsor": sponsor}
|
{
"content_hash": "eeaae27a3be92412be9bd4e3eaa32383",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 88,
"avg_line_length": 43.348837209302324,
"alnum_prop": 0.7086909871244635,
"repo_name": "StellarCN/py-stellar-base",
"id": "514272df68f05010b0a7c98d7ab41a267230da87",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/call_builder/call_builder_async/test_claimable_balances_call_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "2044193"
},
{
"name": "RPC",
"bytes": "76503"
}
],
"symlink_target": ""
}
|
import daemon
import signal
# Moved gevent imports into functions.
# See http://stackoverflow.com/q/11587164/298479
# TLDR: Importing gevent before forking is a bad idea
class GeventDaemonContext(daemon.DaemonContext):
""" DaemonContext for gevent.
    Receives the same options as a DaemonContext (python-daemon), except:
    `monkey`: True by default. If it is a dict, it is passed as keyword
    arguments to gevent.monkey.patch_all(); any other truthy value calls
    patch_all() with no arguments; a falsy value disables monkey patching.
    `signal_map`: receives a dict of signals, but the handler is either a
    callable, a list of arguments [callable, arg1, arg2] or a string.
    A callable without arguments will receive (signal, None) as arguments,
    meaning the `frame` parameter is always None.
    If the daemon context forks, it calls gevent.reinit().
"""
def __init__(self, monkey_greenlet_report=True, monkey=True, gevent_hub=None, signal_map=None, **daemon_options):
self.gevent_signal_map = signal_map
self.monkey = monkey
self.monkey_greenlet_report = monkey_greenlet_report
self.gevent_hub = gevent_hub
super(GeventDaemonContext, self).__init__(signal_map={}, **daemon_options)
def open(self):
super(GeventDaemonContext, self).open()
# always reinit even when not forked when registering signals
self._apply_monkey_patch()
import gevent
if self.gevent_hub is not None:
# gevent 1.0 only
gevent.get_hub(self.gevent_hub)
gevent.reinit()
self._setup_gevent_signals()
def _apply_monkey_patch(self):
import gevent
import gevent.monkey
if isinstance(self.monkey, dict):
gevent.monkey.patch_all(**self.monkey)
elif self.monkey:
gevent.monkey.patch_all()
if self.monkey_greenlet_report:
import logging
original_report = gevent.hub.Hub.print_exception
def print_exception(self, context, type, value, tb):
try:
logging.error("Error in greenlet: %s" % str(context), exc_info=(type, value, tb))
finally:
return original_report(self, context, type, value, tb)
gevent.hub.Hub.print_exception = print_exception
def _setup_gevent_signals(self):
import gevent
if self.gevent_signal_map is None:
gevent.signal(signal.SIGTERM, self.terminate, signal.SIGTERM, None)
return
for sig, target in self.gevent_signal_map.items():
if target is None:
raise ValueError('invalid handler argument for signal %s', str(sig))
tocall = target
args = [sig, None]
if isinstance(target, list):
if not target:
raise ValueError('handler list is empty for signal %s', str(sig))
tocall = target[0]
args = target[1:]
elif isinstance(target, basestring):
assert not target.startswith('_')
tocall = getattr(self, target)
gevent.signal(sig, tocall, *args)
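# Illustrative usage sketch (not part of the original module). The handler, the
# sleep loop and the use of the context manager protocol below are assumptions
# made for the example; only GeventDaemonContext itself comes from this file.
def _example_daemon():
    import gevent
    def _on_term(sig, frame):
        # `frame` is always None here, as documented in the class docstring.
        raise SystemExit(0)
    context = GeventDaemonContext(
        monkey=True,
        signal_map={signal.SIGTERM: _on_term},
    )
    with context:
        while True:
            gevent.sleep(60)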
|
{
"content_hash": "3457f2597f28a7028332b7aad2c53916",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 117,
"avg_line_length": 37.195402298850574,
"alnum_prop": 0.6165018541409147,
"repo_name": "luzfcb/maildump",
"id": "8447b77305ab24fb41874f406b55f486ed67bb2c",
"size": "3236",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "maildump_runner/geventdaemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9429"
},
{
"name": "HTML",
"bytes": "5197"
},
{
"name": "JavaScript",
"bytes": "271163"
},
{
"name": "Python",
"bytes": "30416"
},
{
"name": "Shell",
"bytes": "397"
}
],
"symlink_target": ""
}
|
import mock
from .common import BaseTest
class ShieldTest(BaseTest):
# most of the shield tests are embedded in other resources
def test_shield_sync(self):
# shield resources
p = self.load_policy(
{
"name": "elb-sync",
"resource": "elb",
"actions": [{"type": "set-shield", "sync": True, "state": True}],
}
)
client = mock.MagicMock()
client.delete_protection = delete = mock.Mock()
set_shield = p.resource_manager.actions[0]
with mock.patch.object(p.resource_manager, "get_arns") as mock_get_arn:
mock_get_arn.return_value = ["us-east-1:%s/lb" % i for i in map(str, range(5))]
with mock.patch.object(
p.resource_manager, "get_resource_manager"
) as mock_resource_manager:
mock_resource_manager.return_value = mock_resource_manager
mock_resource_manager.resources.return_value = map(str, range(5))
protections = [
{"Id": i, "ResourceArn": "us-east-1:%s/lb" % i} for i in map(str, range(10))
]
# One out of region
protections.extend(
[{'Id': 42, 'ResourceArn': "us-east-2:42/lb"}]
)
# App elb also present for elb shield
protections.extend(
[
{"Id": i, "ResourceArn": "us-east-1:%s/app/lb" % i}
for i in map(str, range(10, 15))
]
)
# Networkload load balancers also present for elb shield
protections.extend(
[
{"Id": i, "ResourceArn": "%s/net/lb" % i}
for i in map(str, range(10, 15))
]
)
set_shield.clear_stale(client, protections)
self.assertEqual(delete.call_count, 5)
for i in range(5, 10):
self.assertTrue(
mock.call(ProtectionId=str(i)) in delete.call_args_list
)
|
{
"content_hash": "c6ffa6579ad0b0535cc1b89d8c33a453",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 96,
"avg_line_length": 36.71666666666667,
"alnum_prop": 0.4684521107580572,
"repo_name": "Sutto/cloud-custodian",
"id": "4e32d86af3a219a06f59b759c450be7e3cd19cb2",
"size": "2795",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_shield.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
"""
[8/29/2014] Challenge #177 [Hard] SCRIPT it Language
https://www.reddit.com/r/dailyprogrammer/comments/2exnal/8292014_challenge_177_hard_script_it_language/
#Description:
We all enjoy strings. We all enjoy breaking up texts. Time to go bigger than just a few sentences.
Out of curiosity we will be breaking down a movie script. The movie I have picked is Monty Python and the Holy Grail.
So what do you mean by breaking it down? Our challenge is to crunch some numbers on this movie and figure out some fun
statistics.
You will first go get the text of this script off the web. Part of the challenge is how to deal with this.
I really like this [Monty Python and the Holy Grail Script](http://www.sacred-texts.com/neu/mphg/mphg.htm) script of
the movie.
#By Scene:
* By Scene (From 1 to 36 in order) - how many words are spoken. (Anything between [] and () are not spoken words)
* Top 3 Spoken Words (and how many times they were used) and percentage of all the words spoken in that scene.
* List of all characters in the scene and next to them How many "Lines" and "Words" they used.
* The list of characters in the scene should be sorted by the count of "Words" used, from high to low.
* A "Line" is any sentence that ends with your typical end of sentence punctuation.
* Anything in [] or () we will call a "stage direction" Just count how many directions are given. Note: Words in a
stage direction do not count towards words spoken or used in script.
#By Whole Movie:
At the end of the crunch we want this data.
* Number of Lines
* Number of Words
* Number of Stage Directions
* Number of characters
* Sorted by most words the list of all Characters and how many Words and Lines they each got - Please also add a
percentage of total. So if a character spoke 100/1000 lines they will have Lines 100 (10%)
* Top 10 Words sorted in Order from Most to least (Ties count as 1 Spot so if the top 2 words are "The" and "A" then it
should be like 1) "The" "A"
* Top 3 Scenes with the most Words spoken (Again if ties - both are listed as 1 spot)
* In the movie there are a bunch of characters known as the Knights of Ni. They cannot say the word "it" (forbidden) -
Count how many times this forbidden word is used and list a count of "Forbidden Word of the Knights of Ni"
#Output:
Given the above you will have to format and display the data. I leave the design up to you. But it should be easy to
read and understand.
#Extra Challenge:
Find a way to show this data more meaningful than just list of hard data. Develop a Histogram or format the data into a
format that makes a cool looking pie chart/table/graph.
"""
def main():
pass
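# A minimal sketch (not part of the original solution) of the word counting
# described in the docstring above: stage directions in [] or () are stripped
# before counting spoken words.
def count_spoken_words(scene_text):
    """Return the number of spoken words after removing stage directions."""
    import re
    spoken = re.sub(r"\[[^\]]*\]|\([^)]*\)", " ", scene_text)
    return len(spoken.split())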
if __name__ == "__main__":
main()
|
{
"content_hash": "57fc87783aa1f9c90bd61cc8d35953dd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 119,
"avg_line_length": 54.97959183673469,
"alnum_prop": 0.7487008166295471,
"repo_name": "DayGitH/Python-Challenges",
"id": "a7ddf515b8d2ed96bf8b2fd30914ee323db6d1ae",
"size": "2694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20140829C.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
did_delete = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_maps("Players") \
.delete()
print(did_delete)
|
{
"content_hash": "3acf8bcae1d132efd9797d65dfd6b2fa",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 29.6,
"alnum_prop": 0.7364864864864865,
"repo_name": "TwilioDevEd/api-snippets",
"id": "2eae2925a5bfadc70c5e385624ee483401355be6",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sync/rest/maps/delete-map/delete-map.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import new
import dis
import struct
__version__ = "0.0.1"
# TODO add docstring explanations
# TODO add support for python 3 and pypy
# TODO how to handle flags such as CO_FUTURE_UNICODE_LITERALS (check code.h)
# TODO reloading this clears all the vars... maybe make this object oriented ?
# TODO currently cannot remove the hookpoint completely
def s(short):
# opcode arguments are always unsigned short
return struct.pack('H', short)
def o(s):
return chr(dis.opmap[s])
def createbytecode(*inp):
ret = ''
for item in inp:
if isinstance(item, str):
ret += o(item)
else:
ret += s(item)
return ret
def getoraddtotuple(t, *names):
indexs = []
tmp = list(t)
added = False
for name in names:
try:
ind = tmp.index(name)
indexs.append(ind)
except ValueError:
added = True
tmp.append(name)
indexs.append(len(tmp) - 1)
if added:
t = tuple(tmp)
indexs.insert(0, t)
return indexs
def line2addr(func, line):
code = func.func_code
if line < 0:
lineno = code.co_firstlineno
line = -line
else:
lineno = 0
co_lnotab = code.co_lnotab
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
lineno += ord(line_incr)
if lineno == line:
return addr
return None
# TODO support scanning the insert_code as well.. currently does not (fix this if you want to make this a generic insert code)
def insertbytecode(co_code, addr, insert_code):
newcode = co_code[:addr] + insert_code + co_code[addr:]
insert_len = len(insert_code)
n = len(newcode)
i = 0
fixedcode = ''
while i < n:
c = newcode[i]
fixedcode += c
op = ord(c)
i += 1
if op >= dis.HAVE_ARGUMENT:
oparg = ord(newcode[i]) + ord(newcode[i + 1]) * 256
i += 2
if op in dis.hasjrel:
target = i + oparg
if i < addr and target >= addr:
oparg += insert_len
elif op in dis.hasjabs:
target = oparg
if i < addr and target >= addr:
oparg += insert_len
elif i > addr + insert_len and target >= addr:
oparg += insert_len
fixedcode += s(oparg)
return fixedcode
# TODO insert_len limited to 256 for now.. (fix this if you want to make this a generic insert code)
def fixlines(co_lnotab, insert_addr, insert_len):
byte_increments = [ord(c) for c in co_lnotab[0::2]]
line_increments = [ord(c) for c in co_lnotab[1::2]]
new_lnotab = ''
lineno = 0
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
addr += byte_incr
lineno += line_incr
if addr > insert_addr:
byte_incr += insert_len
new_lnotab += chr(byte_incr) + chr(line_incr)
return new_lnotab
hookpoints = {}
disabledhookpoints = {}
hookpointcounter = 0
mapping = {}
origin = {}
def run_hookpoint(num, _locals=None, _globals=None):
if num in hookpoints:
hookpoints[num](_locals, _globals)
def disable_hookpoint(num):
if num not in hookpoints:
raise Exception('Breakpoint not enabled')
disabledhookpoints[num] = hookpoints[num]
del hookpoints[num]
def enable_hookpoint(num):
if num not in disabledhookpoints:
raise Exception('Breakpoint not disabled')
hookpoints[num] = disabledhookpoints[num]
del disabledhookpoints[num]
def change_hookpoint(num, func):
if num in hookpoints:
hookpoints[num] = func
elif num in disabledhookpoints:
disabledhookpoints[num] = func
else:
raise Exception('Breakpoint not found')
def list_hookpoints():
for k, v in origin.iteritems():
if k in hookpoints:
print('E', k, v)
else:
print('D', k, v)
def runpdb(_locals=None, _globals=None):
import pdb
pdb.set_trace()
# TODO check that closures and nested functions work here as well
def hook(func, lineno=None, insert_func=runpdb, with_state=False):
global hookpointcounter
hookpoints[hookpointcounter] = insert_func
code = func.func_code
newconsts, noneindex, minusoneindex, hookpointindex = getoraddtotuple(code.co_consts, None, -1, hookpointcounter)
newnames, replaceindex, runhookpointindex = getoraddtotuple(code.co_names, __name__, 'run_hookpoint')
if with_state:
newnames, localsindex, globalsindex = getoraddtotuple(newnames, 'locals', 'globals')
pdbtracecode = createbytecode('LOAD_CONST', minusoneindex, 'LOAD_CONST', noneindex, 'IMPORT_NAME', replaceindex, 'LOAD_ATTR', runhookpointindex, 'LOAD_CONST', hookpointindex, 'LOAD_GLOBAL', localsindex, 'CALL_FUNCTION', 0, 'LOAD_GLOBAL', globalsindex, 'CALL_FUNCTION', 0, 'CALL_FUNCTION', 3, 'POP_TOP')
else:
pdbtracecode = createbytecode('LOAD_CONST', minusoneindex, 'LOAD_CONST', noneindex, 'IMPORT_NAME', replaceindex, 'LOAD_ATTR', runhookpointindex, 'LOAD_CONST', hookpointindex, 'CALL_FUNCTION', 1, 'POP_TOP')
if lineno is None:
newcode = insertbytecode(code.co_code, 0, pdbtracecode)
newlnotab = fixlines(code.co_lnotab, 0, len(pdbtracecode))
else:
addr = line2addr(func, lineno)
if addr is None:
raise Exception('Line not found')
newcode = insertbytecode(code.co_code, addr, pdbtracecode)
newlnotab = fixlines(code.co_lnotab, addr, len(pdbtracecode))
# TODO is this correct ?
    newstacksize = code.co_stacksize + (4 if with_state else 2)
newfunc = new.code(code.co_argcount, code.co_nlocals, newstacksize, code.co_flags, newcode, newconsts, newnames, code.co_varnames, code.co_filename, code.co_name, code.co_firstlineno, newlnotab, code.co_freevars, code.co_cellvars)
# TODO make this thread safe (index returning number)
hookpointcounter += 1
if func.func_code in mapping:
mapping[newfunc] = mapping[func.func_code]
else:
mapping[newfunc] = func.func_code
origin[hookpointcounter - 1] = mapping[newfunc]
func.func_code = newfunc
return hookpointcounter - 1
|
{
"content_hash": "bb48b898fa0b4dda8a432af5b7306675",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 310,
"avg_line_length": 33.55154639175258,
"alnum_prop": 0.608081118451375,
"repo_name": "tzickel/bytehook",
"id": "facae8b92fd5446e159457caf92cb66e3597395d",
"size": "6509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bytehook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8798"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.util import (display_for_field, flatten_fieldsets,
label_for_field, lookup_field, NestedObjects)
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.contrib.sites.models import Site
from django.db import models, DEFAULT_DB_ALIAS
from django import forms
from django.test import SimpleTestCase, TestCase
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from django.utils import six
from .models import Article, Count, Event, Location, EventGuide
class NestedObjectsTests(TestCase):
"""
Tests for ``NestedObject`` utility collection.
"""
def setUp(self):
self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
self.objs = [Count.objects.create(num=i) for i in range(5)]
def _check(self, target):
self.assertEqual(self.n.nested(lambda obj: obj.num), target)
def _connect(self, i, j):
self.objs[i].parent = self.objs[j]
self.objs[i].save()
def _collect(self, *indices):
self.n.collect([self.objs[i] for i in indices])
def test_unrelated_roots(self):
self._connect(2, 1)
self._collect(0)
self._collect(1)
self._check([0, 1, [2]])
def test_siblings(self):
self._connect(1, 0)
self._connect(2, 0)
self._collect(0)
self._check([0, [1, 2]])
def test_non_added_parent(self):
self._connect(0, 1)
self._collect(0)
self._check([0])
def test_cyclic(self):
self._connect(0, 2)
self._connect(1, 0)
self._connect(2, 1)
self._collect(0)
self._check([0, [1, [2]]])
def test_queries(self):
self._connect(1, 0)
self._connect(2, 0)
# 1 query to fetch all children of 0 (1 and 2)
# 1 query to fetch all children of 1 and 2 (none)
# Should not require additional queries to populate the nested graph.
self.assertNumQueries(2, self._collect, 0)
def test_on_delete_do_nothing(self):
"""
Check that the nested collector doesn't query for DO_NOTHING objects.
"""
n = NestedObjects(using=DEFAULT_DB_ALIAS)
objs = [Event.objects.create()]
EventGuide.objects.create(event=objs[0])
with self.assertNumQueries(2):
# One for Location, one for Guest, and no query for EventGuide
n.collect(objs)
class UtilTests(SimpleTestCase):
def test_values_from_lookup_field(self):
"""
Regression test for #12654: lookup_field
"""
SITE_NAME = 'example.com'
TITLE_TEXT = 'Some title'
CREATED_DATE = datetime.min
ADMIN_METHOD = 'admin method'
SIMPLE_FUNCTION = 'function'
INSTANCE_ATTRIBUTE = 'attr'
class MockModelAdmin(object):
def get_admin_value(self, obj):
return ADMIN_METHOD
simple_function = lambda obj: SIMPLE_FUNCTION
article = Article(
site=Site(domain=SITE_NAME),
title=TITLE_TEXT,
created=CREATED_DATE,
)
article.non_field = INSTANCE_ATTRIBUTE
verifications = (
('site', SITE_NAME),
('created', localize(CREATED_DATE)),
('title', TITLE_TEXT),
('get_admin_value', ADMIN_METHOD),
(simple_function, SIMPLE_FUNCTION),
('test_from_model', article.test_from_model()),
('non_field', INSTANCE_ATTRIBUTE)
)
mock_admin = MockModelAdmin()
for name, value in verifications:
field, attr, resolved_value = lookup_field(name, article, mock_admin)
if field is not None:
resolved_value = display_for_field(resolved_value, field)
self.assertEqual(value, resolved_value)
def test_null_display_for_field(self):
"""
Regression test for #12550: display_for_field should handle None
value.
"""
display_value = display_for_field(None, models.CharField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.CharField(
choices=(
(None, "test_none"),
)
))
self.assertEqual(display_value, "test_none")
display_value = display_for_field(None, models.DateField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.TimeField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
# Regression test for #13071: NullBooleanField has special
# handling.
display_value = display_for_field(None, models.NullBooleanField())
expected = '<img src="%sadmin/img/icon-unknown.gif" alt="None" />' % settings.STATIC_URL
self.assertHTMLEqual(display_value, expected)
display_value = display_for_field(None, models.DecimalField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
display_value = display_for_field(None, models.FloatField())
self.assertEqual(display_value, EMPTY_CHANGELIST_VALUE)
def test_label_for_field(self):
"""
Tests for label_for_field
"""
self.assertEqual(
label_for_field("title", Article),
"title"
)
self.assertEqual(
label_for_field("title2", Article),
"another name"
)
self.assertEqual(
label_for_field("title2", Article, return_attr=True),
("another name", None)
)
self.assertEqual(
label_for_field("__unicode__", Article),
"article"
)
self.assertEqual(
label_for_field("__str__", Article),
str("article")
)
self.assertRaises(
AttributeError,
lambda: label_for_field("unknown", Article)
)
def test_callable(obj):
return "nothing"
self.assertEqual(
label_for_field(test_callable, Article),
"Test callable"
)
self.assertEqual(
label_for_field(test_callable, Article, return_attr=True),
("Test callable", test_callable)
)
self.assertEqual(
label_for_field("test_from_model", Article),
"Test from model"
)
self.assertEqual(
label_for_field("test_from_model", Article, return_attr=True),
("Test from model", Article.test_from_model)
)
self.assertEqual(
label_for_field("test_from_model_with_override", Article),
"not What you Expect"
)
self.assertEqual(
label_for_field(lambda x: "nothing", Article),
"--"
)
class MockModelAdmin(object):
def test_from_model(self, obj):
return "nothing"
test_from_model.short_description = "not Really the Model"
self.assertEqual(
label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
"not Really the Model"
)
self.assertEqual(
label_for_field("test_from_model", Article,
model_admin = MockModelAdmin,
return_attr = True
),
("not Really the Model", MockModelAdmin.test_from_model)
)
def test_label_for_property(self):
# NOTE: cannot use @property decorator, because of
# AttributeError: 'property' object has no attribute 'short_description'
class MockModelAdmin(object):
def my_property(self):
return "this if from property"
my_property.short_description = 'property short description'
test_from_property = property(my_property)
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
'property short description'
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_logentry_unicode(self):
"""
Regression test for #15661
"""
log_entry = admin.models.LogEntry()
log_entry.action_flag = admin.models.ADDITION
self.assertTrue(
six.text_type(log_entry).startswith('Added ')
)
log_entry.action_flag = admin.models.CHANGE
self.assertTrue(
six.text_type(log_entry).startswith('Changed ')
)
log_entry.action_flag = admin.models.DELETION
self.assertTrue(
six.text_type(log_entry).startswith('Deleted ')
)
# Make sure custom action_flags works
log_entry.action_flag = 4
self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i>:</label>')
# normal strings needs to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb:</label>')
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = (
(None, {
'fields': ('url', 'title', ('content', 'sites'))
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
fieldsets = (
(None, {
'fields': ('url', 'title', ['content', 'sites'])
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
|
{
"content_hash": "25c3bf117d580c32d77b0802ed4a58d3",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 108,
"avg_line_length": 33.88288288288288,
"alnum_prop": 0.577683240272977,
"repo_name": "makinacorpus/django",
"id": "637f6432619bcf79a49ebfe0b6903e0ab25c08fe",
"size": "11283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/admin_util/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98175"
},
{
"name": "Python",
"bytes": "8391980"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
@register.simple_tag
def current(request, pattern):
import re
try:
if re.search(pattern, request.path):
return 'nav-current'
except:
return ''
@register.filter
def disqus_hash(value):
return value.replace("/","_")
|
{
"content_hash": "8f48338bf96fe3bd18ba2fc8062968e3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 44,
"avg_line_length": 17.833333333333332,
"alnum_prop": 0.6386292834890965,
"repo_name": "jstoledano/nspaces",
"id": "2029b70f14e178cb9b93926faf00dddf081bf9b6",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/blog/templatetags/blog_extra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33602"
},
{
"name": "HTML",
"bytes": "192913"
},
{
"name": "JavaScript",
"bytes": "17838"
},
{
"name": "Python",
"bytes": "45721"
}
],
"symlink_target": ""
}
|
from .shop import Shop
from .product import Product
from .cart import Cart
from .custom_collection import CustomCollection
from .collect import Collect
from .shipping_address import ShippingAddress
from .billing_address import BillingAddress
from .line_item import LineItem
from .shipping_line import ShippingLine
from .note_attribute import NoteAttribute
from .address import Address
from .option import Option
from .payment_details import PaymentDetails
from .receipt import Receipt
from .rule import Rule
from .tax_line import TaxLine
from .script_tag import ScriptTag
from .product_search_engine import ProductSearchEngine
from .application_charge import ApplicationCharge
from .recurring_application_charge import RecurringApplicationCharge
from .asset import Asset
from .theme import Theme
from .customer_saved_search import CustomerSavedSearch
from .customer_group import CustomerGroup
from .customer import Customer
from .event import Event
from .webhook import Webhook
from .redirect import Redirect
from .province import Province
from .comment import Comment
from .metafield import Metafield
from .article import Article
from .blog import Blog
from .page import Page
from .country import Country
from .fulfillment import Fulfillment
from .fulfillment_service import FulfillmentService
from .carrier_service import CarrierService
from .transaction import Transaction
from .image import Image
from .variant import Variant
from .order import Order
from .order_risk import OrderRisk
from .smart_collection import SmartCollection
from ..base import ShopifyResource
|
{
"content_hash": "85986baa22cb5cc9a725ea761503e676",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 68,
"avg_line_length": 34.15217391304348,
"alnum_prop": 0.8421387651177594,
"repo_name": "varesa/shopify_python_api",
"id": "619bdf64e87c5b218dd730be64f29ead6bdc4a95",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopify/resources/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71467"
}
],
"symlink_target": ""
}
|
__version__=''' $Id: PyFontify.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
Module to analyze Python source code; for syntax coloring tools.
Interface::
tags = fontify(pytext, searchfrom, searchto)
- The 'pytext' argument is a string containing Python source code.
- The (optional) arguments 'searchfrom' and 'searchto' may contain a slice in pytext.
- The returned value is a list of tuples, formatted like this::
[('keyword', 0, 6, None), ('keyword', 11, 17, None), ('comment', 23, 53, None), etc. ]
- The tuple contents are always like this::
(tag, startindex, endindex, sublist)
- tag is one of 'keyword', 'string', 'comment' or 'identifier'
- sublist is not used, hence always None.
"""
# Based on FontText.py by Mitchell S. Chapman,
# which was modified by Zachary Roadhouse,
# then un-Tk'd by Just van Rossum.
# Many thanks for regular expression debugging & authoring are due to:
# Tim (the-incredib-ly y'rs) Peters and Cristian Tismer
# So, who owns the copyright? ;-) How about this:
# Copyright 1996-2001:
# Mitchell S. Chapman,
# Zachary Roadhouse,
# Tim Peters,
# Just van Rossum
__version__ = "0.4"
import re
# First a little helper, since I don't like to repeat things. (Tismer speaking)
def replace(src, sep, rep):
return rep.join(src.split(sep))
# This list of keywords is taken from ref/node13.html of the
# Python 1.3 HTML documentation. ("access" is intentionally omitted.)
keywordsList = [
"as", "assert", "exec",
"del", "from", "lambda", "return",
"and", "elif", "global", "not", "try",
"break", "else", "if", "or", "while",
"class", "except", "import", "pass",
"continue", "finally", "in", "print",
"def", "for", "is", "raise", "yield"]
# Build up a regular expression which will match anything
# interesting, including multi-line triple-quoted strings.
commentPat = r"#[^\n]*"
pat = r"q[^\\q\n]*(\\[\000-\377][^\\q\n]*)*q"
quotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
# Way to go, Tim!
pat = r"""
qqq
[^\\q]*
(
( \\[\000-\377]
| q
( \\[\000-\377]
| [^\q]
| q
( \\[\000-\377]
| [^\\q]
)
)
)
[^\\q]*
)*
qqq
"""
pat = ''.join(pat.split()) # get rid of whitespace
tripleQuotePat = replace(pat, "q", "'") + "|" + replace(pat, 'q', '"')
# Build up a regular expression which matches all and only
# Python keywords. This will let us skip the uninteresting
# identifier references.
# nonKeyPat identifies characters which may legally precede
# a keyword pattern.
nonKeyPat = r"(^|[^a-zA-Z0-9_.\"'])"
keyPat = nonKeyPat + "(" + "|".join(keywordsList) + ")" + nonKeyPat
matchPat = commentPat + "|" + keyPat + "|" + tripleQuotePat + "|" + quotePat
matchRE = re.compile(matchPat)
idKeyPat = "[ \t]*[A-Za-z_][A-Za-z_0-9.]*" # Ident w. leading whitespace.
idRE = re.compile(idKeyPat)
def fontify(pytext, searchfrom = 0, searchto = None):
if searchto is None:
searchto = len(pytext)
# Cache a few attributes for quicker reference.
search = matchRE.search
idSearch = idRE.search
tags = []
tags_append = tags.append
commentTag = 'comment'
stringTag = 'string'
keywordTag = 'keyword'
identifierTag = 'identifier'
start = 0
end = searchfrom
while 1:
m = search(pytext, end)
if m is None:
break # EXIT LOOP
start = m.start()
if start >= searchto:
break # EXIT LOOP
match = m.group(0)
end = start + len(match)
c = match[0]
if c not in "#'\"":
# Must have matched a keyword.
if start != searchfrom:
# there's still a redundant char before and after it, strip!
match = match[1:-1]
start = start + 1
else:
# this is the first keyword in the text.
# Only a space at the end.
match = match[:-1]
end = end - 1
tags_append((keywordTag, start, end, None))
# If this was a defining keyword, look ahead to the
# following identifier.
if match in ["def", "class"]:
m = idSearch(pytext, end)
if m is not None:
start = m.start()
if start == end:
match = m.group(0)
end = start + len(match)
tags_append((identifierTag, start, end, None))
elif c == "#":
tags_append((commentTag, start, end, None))
else:
tags_append((stringTag, start, end, None))
return tags
def test(path):
f = open(path)
text = f.read()
f.close()
tags = fontify(text)
for tag, start, end, sublist in tags:
print tag, repr(text[start:end])
|
{
"content_hash": "3d83e729f2a2de086afab9db6534fd26",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 90,
"avg_line_length": 32.20886075949367,
"alnum_prop": 0.5405777166437414,
"repo_name": "nickpack/reportlab",
"id": "cf584272fe4f44b470c4b40231d6fc75c10f690c",
"size": "5171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/reportlab/lib/PyFontify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "782870"
},
{
"name": "C++",
"bytes": "1390"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "3275159"
},
{
"name": "Shell",
"bytes": "1736"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('slug', models.SlugField(unique=True, max_length=200)),
('publish', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-created'],
'verbose_name': 'Blog Entry',
'verbose_name_plural': 'Blog Entries',
},
),
]
|
{
"content_hash": "d99cf48c585c81b774cca10bb4c12abd",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 33.03448275862069,
"alnum_prop": 0.5271398747390397,
"repo_name": "jgsjv/treinamento_django",
"id": "ccea001aa9bfc3a1f3c481a8179f3a5442db2662",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/blogapp/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13731"
},
{
"name": "Python",
"bytes": "24150"
}
],
"symlink_target": ""
}
|
import urllib2
import bs4
from random import shuffle
import os
#from unicodedata import normalize
url_base = 'http://www.weeklysh.com/eNews/Wap.aspx?ID='
ids = range(150340, 151531)
shuffle(ids)
i = ids.index(151414)
ids = [ids[i]] + ids[:i] + ids[i+1:]
os.chdir('/Users/jialing/Shanghai')
for id in ids:
page = urllib2.urlopen(url_base + str(id))
soup = bs4.BeautifulSoup(page.read())
try:
title, content = soup.findAll('div')
except:
continue
title = title.getText().strip()
content = content.getText('\n' * 2).strip()
try:
author, content = content.split('\n', 1)
except:
continue
filename = author + '/' + title + '.txt'
#filename = normalize('NFC', filename).encode('utf-8')
if len(author) > 10: # skip long names
continue
if not os.path.exists(author):
try:
os.makedirs(author)
except:
continue
try:
with open(filename, 'w') as f:
to_write = title + '\n' * 2 + author + '\n' * 2 + content + '\n' * 2
to_write = to_write.encode('utf-8')
f.write(to_write)
except:
continue
print id, author, title
|
{
"content_hash": "e72fbd39670a5f01b018dcf94823880a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.5752508361204013,
"repo_name": "jialing3/corner_cases",
"id": "9a31cf79eefa0c71c67d72afa3b1852ed49618e2",
"size": "1196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algo_fun/Shanghai_news_scrape_for_my_mom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "22409"
},
{
"name": "Jupyter Notebook",
"bytes": "174621"
},
{
"name": "Python",
"bytes": "98659"
},
{
"name": "Scala",
"bytes": "749"
}
],
"symlink_target": ""
}
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples. Use ``dtype=np.float32`` for maximum
efficiency.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
        check_input : boolean, optional (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Convert data
if check_input:
X = check_array(X, dtype=DTYPE)
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
if y.ndim == 1:
            # np.reshape is used rather than y[:, np.newaxis] because it
            # preserves the data contiguity of y, whereas the slicing form
            # does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
n_samples, n_features = X.shape
if self.tree_ is None:
raise Exception("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in xrange(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
        The inferred value of max_features.
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
|
{
"content_hash": "de43d067fea4158440a6627bf3d5b05d",
"timestamp": "",
"source": "github",
"line_count": 778,
"max_line_length": 80,
"avg_line_length": 38.412596401028274,
"alnum_prop": 0.564664547431822,
"repo_name": "soulmachine/scikit-learn",
"id": "a60f1a6ccca25e86e8e624cdfda9273ac40ea8cc",
"size": "29885",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sklearn/tree/tree.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
import platform
import os.path
import tempfile
import lit
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser, \
parseIntegratedTestScript
class TestIntegratedTestKeywordParser(unittest.TestCase):
inputTestCase = None
@staticmethod
def load_keyword_parser_lit_tests():
"""
Create and load the LIT test suite and test objects used by
TestIntegratedTestKeywordParser
"""
# Create the global config object.
lit_config = lit.LitConfig.LitConfig(progname='lit',
path=[],
quiet=False,
useValgrind=False,
valgrindLeakCheck=False,
singleProcess=False,
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=(
platform.system() == 'Windows'),
params={})
TestIntegratedTestKeywordParser.litConfig = lit_config
# Perform test discovery.
test_path = os.path.dirname(os.path.dirname(__file__))
inputs = [os.path.join(test_path, 'Inputs/testrunner-custom-parsers/')]
assert os.path.isdir(inputs[0])
run = lit.run.Run(lit_config,
lit.discovery.find_tests_for_inputs(lit_config, inputs))
        assert len(run.tests) == 1, "there should only be one test"
TestIntegratedTestKeywordParser.inputTestCase = run.tests[0]
@staticmethod
def make_parsers():
def custom_parse(line_number, line, output):
if output is None:
output = []
output += [part for part in line.split(' ') if part.strip()]
return output
return [
IntegratedTestKeywordParser("MY_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_DNE_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_LIST:", ParserKind.LIST),
IntegratedTestKeywordParser("MY_RUN:", ParserKind.COMMAND),
IntegratedTestKeywordParser("MY_CUSTOM:", ParserKind.CUSTOM,
custom_parse)
]
@staticmethod
def get_parser(parser_list, keyword):
for p in parser_list:
if p.keyword == keyword:
return p
assert False and "parser not found"
@staticmethod
def parse_test(parser_list):
script = parseIntegratedTestScript(
TestIntegratedTestKeywordParser.inputTestCase,
additional_parsers=parser_list, require_script=False)
assert not isinstance(script, lit.Test.Result)
assert isinstance(script, list)
assert len(script) == 0
def test_tags(self):
parsers = self.make_parsers()
self.parse_test(parsers)
tag_parser = self.get_parser(parsers, 'MY_TAG.')
dne_tag_parser = self.get_parser(parsers, 'MY_DNE_TAG.')
self.assertTrue(tag_parser.getValue())
self.assertFalse(dne_tag_parser.getValue())
def test_lists(self):
parsers = self.make_parsers()
self.parse_test(parsers)
list_parser = self.get_parser(parsers, 'MY_LIST:')
self.assertEqual(list_parser.getValue(),
['one', 'two', 'three', 'four'])
def test_commands(self):
parsers = self.make_parsers()
self.parse_test(parsers)
cmd_parser = self.get_parser(parsers, 'MY_RUN:')
value = cmd_parser.getValue()
self.assertEqual(len(value), 2) # there are only two run lines
self.assertEqual(value[0].strip(), "%dbg(MY_RUN: at line 4) baz")
self.assertEqual(value[1].strip(), "%dbg(MY_RUN: at line 7) foo bar")
def test_custom(self):
parsers = self.make_parsers()
self.parse_test(parsers)
custom_parser = self.get_parser(parsers, 'MY_CUSTOM:')
value = custom_parser.getValue()
self.assertEqual(value, ['a', 'b', 'c'])
def test_bad_keywords(self):
def custom_parse(line_number, line, output):
return output
try:
IntegratedTestKeywordParser("TAG_NO_SUFFIX", ParserKind.TAG),
self.fail("TAG_NO_SUFFIX failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("TAG_NO_SUFFIX raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("TAG_WITH_COLON:", ParserKind.TAG),
self.fail("TAG_WITH_COLON: failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("TAG_WITH_COLON: raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("LIST_WITH_DOT.", ParserKind.LIST),
self.fail("LIST_WITH_DOT. failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("LIST_WITH_DOT. raised the wrong exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_NO_SUFFIX",
ParserKind.CUSTOM, custom_parse),
self.fail("CUSTOM_NO_SUFFIX failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("CUSTOM_NO_SUFFIX raised the wrong exception: %r" % e)
# Both '.' and ':' are allowed for CUSTOM keywords.
try:
IntegratedTestKeywordParser("CUSTOM_WITH_DOT.",
ParserKind.CUSTOM, custom_parse),
except BaseException as e:
self.fail("CUSTOM_WITH_DOT. raised an exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_WITH_COLON:",
ParserKind.CUSTOM, custom_parse),
except BaseException as e:
self.fail("CUSTOM_WITH_COLON: raised an exception: %r" % e)
try:
IntegratedTestKeywordParser("CUSTOM_NO_PARSER:",
ParserKind.CUSTOM),
self.fail("CUSTOM_NO_PARSER: failed to raise an exception")
except ValueError as e:
pass
except BaseException as e:
self.fail("CUSTOM_NO_PARSER: raised the wrong exception: %r" % e)
if __name__ == '__main__':
TestIntegratedTestKeywordParser.load_keyword_parser_lit_tests()
unittest.main(verbosity=2)
|
{
"content_hash": "26835af3faa8695dc11caf2c6b6b45ed",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 82,
"avg_line_length": 40.8562874251497,
"alnum_prop": 0.5559138208998974,
"repo_name": "youtube/cobalt_sandbox",
"id": "89209d80f5557ec267fcbbd73f45eaadcd3bda55",
"size": "6854",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/llvm/utils/lit/tests/unit/TestRunner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
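# Illustrative sketch (assumption, not part of the original source): callers
# typically fill such an array with GEOM_PTR values before handing it to the
# collection-building routines named above, e.g.
#
#   geoms = get_pointer_arr(2)
#   geoms[0], geoms[1] = first_geom_ptr, second_geom_ptr
#   # lgeos.GEOSGeom_createCollection_r(handle, geom_type, geoms, 2)
#
# where the pointer and handle names are placeholders.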
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
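# Illustrative sketch (assumption, not part of the original source): for a
# version string such as '3.0.0-CAPI-1.4.1' (one of the examples in the regex
# comment above), geos_version_info() would return roughly
#   {'version': '3.0.0', 'major': '3', 'minor': '0', 'subminor': '0',
#    'release_candidate': None, 'capi_version': '1.4.1'}
# with every value a string (or None for a missing release candidate).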
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
|
{
"content_hash": "0a00528ba89583d787c726436c7b82fe",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 94,
"avg_line_length": 32.201219512195124,
"alnum_prop": 0.6983525847377391,
"repo_name": "gdi2290/django",
"id": "207fa780a4df9251a54e271733779c688a9b394f",
"size": "5281",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/contrib/gis/geos/libgeos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10365282"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
from django import forms
# modules from django
from django.forms.extras.widgets import SelectDateWidget
from . import choices
from .models import *
import datetime
class GoodInquiryForm(forms.Form):
kind = forms.MultipleChoiceField(required = False, label = '種類(可複選)', widget=forms.CheckboxSelectMultiple, choices = choices.KIND_CHOICES)
department = forms.MultipleChoiceField(required = False, label = '部門(可複選)', widget=forms.CheckboxSelectMultiple, choices = choices.DEPARTMENT_CHOICES_SORTED_ADD_ALL)
toDepartment = forms.MultipleChoiceField(required = False, label = '調入部門(可複選)', widget=forms.CheckboxSelectMultiple, choices = choices.DEPARTMENT_CHOICES_SORTED_ADD_ALL)
type = forms.CharField(required = False, widget=forms.TextInput(attrs={'size': '20'}), label = '型號', max_length = 50)
partNumber = forms.CharField(required = False, label = '料號', widget=forms.TextInput(attrs={'size': '14'}), max_length = 13)
fromDate = forms.DateField(required = False, label = '起始日期', widget = SelectDateWidget(years = [y for y in range(2015,2030)]), initial=(datetime.date.today() - datetime.timedelta(days=3)))
toDate = forms.DateField(required = False, label = '終止日期', widget = SelectDateWidget(years = [y for y in range(2015,2030)]), initial = datetime.date.today())
status = forms.MultipleChoiceField(required = False, label = '狀態(可複選)', widget = forms.CheckboxSelectMultiple, choices = choices.WASTAGE_STATUS_CHOICES)
person = forms.CharField(required = False, label = '人名', widget=forms.TextInput(attrs={'size': '5'}), max_length = 6)
toPerson = forms.CharField(required = False, label = '借入人', widget=forms.TextInput(attrs={'size': '5'}), max_length = 6)
class GoodRequisitionForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '領用部門')
personEx = forms.CharField(label = '領用人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodRequisition
fields = ['quantity', 'department', 'personEx', 'datetime', 'remark']
#exclude = ('good', 'who')
widgets = {
'remark': forms.Textarea(attrs={'cols': 22, 'rows': 11}),
}
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
class GoodRequisitionOuterForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '領用部門')
personEx = forms.CharField(label = '領用人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
fromDepartment = forms.ModelChoiceField(queryset = Department.objects.all(), label = '調出部門')
personFromEx = forms.CharField(label = '調撥人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_fromDepartment option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodRequisition
fields = ['quantity', 'fromDepartment', 'personFromEx', 'department', 'personEx', 'datetime', 'remark']
#exclude = ('good', 'who')
widgets = {
'remark': forms.Textarea(attrs={'cols': 22, 'rows': 11}),
}
def clean_department(self):
fromDepartment = self.cleaned_data.get('fromDepartment')
department = self.cleaned_data.get('department')
if fromDepartment == department:
raise forms.ValidationError('不得為同部門')
return department
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
def clean_personFromEx(self):
personFromEx = self.cleaned_data.get('personFromEx')
fromDepartment = self.cleaned_data.get('fromDepartment')
data = personFromEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif fromDepartment:
if data[0].strip() != fromDepartment.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), fromDepartment.name))
return personFromEx
class GoodBackForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '歸還部門')
personEx = forms.CharField(label = '歸還人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodBack
fields = ['quantity', 'department', 'personEx', 'datetime', 'remark']
#exclude = ('good', 'who')
widgets = {
'remark': forms.Textarea(attrs={'cols': 22, 'rows': 11}),
}
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
class GoodBackOuterForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '歸還部門')
personEx = forms.CharField(label = '歸還人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
toDepartment = forms.ModelChoiceField(queryset = Department.objects.all(), label = '調入部門')
class Meta:
model = GoodBack
fields = ['quantity', 'department', 'personEx', 'toDepartment', 'datetime', 'remark']
#exclude = ('good', 'who')
widgets = {
'remark': forms.Textarea(attrs={'cols': 22, 'rows': 11}),
}
def clean_toDepartment(self):
toDepartment = self.cleaned_data.get('toDepartment')
department = self.cleaned_data.get('department')
if toDepartment == department:
raise forms.ValidationError('不得為同部門')
return toDepartment
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
def clean_personToEx(self):
personToEx = self.cleaned_data.get('personToEx')
toDepartment = self.cleaned_data.get('toDepartment')
data = personToEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif toDepartment:
if data[0].strip() != toDepartment.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), toDepartment.name))
return personToEx
#class GoodInventoryForm(forms.Form):
# department = forms.ChoiceField(label = '部門', widget = forms.Select, choices = choices.DEPARTMENT_CHOICES_SORTED)
# quantity = forms.IntegerField(label = '數量', min_value = 1)
# remark = forms.CharField(required = False, label = '備註', widget = forms.Textarea(attrs={'cols': 20, 'rows': 10}))
class GoodBuyForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '購買部門')
personEx = forms.CharField(label = '購買人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodBuy
fields = ['quantity', 'department', 'personEx', 'date', 'pr', 'po', 'remark']
#exclude = ('good', 'who',)
widgets = {
'remark': forms.Textarea(attrs={'cols': 20, 'rows': 7}),
}
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
class GoodAllocateForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
fromDepartment = forms.ModelChoiceField(queryset = Department.objects.all(), label = '調出部門')
personEx = forms.CharField(label = '調出人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_fromDepartment option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodAllocate
fields = ['quantity', 'fromDepartment', 'personEx', 'toDepartment', 'datetime', 'remark']
#exclude = ('good', 'who')
widgets = {
'remark': forms.Textarea(attrs={'cols': 20, 'rows': 10}),
}
def clean_toDepartment(self):
fromDepartment = self.cleaned_data.get('fromDepartment')
toDepartment = self.cleaned_data.get('toDepartment')
if fromDepartment == toDepartment:
raise forms.ValidationError('不得為同部門')
return toDepartment
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
fromDepartment = self.cleaned_data.get('fromDepartment')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif fromDepartment:
if data[0].strip() != fromDepartment.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), fromDepartment.name))
return personEx
class GoodWastageForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '耗損部門')
personEx = forms.CharField(label = '耗損人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodWastage
fields = ['quantity', 'department', 'personEx', 'status', 'datetime', 'remark']
#exclude = ('good', 'who',)
widgets = {
'remark': forms.Textarea(attrs={'cols': 20, 'rows': 8}),
}
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
class GoodRepairForm(forms.ModelForm):
quantity = forms.IntegerField(label = '數量', min_value = 1)
department = forms.ModelChoiceField(queryset = Department.objects.all(), label = '維修部門')
personEx = forms.CharField(label = '維修人', widget=forms.TextInput(attrs={'size': '20', 'onkeyup':"changeList(tempList, 'id_rightList', 'id_rightListSel', $('#id_department option:selected').text(), this.value)", 'onfocus':'this.select();tempList=peopleList;focusID=this.id'}), max_length = 20)
class Meta:
model = GoodRepair
fields = ['quantity', 'department', 'personEx', 'date', 'remark']
widgets = {
'remark': forms.Textarea(attrs={'cols': 20, 'rows': 7}),
}
def clean_personEx(self):
personEx = self.cleaned_data.get('personEx')
department = self.cleaned_data.get('department')
data = personEx.split(':')
if len(data) != 2:
raise forms.ValidationError('格式錯誤,需為「部門 : 名字」')
elif department:
if data[0].strip() != department.name:
raise forms.ValidationError('部門不一致 ({1}, {0})'.format(data[0].strip(), department.name))
return personEx
|
{
"content_hash": "ba52ce91df46f9f2ca716e58d938c189",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 304,
"avg_line_length": 50.79020979020979,
"alnum_prop": 0.6232961586121437,
"repo_name": "z-Wind/warehouse",
"id": "0284f068140fc98b12853e1a7be86b33ab89ca72",
"size": "15180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goodsManage/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46701"
},
{
"name": "HTML",
"bytes": "46119"
},
{
"name": "JavaScript",
"bytes": "149149"
},
{
"name": "Python",
"bytes": "121983"
}
],
"symlink_target": ""
}
|
from google.appengine.ext import db
class DoRun(db.Model):
runTrue = db.BooleanProperty()
|
{
"content_hash": "dc061fdb69c95468129fd47dcacd3776",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 24.5,
"alnum_prop": 0.7244897959183674,
"repo_name": "shickey/BearStatus",
"id": "56fc950da89e84db0eb03b874ea1fa0aff9a7a74",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/do_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12742"
},
{
"name": "HTML",
"bytes": "32287"
},
{
"name": "JavaScript",
"bytes": "5075"
},
{
"name": "Python",
"bytes": "225143"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_rocks_small_fog_green.iff"
result.attribute_template_id = -1
result.stfName("lair_n","rocks")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "4341416580cb7b014c21304263b08d00",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.692063492063492,
"repo_name": "anhstudios/swganh",
"id": "4083c8a30eabce5cd7d8fe546be67f373638ab57",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_rocks_small_fog_green.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""Set up some common test helper things."""
import asyncio
import functools
import logging
import os
from unittest.mock import patch
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.util import location
from homeassistant.auth.const import GROUP_ID_ADMIN, GROUP_ID_READ_ONLY
from homeassistant.auth.providers import legacy_api_password, homeassistant
from tests.common import (
async_test_home_assistant,
INSTANCES,
mock_coro,
mock_storage as mock_storage,
MockUser,
CLIENT_ID,
)
from tests.test_util.aiohttp import mock_aiohttp_client
if os.environ.get("UVLOOP") == "1":
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)
def check_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
def guard_func(*args, **kwargs):
real = kwargs.pop("_test_real", None)
if not real:
raise Exception(
                'Forgot to mock or pass "_test_real=True" to %s' % func.__name__
)
return func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections
location.async_detect_location_info = check_real(location.async_detect_location_info)
util.get_local_ip = lambda: "127.0.0.1"
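# Illustrative only: a test that genuinely needs the network has to opt in
# explicitly, for example (hypothetical call site):
#
#   info = await location.async_detect_location_info(session, _test_real=True)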
@pytest.fixture(autouse=True)
def verify_cleanup():
"""Verify that the test has cleaned up resources correctly."""
yield
if len(INSTANCES) >= 2:
count = len(INSTANCES)
for inst in INSTANCES:
inst.stop()
pytest.exit(
"Detected non stopped instances " "({}), aborting test run".format(count)
)
@pytest.fixture
def hass_storage():
"""Fixture to mock storage."""
with mock_storage() as stored_data:
yield stored_data
@pytest.fixture
def hass(loop, hass_storage):
"""Fixture to provide a test instance of HASS."""
hass = loop.run_until_complete(async_test_home_assistant(loop))
yield hass
loop.run_until_complete(hass.async_stop(force=True))
@pytest.fixture
def requests_mock():
"""Fixture to provide a requests mocker."""
with _requests_mock.mock() as m:
yield m
@pytest.fixture
def aioclient_mock():
"""Fixture to mock aioclient calls."""
with mock_aiohttp_client() as mock_session:
yield mock_session
@pytest.fixture
def mock_device_tracker_conf():
"""Prevent device tracker from reading/writing data."""
devices = []
async def mock_update_config(path, id, entity):
devices.append(entity)
with patch(
"homeassistant.components.device_tracker.legacy"
".DeviceTracker.async_update_config",
side_effect=mock_update_config,
), patch(
"homeassistant.components.device_tracker.legacy.async_load_config",
side_effect=lambda *args: mock_coro(devices),
):
yield devices
@pytest.fixture
def hass_access_token(hass, hass_admin_user):
"""Return an access token to access Home Assistant."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_admin_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def hass_owner_user(hass, local_auth):
"""Return a Home Assistant admin user."""
return MockUser(is_owner=True).add_to_hass(hass)
@pytest.fixture
def hass_admin_user(hass, local_auth):
"""Return a Home Assistant admin user."""
admin_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_ADMIN)
)
return MockUser(groups=[admin_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_user(hass, local_auth):
"""Return a Home Assistant read only user."""
read_only_group = hass.loop.run_until_complete(
hass.auth.async_get_group(GROUP_ID_READ_ONLY)
)
return MockUser(groups=[read_only_group]).add_to_hass(hass)
@pytest.fixture
def hass_read_only_access_token(hass, hass_read_only_user):
"""Return a Home Assistant read only user."""
refresh_token = hass.loop.run_until_complete(
hass.auth.async_create_refresh_token(hass_read_only_user, CLIENT_ID)
)
return hass.auth.async_create_access_token(refresh_token)
@pytest.fixture
def legacy_auth(hass):
"""Load legacy API password provider."""
prv = legacy_api_password.LegacyApiPasswordAuthProvider(
hass,
hass.auth._store,
{"type": "legacy_api_password", "api_password": "test-password"},
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def local_auth(hass):
"""Load local auth provider."""
prv = homeassistant.HassAuthProvider(
hass, hass.auth._store, {"type": "homeassistant"}
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
@pytest.fixture
def hass_client(hass, aiohttp_client, hass_access_token):
"""Return an authenticated HTTP client."""
async def auth_client():
"""Return an authenticated client."""
return await aiohttp_client(
hass.http.app,
headers={"Authorization": "Bearer {}".format(hass_access_token)},
)
return auth_client
|
{
"content_hash": "f6fffbcd040b4e1a5b5fb31383dc2520",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 85,
"avg_line_length": 27.214285714285715,
"alnum_prop": 0.6730408698912635,
"repo_name": "Cinntax/home-assistant",
"id": "36c0f52f41a6b874174cc52cafb061bcaae20b0b",
"size": "5334",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.account_analytics import AccountAnalytics # noqa: E501
from talon_one.rest import ApiException
class TestAccountAnalytics(unittest.TestCase):
"""AccountAnalytics unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test AccountAnalytics
        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included """
# model = talon_one.models.account_analytics.AccountAnalytics() # noqa: E501
if include_optional :
return AccountAnalytics(
applications = 11,
live_applications = 6,
sandbox_applications = 2,
campaigns = 35,
active_campaigns = 15,
live_active_campaigns = 10,
coupons = 850,
active_coupons = 650,
expired_coupons = 200,
referral_codes = 500,
active_referral_codes = 100,
expired_referral_codes = 400,
active_rules = 35,
users = 56,
roles = 10,
custom_attributes = 18,
webhooks = 2,
loyalty_programs = 5,
live_loyalty_programs = 5
)
else :
return AccountAnalytics(
applications = 11,
live_applications = 6,
sandbox_applications = 2,
campaigns = 35,
active_campaigns = 15,
live_active_campaigns = 10,
coupons = 850,
active_coupons = 650,
expired_coupons = 200,
referral_codes = 500,
active_referral_codes = 100,
expired_referral_codes = 400,
active_rules = 35,
users = 56,
roles = 10,
custom_attributes = 18,
webhooks = 2,
loyalty_programs = 5,
live_loyalty_programs = 5,
)
def testAccountAnalytics(self):
"""Test AccountAnalytics"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9979a3abf45532210706957851e2f4c6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 732,
"avg_line_length": 39.229885057471265,
"alnum_prop": 0.5786697919718723,
"repo_name": "talon-one/talon_one.py",
"id": "312acbd4f8d1a79c3dcb4af118a9098e52645a2e",
"size": "3430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_account_analytics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
}
|
from starlord.ocr.api import *
import requests
import selenium
from selenium import webdriver
import json, urllib,urllib2
import hashlib
from urllib import urlencode
from selenium.webdriver.common import keys as KEYS
import bs4
import sys
import time
from selenium.webdriver.common.action_chains import ActionChains
from PIL import Image as PILImage
import cv2
from PIL import Image
import random
def extractEdges(image_file):
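    """Estimate how far the slider knob must be dragged (summary added for clarity).

    The captcha image is Laplacian-filtered to highlight edges; every column
    that contains a long vertical run of white pixels is kept as a candidate
    edge, adjacent candidates are merged, and the distance between the
    remaining edges approximates the width of the gap to slide across.
    """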
edges = []
img = cv2.imread(image_file, 0)
gray_lap = cv2.Laplacian(img,cv2.CV_16S,ksize = 3)
dst = cv2.convertScaleAbs(gray_lap)
cv2.imwrite("verify2.png",dst)
#cv2.imshow("showimage", dst)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
image = Image.open("verify2.png")
image_rgb = image.convert("RGB")
for x in xrange(2, image_rgb.size[0] - 1):
for y in xrange(2, image_rgb.size[1] - 1):
color1 = image_rgb.getpixel((x,y))
            # white pixel
if color1==(255,255,255):
k = min(y+22,image.size[1] - 1)
allwhite = False
for j in xrange(y+1,k):
                    # the vertical run of pixels below must also be white
color2= image_rgb.getpixel((x,j))
if color2==color1:
allwhite = True
continue
else:
allwhite=False
break
if allwhite:
if edges.count(x)==0:
edges.append(x)
    # collapse runs of adjacent columns into a single edge
    for i in xrange(0, len(edges) - 1):
        if edges[i] + 1 == edges[i + 1]:
            edges[i] = 0
    edges = [x for x in edges if x != 0]
    for z in edges:
        print str(z)
    # distance the slider has to travel; default to 0 if no gap was detected
    distance1 = 0
    if len(edges) == 2:
        distance1 = edges[1] - edges[0]
    elif len(edges) > 2:
        distance1 = edges[2] - edges[0]
    return distance1
headers0 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Cache-Control': 'max-age=0'
}
driver = webdriver.Chrome()
driver.maximize_window()
page = driver.get("http://www.qixin.com/login")
#elem = driver.find_element_by_xpath("//script[6]")
time.sleep(2)
#elem = driver.find_element_by_xpath("//div[@class='behavior_verify_content']")
elem = driver.find_element_by_css_selector('.gt_slider_knob.gt_show')
ActionChains(driver).click_and_hold(elem).perform()
time.sleep(1)
driver.get_screenshot_as_file('web.png')
#print elem.location.values()
elem2 = driver.find_element_by_css_selector('.gt_cut_fullbg.gt_show')
#ActionChains(driver).move_to_element(elem).perform()
#driver.get_screenshot_as_file('2.png')
#print elem2.location.values()
#print elem2.size.values()
topx = elem2.location.values()[1]
topy = elem2.location.values()[0]
botx = topx + elem2.size.values()[0]
boty = topy + elem2.size.values()[1]
box=(topx, topy, botx, boty)
image1 = PILImage.open('web.png')
image1.crop(box).save('verify.png')
image1.close()
distance = extractEdges("verify.png")
ActionChains(driver).move_to_element(elem)
#ActionChains(driver).drag_and_drop_by_offset(elem,distance,0).perform()
road = 0
for seconds in xrange(0,20):
if seconds==19:
bias = distance-road
ActionChains(driver).move_by_offset(bias, 0).perform()
else:
ActionChains(driver).move_by_offset(0.05 * distance, 0).perform()
road = road + 0.05*distance
time.sleep(1*random.random())
#ActionChains(driver).move_to_element_with_offset(elem,distance, 0).perform()
driver.get_screenshot_as_file('web2.png')
ActionChains(driver).release(elem).perform()
time.sleep(10)
|
{
"content_hash": "4b649a5b6d4bcc93a2242bdad711cf4c",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 93,
"avg_line_length": 21.796610169491526,
"alnum_prop": 0.6189735614307932,
"repo_name": "snakedragon/scrapy-hive",
"id": "d1c8d5605ab009f779b4bbf9ae49a3413c543eb9",
"size": "3901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starlord/test/geetest-demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "109420"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import unittest
from sqlalchemy.exc import IntegrityError
from oyProjectManager import conf, db, Asset, Project, VersionType, User, Version
class AssetTester(unittest.TestCase):
"""tests the Asset class
"""
def setUp(self):
"""setup the test settings with environment variables
"""
# -----------------------------------------------------------------
# start of the setUp
# create the environment variable and point it to a temp directory
conf.database_url = "sqlite://"
self.temp_config_folder = tempfile.mkdtemp()
self.temp_projects_folder = tempfile.mkdtemp()
os.environ["OYPROJECTMANAGER_PATH"] = self.temp_config_folder
os.environ[conf.repository_env_key] = self.temp_projects_folder
self.test_proj = Project("TEST_PROJ1")
self.test_proj.create()
self.kwargs = {
"project": self.test_proj,
"name": "Test Asset",
"code": "TEST_ASSET",
'type': 'Prop',
}
self.test_asset = Asset(**self.kwargs)
self.test_asset.save()
self._name_test_values = [
("Test Asset", "Test Asset"),
("23Test_Asset", "23Test_Asset"),
("TEST_ASSET", "TEST_ASSET"),
("£#$£#$AB", "AB"),
("'^+'^%^+%&&AB3£#$£½'^+'3'^+'4", "AB334"),
("afasfas fasf asdf67", "Afasfas fasf asdf67"),
("45a", "45a"),
("45acafs","45acafs"),
("45'^+'^+a 234", "45a 234"),
("45asf78wr", "45asf78wr"),
("'^+'afsd2342'^+'asdFGH", "Afsd2342asdFGH"),
]
self._code_test_values = [
("Test Asset", "Test_Asset"),
("23Test_Asset", "23Test_Asset"),
("TEST_ASSET", "TEST_ASSET"),
("£#$£#$AB", "AB"),
("'^+'^%^+%&&AB3£#$£½'^+'3'^+'4", "AB334"),
("afasfas fasf asdf67", "Afasfas_fasf_asdf67"),
("45a", "45a"),
("45acafs","45acafs"),
("45'^+'^+a 234", "45a_234"),
("45asf78wr", "45asf78wr"),
("'^+'afsd2342'^+'asdFGH", "Afsd2342asdFGH"),
]
def tearDown(self):
"""cleanup the test
"""
# set the db.session to None
db.session = None
# delete the temp folder
shutil.rmtree(self.temp_config_folder)
shutil.rmtree(self.temp_projects_folder)
def test_name_argument_is_skipped(self):
"""testing if a TypeError will be raised when the name argument is
skipped
"""
self.kwargs.pop("name")
self.assertRaises(TypeError, Asset, **self.kwargs)
def test_name_argument_is_None(self):
"""testing if a TypeError will be raised when the name argument is None
"""
self.kwargs["name"] = None
self.assertRaises(TypeError, Asset, **self.kwargs)
def test_name_attribute_is_None(self):
"""testing if a TypeError will be raised when the name attribute is set
to None
"""
self.assertRaises(TypeError, setattr, self.test_asset, "name", None)
def test_name_argument_is_not_a_string(self):
"""testing if a TypeError will be raised when the name argument is not
a string
"""
self.kwargs["name"] = 123445
self.assertRaises(TypeError, Asset, **self.kwargs)
def test_name_attribute_is_not_a_string(self):
"""testing if a TypeError will be raised when the name attribute is not
a string
"""
self.assertRaises(TypeError, setattr, self.test_asset, "name", 123456)
def test_name_argument_is_working_properly(self):
"""test if the name attribute initialized correctly with the value of
the name argument
"""
test_value = "Test Value"
self.kwargs["name"] = test_value
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.name, test_value)
def test_name_attribute_is_working_properly(self):
"""testing if the name attribute is working properly
"""
test_value = "Test Value"
self.test_asset.name = test_value
self.assertEqual(self.test_asset.name, test_value)
def test_name_argument_formatting(self):
"""testing if the name argument will be formatted correctly
"""
for test_value in self._name_test_values:
self.kwargs["name"] = test_value[0]
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.name, test_value[1])
def test_name_attribute_formatting(self):
"""testing if the name attribute will be formatted correctly
"""
for test_value in self._name_test_values:
self.test_asset.name = test_value[0]
self.assertEqual(self.test_asset.name, test_value[1])
def test_name_argument_is_empty_string_after_formatting(self):
"""testing if a ValueError will be raised when the name argument is an
empty string after formatting
"""
self.kwargs["name"] = "£#$£'^+'"
self.assertRaises(ValueError, Asset, **self.kwargs)
def test_name_attribute_is_empty_string_after_formatting(self):
"""testing if a ValueError will be raised when the name attribugte is
an empty string after formatting
"""
self.assertRaises(ValueError, setattr, self.test_asset, "name",
"£#$£'^+'")
def test_name_argument_is_not_unique(self):
"""testing if a IntegrityError will be raised when the name is unique
"""
# create an asset with the same name
new_asset = Asset(**self.kwargs)
self.assertRaises(IntegrityError, new_asset.save)
def test_code_argument_is_skipped(self):
"""testing if the code attribute will be get from the name attribute if
the code argument is skipped
"""
self.kwargs["name"] = "Test Value"
self.kwargs.pop("code")
expected_value = "Test_Value"
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.code, expected_value)
def test_code_argument_is_None(self):
"""testing if the code attribute will be get from the name attribute if
the code argument is None
"""
self.kwargs["name"] = "Test Value"
self.kwargs["code"] = None
expected_value = "Test_Value"
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.code, expected_value)
def test_code_attribute_is_None(self):
"""testing if the code attribute will be get from the name attribute if
it is set to None
"""
self.test_asset.name = "Test Value"
self.test_asset.code = None
expected_value = "Test_Value"
self.assertEqual(self.test_asset.code, expected_value)
def test_code_argument_is_not_a_string_instance(self):
"""testing if a TypeError will be raised when the code argument is not
an instance of string or unicode
"""
self.kwargs["code"] = 2134
self.assertRaises(TypeError, Asset, **self.kwargs)
def test_code_attribute_is_not_a_string_instance(self):
"""testing if a TypeError will be raised when the code attribute is set
to a value which is not a string or unicode
"""
self.assertRaises(TypeError, setattr, self.test_asset, "code", 2342)
def test_code_argument_is_working_properly(self):
"""testing if the code attribute is set from the code argument
"""
self.kwargs["code"] = "TEST_VALUE"
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.code, self.kwargs["code"])
def test_code_attribute_is_working_properly(self):
"""testing if the code attribute is working properly
"""
test_value = "TEST_VALUE"
self.test_asset.code = test_value
self.assertEqual(self.test_asset.code, test_value)
def test_code_argument_formatting(self):
"""testing if the code argument is formatted correctly
"""
for test_value in self._code_test_values:
self.kwargs["code"] = test_value[0]
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.code, test_value[1])
def test_code_attribute_formatting(self):
"""testing if the code attribute is formatted correctly
"""
for test_value in self._code_test_values:
self.test_asset.code = test_value[0]
self.assertEqual(self.test_asset.code, test_value[1])
def test_code_argument_is_empty_string_after_formatting(self):
"""testing if a ValueError will be raised when the code argument is an
empty string after formatting
"""
self.kwargs["code"] = "'^+'%+%"
self.assertRaises(ValueError, Asset, **self.kwargs)
def test_code_attribute_is_empty_string_after_formatting(self):
"""testing if a ValueError will be raised when the code attribugte is
an empty string after formatting
"""
self.assertRaises(ValueError, setattr, self.test_asset, "code",
"'^+'%+%")
def test_code_argument_is_not_unique(self):
"""testing if an IntegrityError will be raised when the code argument
is not unique
"""
self.kwargs["name"] = "Another_Asset_Name"
new_asset = Asset(**self.kwargs)
self.assertRaises(IntegrityError, new_asset.save)
def test_save_method_saves_the_asset_to_the_database(self):
"""testing if the save method saves the asset to the database
"""
self.test_asset.save()
self.assertTrue(self.test_asset in db.session)
def test_equality_of_assets(self):
"""testing if two assets are equal if their names and projects are also
equal
"""
proj1 = Project("EQUALITY_TEST_PROJECT_1")
proj1.create()
proj2 = Project("EQUALITY_TEST_PROJECT_2")
proj2.create()
asset1 = Asset(proj1, "TEST_ASSET1")
asset2 = Asset(proj1, "TEST_ASSET1")
asset3 = Asset(proj1, "TEST_ASSET3")
asset4 = Asset(proj2, "TEST_ASSET3")
self.assertTrue(asset1==asset2)
self.assertFalse(asset1==asset3)
self.assertFalse(asset3==asset4)
def test_inequality_of_assets(self):
"""testing if two assets are inequal if their names are different and
or their projects are different
"""
proj1 = Project("EQUALITY_TEST_PROJECT_1")
proj1.create()
proj2 = Project("EQUALITY_TEST_PROJECT_2")
proj2.create()
asset1 = Asset(proj1, "TEST_ASSET1")
asset2 = Asset(proj1, "TEST_ASSET1")
asset3 = Asset(proj1, "TEST_ASSET3")
asset4 = Asset(proj2, "TEST_ASSET3")
self.assertFalse(asset1!=asset2)
self.assertTrue(asset1!=asset3)
self.assertTrue(asset3!=asset4)
def test_type_argument_is_skipped(self):
"""testing if skipping the type argument the type attribute will be set
to conf.default_asset_type_name
"""
self.kwargs.pop('type')
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.type, conf.default_asset_type_name)
def test_type_argument_is_None(self):
"""testing if setting the type argument to None will set the type
attribute to conf.default_asset_type_name
"""
self.kwargs['type'] = None
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.type, conf.default_asset_type_name)
def test_type_attribute_is_set_to_None(self):
"""testing if setting the type attribute to None will set the type to
conf.default_asset_type_name
"""
self.test_asset.type = None
self.assertEqual(self.test_asset.type, conf.default_asset_type_name)
def test_type_argument_accepts_string_or_unicode_only(self):
"""testing if a TypeError will be raised when the type argument is set
to a value other than string or unicode value
"""
self.kwargs['type'] = 12312
self.assertRaises(TypeError, Asset, **self.kwargs)
def test_type_attribute_accepts_string_or_unicode_only(self):
"""testing if a TypeError will be raised when the type attribute is set
to a value other than string or unicode
"""
self.assertRaises(TypeError, setattr, self.test_asset, 'type', 2342)
def test_type_argument_is_working_properly(self):
"""testing if the type attribute is set with the type argument
"""
self.kwargs['type'] = "Test_Type_1"
new_asset = Asset(**self.kwargs)
self.assertEqual(self.kwargs['type'], new_asset.type)
def test_type_attribute_is_working_properly(self):
"""testing if the type attribute is working properly
"""
test_value = "Test_Type_1"
self.test_asset.type = test_value
self.assertEqual(self.test_asset.type, test_value)
def test_type_argument_formatting(self):
"""testing if the type argument is formatted correctly
"""
for test_values in self._code_test_values:
input_value = test_values[0]
expected_value = test_values[1]
self.kwargs['type'] = input_value
new_asset = Asset(**self.kwargs)
self.assertEqual(new_asset.type, expected_value)
def test_type_argument_is_invalid_after_formatting(self):
"""testing if a ValueError will be raised when the type argument is
invalid after formatting
"""
self.kwargs['type'] = '@#$@#$'
self.assertRaises(ValueError, Asset, **self.kwargs)
def test_type_attribute_is_invalid_after_formatting(self):
"""testing if a ValueError will be raised when the type attribute is
invalid after formatting
"""
self.assertRaises(ValueError, setattr, self.test_asset, 'type', '#@$#')
def test_type_attribute_formatting(self):
"""testing if the type attribute is formatted correctly
"""
for test_values in self._code_test_values:
input_value = test_values[0]
expected_value = test_values[1]
self.test_asset.type = input_value
self.assertEqual(self.test_asset.type, expected_value)
def test_deleting_an_asset_will_not_delete_the_related_project(self):
"""testing if deleting an asset will not delete the related project
"""
proj1 = Project('test project 1')
proj1.save()
asset = Asset(proj1, 'Test asset')
asset.save()
# check if they are in the session
self.assertIn(proj1, db.session)
self.assertIn(asset, db.session)
# delete the asset
db.session.delete(asset)
db.session.commit()
# check if it is removed from the session
self.assertNotIn(asset, db.session)
# and the project is there
self.assertIn(proj1, db.session)
def test_deleting_an_asset_will_not_delete_the_other_assets_in_the_related_project(self):
"""testing if deleting an asset will not delete the other assets in the
related project
"""
proj1 = Project('test project 1')
proj1.save()
asset1 = Asset(proj1, 'test asset 1')
asset1.save()
asset2 = Asset(proj1, 'test asset 2')
asset2.save()
asset3 = Asset(proj1, 'test asset 3')
asset3.save()
# check if they are properly in the db.session
self.assertIn(proj1, db.session)
self.assertIn(asset1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(asset3, db.session)
# delete asset1
db.session.delete(asset1)
db.session.commit()
# check if the asset1 is deleted
self.assertNotIn(asset1, db.session)
# and the others are in place
self.assertIn(proj1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(asset3, db.session)
def test_deleting_an_asset_will_delete_all_the_related_versions(self):
"""testing if deleting an asset will also delete the related versions
"""
proj1 = Project('test project 1')
proj1.save()
asset1 = Asset(proj1, 'test asset 1')
asset1.save()
asset2 = Asset(proj1, 'test asset 2')
asset2.save()
asset_vtypes = VersionType.query().filter_by(type_for="Asset").all()
user = User.query().first()
vers1 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers1.save()
vers2 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers2.save()
vers3 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers3.save()
vers4 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers4.save()
vers5 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers5.save()
vers6 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers6.save()
# check if all are in the session
self.assertIn(proj1, db.session)
self.assertIn(asset1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(vers1, db.session)
self.assertIn(vers2, db.session)
self.assertIn(vers3, db.session)
self.assertIn(vers4, db.session)
self.assertIn(vers5, db.session)
self.assertIn(vers6, db.session)
# delete the asset
db.session.delete(asset1)
db.session.commit()
# check if it is not in the session anymore
self.assertNotIn(asset1, db.session)
# check if the versions are also deleted
self.assertNotIn(vers1, db.session)
self.assertNotIn(vers2, db.session)
self.assertNotIn(vers3, db.session)
# check if the others are still there
self.assertIn(proj1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(vers4, db.session)
self.assertIn(vers5, db.session)
self.assertIn(vers6, db.session)
def test_deleting_an_asset_will_delete_all_the_related_versions_but_keep_references(self):
"""testing if deleting an asset will only delete the version of that
asset and will keep the referenced versions.
"""
proj1 = Project('test project 1')
proj1.save()
asset1 = Asset(proj1, 'test asset 1')
asset1.save()
asset2 = Asset(proj1, 'test asset 2')
asset2.save()
asset_vtypes = VersionType.query().filter_by(type_for="Asset").all()
user = User.query().first()
vers1 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers1.save()
vers2 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers2.save()
vers3 = Version(
version_of=asset1,
base_name=asset1.code,
type=asset_vtypes[0],
created_by=user
)
vers3.save()
vers4 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers4.save()
vers5 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers5.save()
vers6 = Version(
version_of=asset2,
base_name=asset2.code,
type=asset_vtypes[0],
created_by=user
)
vers6.save()
# reference vers4, vers5 and vers6 to vers1, vers2 and vers3
vers1.references.append(vers4)
vers1.references.append(vers5)
vers1.references.append(vers6)
vers2.references.append(vers4)
vers2.references.append(vers5)
vers2.references.append(vers6)
vers3.references.append(vers4)
vers3.references.append(vers5)
vers3.references.append(vers6)
# check if all are in the session
self.assertIn(proj1, db.session)
self.assertIn(asset1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(vers1, db.session)
self.assertIn(vers2, db.session)
self.assertIn(vers3, db.session)
self.assertIn(vers4, db.session)
self.assertIn(vers5, db.session)
self.assertIn(vers6, db.session)
# there should be 9 entries in the secondary table
result = db.session\
.query('referencer_id', 'reference_id')\
.from_statement("SELECT referencer_id, reference_id "
"FROM Version_References").all()
self.assertEqual(len(result), 9)
# delete the asset
db.session.delete(asset1)
db.session.commit()
# check if it is not in the session anymore
self.assertNotIn(asset1, db.session)
# check if the versions are also deleted
self.assertNotIn(vers1, db.session)
self.assertNotIn(vers2, db.session)
self.assertNotIn(vers3, db.session)
# check if the others are still there
self.assertIn(proj1, db.session)
self.assertIn(asset2, db.session)
self.assertIn(vers4, db.session)
self.assertIn(vers5, db.session)
self.assertIn(vers6, db.session)
# to be sure check the secondary table
result = db.session\
.query('referencer_id', 'reference_id')\
.from_statement("SELECT referencer_id, reference_id "
"FROM Version_References").all()
self.assertEqual(len(result), 0)
|
{
"content_hash": "a137f291d3af15761741a9406f5b7e6b",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 94,
"avg_line_length": 35.34146341463415,
"alnum_prop": 0.5719030365769496,
"repo_name": "code-google-com/oyprojectmanager",
"id": "eb5e42a68b0fef84d9618c91bee4f49e63e1f7a6",
"size": "23405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/models/test_asset.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "171"
},
{
"name": "Python",
"bytes": "1091868"
}
],
"symlink_target": ""
}
|
from .RamlWrap import ramlwrap
|
{
"content_hash": "b21ca4e485ce49aa2cf0acac8623c721",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 30,
"alnum_prop": 0.8666666666666667,
"repo_name": "jmons/ramlwrap",
"id": "1ff38289ade651eb61b13402396152c4eb1bb108",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ramlwrap/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12321"
},
{
"name": "Python",
"bytes": "86431"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
}
|
debug = 0
netConfig = {
}
networkParams = {
"packetSize" : "2048B",
"link_bw" : "4GB/s",
"link_lat" : "40ns",
"input_latency" : "50ns",
"output_latency" : "50ns",
"flitSize" : "8B",
"buffer_size" : "14KB",
}
nicParams = {
"detailedCompute.name" : "thornhill.SingleThread",
"module" : "merlin.linkcontrol",
"packetSize" : networkParams['packetSize'],
"link_bw" : networkParams['link_bw'],
"buffer_size" : networkParams['buffer_size'],
"rxMatchDelay_ns" : 100,
"txDelay_ns" : 50,
"nic2host_lat" : "150ns",
}
emberParams = {
"os.module" : "firefly.hades",
"os.name" : "hermesParams",
"api.0.module" : "firefly.hadesMP",
"verbose" : 0,
}
hermesParams = {
"hermesParams.detailedCompute.name" : "thornhill.SingleThread",
"hermesParams.memoryHeapLink.name" : "thornhill.MemoryHeapLink",
"hermesParams.nicModule" : "firefly.VirtNic",
"hermesParams.functionSM.defaultEnterLatency" : 30000,
"hermesParams.functionSM.defaultReturnLatency" : 30000,
"hermesParams.ctrlMsg.shortMsgLength" : 12000,
"hermesParams.ctrlMsg.matchDelay_ns" : 150,
"hermesParams.ctrlMsg.txSetupMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txSetupModParams.range.0" : "0-:130ns",
"hermesParams.ctrlMsg.rxSetupMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.rxSetupModParams.range.0" : "0-:100ns",
"hermesParams.ctrlMsg.txMemcpyMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txMemcpyModParams.op" : "Mult",
"hermesParams.ctrlMsg.txMemcpyModParams.range.0" : "0-:344ps",
"hermesParams.ctrlMsg.rxMemcpyMod" : "firefly.LatencyMod",
"hermesParams.ctrlMsg.txMemcpyModParams.op" : "Mult",
"hermesParams.ctrlMsg.rxMemcpyModParams.range.0" : "0-:344ps",
"hermesParams.ctrlMsg.sendAckDelay_ns" : 0,
"hermesParams.ctrlMsg.regRegionBaseDelay_ns" : 3000,
"hermesParams.ctrlMsg.regRegionPerPageDelay_ns" : 100,
"hermesParams.ctrlMsg.regRegionXoverLength" : 4096,
"hermesParams.loadMap.0.start" : 0,
"hermesParams.loadMap.0.len" : 2,
}
|
{
"content_hash": "fead87f3c0703b791baa41fb7d62b435",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 66,
"avg_line_length": 31.923076923076923,
"alnum_prop": 0.6751807228915663,
"repo_name": "minyee/sst-macro",
"id": "04759e9900bee2ab6d612a27d45a39be6a1f6ae2",
"size": "2076",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/emberDefaultParams.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "645067"
},
{
"name": "C++",
"bytes": "6981977"
},
{
"name": "CMake",
"bytes": "5308"
},
{
"name": "Cuda",
"bytes": "753"
},
{
"name": "M4",
"bytes": "184023"
},
{
"name": "Makefile",
"bytes": "86904"
},
{
"name": "Objective-C",
"bytes": "65934"
},
{
"name": "Perl",
"bytes": "6105"
},
{
"name": "Python",
"bytes": "105488"
},
{
"name": "Shell",
"bytes": "80899"
}
],
"symlink_target": ""
}
|
import datetime as system_datetime
def now():
return system_datetime.datetime.now()
def strnow(nowtime=None):
if nowtime is None:
nowtime = now()
return nowtime.strftime("%Y-%m-%d %H:%M:%S")
def now14(nowtime=None):
if nowtime is None:
nowtime = now()
return nowtime.strftime("%Y%m%d%H%M%S")
|
{
"content_hash": "a0873f9fe0e96f85d10df9b54272dbf0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 19.647058823529413,
"alnum_prop": 0.6287425149700598,
"repo_name": "zencore-dobetter/zencore-utils",
"id": "eb4871f86c37b5f1d74108f2f13407a631f30ef8",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zencore/utils/datetime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108442"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
"""
Initialize, start, stop, or destroy WAL replication mirror segments.
============================= DISCLAIMER =============================
This is a developer tool to assist with development of WAL replication
for mirror segments. This tool is not meant to be used in production.
It is suggested to only run this tool against a gpdemo cluster that
was initialized with no FileRep mirrors.
Example:
WITH_MIRRORS=false make create-demo-cluster
======================================================================
Assumptions:
1. Greenplum cluster was compiled with --enable-segwalrep
2. Greenplum cluster was initialized without mirror segments.
3. Cluster is all on one host
4. Greenplum environment is fully set up (greenplum_path.sh, MASTER_DATA_DIRECTORY, PGPORT, etc.)
5. Greenplum environment is started
6. Greenplum environment is the same throughout tool usage
Assuming all of the above, you can just run the tool as so:
./gpsegwalrep.py [init|start|stop|destroy]
"""
import argparse
import os
import sys
import subprocess
import threading
from gppylib.db import dbconn
PRINT_LOCK = threading.Lock()
THREAD_LOCK = threading.Lock()
def runcommands(commands, thread_name, command_finish, exit_on_error=True):
output = []
for command in commands:
try:
output.append('Running command... %s' % command)
with THREAD_LOCK:
output = output + subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True).split('\n')
except subprocess.CalledProcessError, e:
output.append(str(e))
if exit_on_error:
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
sys.exit(e.returncode)
output.append(command_finish)
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
class InitMirrors():
''' Initialize the WAL replication mirror segment '''
def __init__(self, segconfigs, hostname):
self.segconfigs = segconfigs
self.hostname = hostname
def initThread(self, segconfig, user):
commands = []
primary_port = segconfig[2]
primary_dir = segconfig[3]
mirror_contentid = segconfig[1]
mirror_dir = segconfig[3].replace('dbfast', 'dbfast_mirror')
mirror_port = primary_port + 10000
commands.append("echo 'host replication %s samenet trust' >> %s/pg_hba.conf" % (user, primary_dir))
commands.append("pg_ctl -D %s reload" % primary_dir)
commands.append("pg_basebackup -x -R -c fast -E ./pg_log -E ./db_dumps -E ./gpperfmon/data -E ./gpperfmon/logs -D %s -h %s -p %d" % (mirror_dir, self.hostname, primary_port))
commands.append("mkdir %s/pg_log; mkdir %s/pg_xlog/archive_status" % (mirror_dir, mirror_dir))
catalog_update_query = "select pg_catalog.gp_add_segment_mirror(%d::int2, '%s', '%s', %d, -1, '{pg_system, %s}')" % (mirror_contentid, self.hostname, self.hostname, mirror_port, mirror_dir)
commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Initialized mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish)
def run(self):
# Assume db user is current user
user = subprocess.check_output(["whoami"]).rstrip('\n')
initThreads = []
for segconfig in self.segconfigs:
if segconfig[4] == 'p':
thread = threading.Thread(target=self.initThread, args=(segconfig, user))
thread.start()
initThreads.append(thread)
for thread in initThreads:
thread.join()
class StartMirrors():
''' Start the WAL replication mirror segment '''
def __init__(self, segconfigs, host):
self.segconfigs = segconfigs
self.num_contents = len(segconfigs)
self.host = host
def startThread(self, segconfig):
commands = []
mirror_contentid = segconfig[1]
mirror_port = segconfig[2]
mirror_dir = segconfig[3]
opts = "-p %d --gp_dbid=0 --silent-mode=true -i -M mirrorless --gp_contentid=%d --gp_num_contents_in_cluster=%d" % (mirror_port, mirror_contentid, self.num_contents)
commands.append("pg_ctl -D %s -o '%s' start" % (mirror_dir, opts))
commands.append("pg_ctl -D %s status" % mirror_dir)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Started mirror with content %d and port %d at %s' % (mirror_contentid, mirror_port, mirror_dir)
runcommands(commands, thread_name, command_finish)
def run(self):
startThreads = []
for segconfig in self.segconfigs:
if segconfig[4] == 'm':
thread = threading.Thread(target=self.startThread, args=(segconfig,))
thread.start()
startThreads.append(thread)
for thread in startThreads:
thread.join()
class StopMirrors():
''' Stop the WAL replication mirror segment '''
def __init__(self, segconfigs):
self.segconfigs = segconfigs
def stopThread(self, segconfig):
commands = []
mirror_contentid = segconfig[1]
mirror_dir = segconfig[3]
commands.append("pg_ctl -D %s stop" % mirror_dir)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Stopped mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish)
def run(self):
stopThreads = []
for segconfig in self.segconfigs:
if segconfig[4] == 'm':
thread = threading.Thread(target=self.stopThread, args=(segconfig,))
thread.start()
stopThreads.append(thread)
for thread in stopThreads:
thread.join()
class DestroyMirrors():
''' Destroy the WAL replication mirror segment '''
def __init__(self, segconfigs):
self.segconfigs = segconfigs
def destroyThread(self, segconfig):
commands = []
mirror_contentid = segconfig[1]
mirror_dir = segconfig[3]
commands.append("pg_ctl -D %s stop" % mirror_dir)
commands.append("rm -rf %s" % mirror_dir)
catalog_update_query = "select pg_catalog.gp_remove_segment_mirror(%d::int2)" % (mirror_contentid)
commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Destroyed mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish, False)
def run(self):
destroyThreads = []
for segconfig in self.segconfigs:
if segconfig[4] == 'm':
thread = threading.Thread(target=self.destroyThread, args=(segconfig,))
thread.start()
destroyThreads.append(thread)
for thread in destroyThreads:
thread.join()
def getSegInfo(hostname, port, dbname):
query = "SELECT dbid, content, port, fselocation, preferred_role FROM gp_segment_configuration s, pg_filespace_entry f WHERE s.content != -1 AND s.dbid = fsedbid"
dburl = dbconn.DbURL(hostname, port, dbname)
try:
with dbconn.connect(dburl) as conn:
segconfigs = dbconn.execSQL(conn, query).fetchall()
except Exception, e:
print e
sys.exit(1)
return segconfigs
def defargs():
parser = argparse.ArgumentParser(description='Initialize, start, stop, or destroy WAL replication mirror segments')
parser.add_argument('--host', type=str, required=False, default=os.getenv('PGHOST', 'localhost'),
help='Master host to get segment config information from')
parser.add_argument('--port', type=str, required=False, default=os.getenv('PGPORT', '15432'),
help='Master port to get segment config information from')
parser.add_argument('--database', type=str, required=False, default='postgres',
help='Database name to get segment config information from')
parser.add_argument('operation', type=str, choices=['init', 'start', 'stop', 'destroy'])
return parser.parse_args()
if __name__ == "__main__":
# Get parsed args
args = defargs()
# Get information on all primary segments
segconfigs = getSegInfo(args.host, args.port, args.database)
# Execute the chosen operation
if args.operation == 'init':
InitMirrors(segconfigs, args.host).run()
elif args.operation == 'start':
StartMirrors(segconfigs, args.host).run()
elif args.operation == 'stop':
StopMirrors(segconfigs).run()
elif args.operation == 'destroy':
DestroyMirrors(segconfigs).run()
|
{
"content_hash": "1ec19406603578e98097e5735c971415",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 197,
"avg_line_length": 38.44444444444444,
"alnum_prop": 0.620386838594931,
"repo_name": "Quikling/gpdb",
"id": "a70f2f7da6fe5d5f1eeb9d5672e33524c0295c34",
"size": "9020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpAux/gpdemo/gpsegwalrep.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35104900"
},
{
"name": "C++",
"bytes": "3826418"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "731336"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268348"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "105042"
},
{
"name": "Makefile",
"bytes": "428681"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5487194"
},
{
"name": "Perl",
"bytes": "3894496"
},
{
"name": "Perl 6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "8656525"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "541518"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488297"
}
],
"symlink_target": ""
}
|
import binascii
import os, sys, re, json
from collections import defaultdict, OrderedDict
from typing import (NamedTuple, Union, TYPE_CHECKING, Tuple, Optional, Callable, Any,
Sequence, Dict, Generic, TypeVar)
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from locale import localeconv
import asyncio
import urllib.request, urllib.parse, urllib.error
import builtins
import json
import time
from typing import NamedTuple, Optional
import ssl
import ipaddress
import random
import aiohttp
from aiohttp_socks import ProxyConnector, ProxyType
import aiorpcx
from aiorpcx import TaskGroup
import certifi
import dns.resolver
import ecdsa
from .i18n import _
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .network import Network
from .interface import Interface
from .simple_config import SimpleConfig
_logger = get_logger(__name__)
def inv_dict(d):
return {v: k for k, v in d.items()}
ca_path = certifi.where()
base_units = {'VIA':8, 'mVIA':5, 'uVIA':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['VIA', 'mVIA', 'uVIA', 'sat'] # list(dict) does not guarantee order
DECIMAL_POINT_DEFAULT = 8 # VIA
# types of payment requests
PR_TYPE_ONCHAIN = 0
PR_TYPE_LN = 2
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3     # sent and propagated
PR_INFLIGHT = 4 # unconfirmed
PR_FAILED = 5
PR_ROUTING = 6
pr_color = {
PR_UNPAID: (.7, .7, .7, 1),
PR_PAID: (.2, .9, .2, 1),
PR_UNKNOWN: (.7, .7, .7, 1),
PR_EXPIRED: (.9, .2, .2, 1),
PR_INFLIGHT: (.9, .6, .3, 1),
PR_FAILED: (.9, .2, .2, 1),
PR_ROUTING: (.9, .6, .3, 1),
}
pr_tooltips = {
PR_UNPAID:_('Pending'),
PR_PAID:_('Paid'),
PR_UNKNOWN:_('Unknown'),
PR_EXPIRED:_('Expired'),
PR_INFLIGHT:_('In progress'),
PR_FAILED:_('Failed'),
PR_ROUTING: _('Computing route...'),
}
PR_DEFAULT_EXPIRATION_WHEN_CREATING = 24*60*60 # 1 day
pr_expiration_values = {
0: _('Never'),
10*60: _('10 minutes'),
60*60: _('1 hour'),
24*60*60: _('1 day'),
7*24*60*60: _('1 week'),
}
assert PR_DEFAULT_EXPIRATION_WHEN_CREATING in pr_expiration_values
def get_request_status(req):
status = req['status']
exp = req.get('exp', 0) or 0
if req.get('type') == PR_TYPE_LN and exp == 0:
status = PR_EXPIRED # for BOLT-11 invoices, exp==0 means 0 seconds
if req['status'] == PR_UNPAID and exp > 0 and req['time'] + req['exp'] < time.time():
status = PR_EXPIRED
status_str = pr_tooltips[status]
if status == PR_UNPAID:
if exp > 0:
expiration = exp + req['time']
status_str = _('Expires') + ' ' + age(expiration, include_seconds=True)
else:
status_str = _('Pending')
return status, status_str
class UnknownBaseUnit(Exception): pass
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "BTC"
try:
return base_units_inverse[dp]
except KeyError:
raise UnknownBaseUnit(dp) from None
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise UnknownBaseUnit(unit_name) from None
class NotEnoughFunds(Exception):
def __str__(self):
return _("Insufficient funds")
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class MultipleSpendMaxTxOutputs(Exception):
def __str__(self):
return _('At most one output can be set to spend max')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
class UserFacingException(Exception):
"""Exception that contains information intended to be shown to the user."""
class InvoiceError(UserFacingException): pass
# Raise this exception to unwind the stack, just as when an error occurs;
# unlike other exceptions, however, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
# note: 'value' sometimes has msat precision
self.value = value
return self
def __repr__(self):
return f'Satoshis({self.value})'
def __str__(self):
# note: precision is truncated to satoshis here
return format_satoshis(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not (self == other)
# note: this is not a NamedTuple as then its json encoding cannot be customized
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value: Optional[Decimal], ccy: str):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
if not isinstance(value, (Decimal, type(None))):
raise TypeError(f"value should be Decimal or None, not {type(value)}")
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value is None or self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value)
def to_ui_string(self):
if self.value is None or self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
def __eq__(self, other):
if self.ccy != other.ccy:
return False
if isinstance(self.value, Decimal) and isinstance(other.value, Decimal) \
and self.value.is_nan() and other.value.is_nan():
return True
return self.value == other.value
def __ne__(self, other):
return not (self == other)
class MyEncoder(json.JSONEncoder):
def default(self, obj):
# note: this does not get called for namedtuples :( https://bugs.python.org/issue30343
from .transaction import Transaction, TxOutput
from .lnutil import UpdateAddHtlc
if isinstance(obj, UpdateAddHtlc):
return obj.to_tuple()
if isinstance(obj, Transaction):
return obj.serialize()
if isinstance(obj, TxOutput):
return obj.to_legacy_tuple()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
if isinstance(obj, bytes): # for nametuples in lnchannel
return obj.hex()
if hasattr(obj, 'to_json') and callable(obj.to_json):
return obj.to_json()
return super(MyEncoder, self).default(obj)
class ThreadJob(Logger):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def __init__(self):
Logger.__init__(self)
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
ThreadJob.__init__(self)
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.logger.info("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.logger.info(f"{class_.__name__}: {len(objs)}")
self.logger.info("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, Logger):
""" daemon thread that terminates cleanly """
LOGGING_SHORTCUT = 'd'
def __init__(self):
threading.Thread.__init__(self)
Logger.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
self.logger.exception('')
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.logger.info("jnius detach")
self.logger.info("stopped")
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
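# Illustrative usage (not part of the original module): both inputs are
# utf-8 encoded before the constant-time comparison, so str and bytes mix.
# >>> constant_time_compare('secret', 'secret')
# True
# >>> constant_time_compare('secret', b'secret')
# True
# >>> constant_time_compare('secret', 'Secret')
# False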
# decorator that prints execution time
_profiler_logger = _logger.getChild('profiler')
def profiler(func):
def do_profile(args, kw_args):
name = func.__qualname__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
_profiler_logger.debug(f"{name} {t:,.4f}")
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
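# Minimal usage sketch (illustrative only; 'load_db' is a made-up name): the
# decorator logs the wall-clock time of every call under the profiler logger.
#
#   @profiler
#   def load_db(path):
#       ...
#
# Each call then emits a debug line such as "load_db 0.0420" (timing varies).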
def android_ext_dir():
from android.storage import primary_external_storage_path
return primary_external_storage_path()
def android_backup_dir():
d = os.path.join(android_ext_dir(), 'org.electrum_ltc.electrum_ltc')
if not os.path.exists(d):
os.mkdir(d)
return d
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def get_backup_dir(config):
if 'ANDROID_DATA' in os.environ:
return android_backup_dir() if config.get('android_backups') else None
else:
return config.get('backup_dir')
def ensure_sparse_file(filename):
# On modern Linux, no need to do anything.
# On Windows, need to explicitly mark file.
if os.name == "nt":
try:
os.system('fsutil sparse setflag "{}" 1'.format(filename))
except Exception as e:
_logger.info(f'error marking file {filename} as sparse: {e}')
def get_headers_dir(config):
return config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def standardize_path(path):
return os.path.normcase(
os.path.realpath(
os.path.abspath(
os.path.expanduser(
path
))))
def get_new_wallet_name(wallet_folder: str) -> str:
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
return filename
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc) -> str:
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8') -> bytes:
"""
    cast a string to a bytes-like object; bytearray input is copied to bytes (kept from the python2 port)
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
"""
return x.hex()
def xor_bytes(a: bytes, b: bytes) -> bytes:
size = min(len(a), len(b))
return ((int.from_bytes(a[:size], "big") ^ int.from_bytes(b[:size], "big"))
.to_bytes(size, "big"))
def user_dir():
if "ELECTRUMDIR" in os.environ:
return os.environ["ELECTRUMDIR"]
elif 'ANDROID_DATA' in os.environ:
return android_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".vialectrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Vialectrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Vialectrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def resource_path(*parts):
return os.path.join(pkg_dir, *parts)
# absolute path to python package folder of electrum ("lib")
pkg_dir = os.path.split(os.path.realpath(__file__))[0]
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def is_hash256_str(text: Any) -> bool:
if not isinstance(text, str): return False
if len(text) != 64: return False
return is_hex_str(text)
def is_hex_str(text: Any) -> bool:
if not isinstance(text, str): return False
try:
bytes.fromhex(text)
except:
return False
return True
def is_non_negative_integer(val) -> bool:
try:
val = int(val)
if val >= 0:
return True
except:
pass
return False
def chunks(items, size: int):
"""Break up items, an iterable, into chunks of length size."""
if size < 1:
raise ValueError(f"size must be positive, not {repr(size)}")
for i in range(0, len(items), size):
yield items[i: i + size]
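# Illustrative example (not from the original source); items must support
# len() and slicing:
# >>> list(chunks([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]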
def format_satoshis_plain(x, decimal_point = 8) -> str:
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
if x == '!':
return 'max'
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point'] # type: str
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False) -> str:
if x is None:
return 'unknown'
if x == '!':
return 'max'
if precision is None:
precision = decimal_point
# format string
decimal_format = "." + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
if not isinstance(x, Decimal):
x = Decimal(x).quantize(Decimal('1E-8'))
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# extra decimal places
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# leading/trailing whitespaces
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
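# Illustrative examples (not from the original source); they assume a locale
# whose decimal point is '.', since DECIMAL_POINT comes from localeconv():
# >>> format_satoshis(1234500)
# '0.012345'
# >>> format_satoshis(1234500, num_zeros=8)
# '0.01234500'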
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, *, num_zeros=0, precision=None):
if precision is None:
precision = FEERATE_PRECISION
num_zeros = min(num_zeros, FEERATE_PRECISION) # no more zeroes than available prec
return format_satoshis(fee, num_zeros=num_zeros, decimal_point=0, precision=precision)
def quantize_feerate(fee) -> Union[None, Decimal, int]:
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
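# Illustrative example (not from the original source): one fractional digit
# is kept (FEERATE_PRECISION) and exact halves round down.
# >>> quantize_feerate(Decimal('1.25'))
# Decimal('1.2')
# >>> quantize_feerate(None) is None
# True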
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes == 0:
if include_seconds:
return "%s seconds" % distance_in_seconds
else:
return "less than a minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'cryptoID.info': ('https://chainz.cryptoid.info/via/',
{'tx': 'tx.dws', 'addr': 'address.dws?'}),
'blockbook': ('https://blockbook.viacoin.org/',
{'tx': 'tx/', 'addr': 'address/'}),
'via-insight': ('https://explorer.viacoin.org/',
{'tx': 'tx/', 'addr': 'addr/'}),
'system default': ('blockchain://12a765e31ffd4059bada1e25190f6e98c99d9714d334efa41a195a7e7e04bfe2/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'Bitaps.com': ('https://tltc.bitaps.com/',
{'tx': '', 'addr': ''}),
'LiteCore': ('https://testnet.litecore.io/',
{'tx': 'tx/', 'addr': 'address/'}),
'SoChain': ('https://chain.so/',
{'tx': 'tx/LTCTEST/', 'addr': 'address/LTCTEST/'}),
'system default': ('blockchain://4966625a4b2851d9fdee139e56211a0d88575f59ed816ff5e6a63deb4e3e29a0/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return mainnet_block_explorers if not constants.net.TESTNET else testnet_block_explorers
def block_explorer(config: 'SimpleConfig') -> str:
from . import constants
default_ = 'blockbook' if not constants.net.TESTNET else 'system default'
be_key = config.get('block_explorer', default_)
be = block_explorer_info().get(be_key)
return be_key if be is not None else default_
def block_explorer_tuple(config: 'SimpleConfig') -> Optional[Tuple[str, dict]]:
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config: 'SimpleConfig', kind: str, item: str) -> Optional[str]:
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
explorer_url, explorer_dict = be_tuple
kind_str = explorer_dict.get(kind)
if kind_str is None:
return
url_parts = [explorer_url, kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
class InvalidBitcoinURI(Exception): pass
def parse_URI(uri: str, on_pr: Callable = None, *, loop=None) -> dict:
"""Raises InvalidBitcoinURI on malformed URI."""
from . import bitcoin
from .bitcoin import COIN
if not isinstance(uri, str):
raise InvalidBitcoinURI(f"expected string, not {repr(uri)}")
if ':' not in uri:
if not bitcoin.is_address(uri):
raise InvalidBitcoinURI("Not a viacoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'viacoin':
raise InvalidBitcoinURI("Not a viacoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v) != 1:
raise InvalidBitcoinURI(f'Duplicate Key: {repr(k)}')
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise InvalidBitcoinURI(f"Invalid viacoin address: {address}")
out['address'] = address
if 'amount' in out:
am = out['amount']
try:
m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'amount' field: {repr(e)}") from e
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
try:
out['time'] = int(out['time'])
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'time' field: {repr(e)}") from e
if 'exp' in out:
try:
out['exp'] = int(out['exp'])
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'exp' field: {repr(e)}") from e
if 'sig' in out:
try:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], base=58))
except Exception as e:
raise InvalidBitcoinURI(f"failed to parse 'sig' field: {repr(e)}") from e
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
@log_exceptions
async def get_payment_request():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = await pr.get_payment_request(r)
if on_pr:
on_pr(request)
loop = loop or asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(get_payment_request(), loop)
return out
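# Minimal usage sketch (illustrative; "<addr>" stands in for a valid viacoin
# address -- an invalid one raises InvalidBitcoinURI):
#   parse_URI('viacoin:<addr>?amount=1.5&message=donation')
# returns a dict with 'address' set to '<addr>', 'amount' converted to
# 150000000 satoshis, and both 'message' and 'memo' set to 'donation'.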
def create_bip21_uri(addr, amount_sat: Optional[int], message: Optional[str],
*, extra_query_params: Optional[dict] = None) -> str:
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
if extra_query_params is None:
extra_query_params = {}
query = []
if amount_sat:
query.append('amount=%s'%format_satoshis_plain(amount_sat))
if message:
query.append('message=%s'%urllib.parse.quote(message))
for k, v in extra_query_params.items():
if not isinstance(k, str) or k != urllib.parse.quote(k):
raise Exception(f"illegal key for URI: {repr(k)}")
v = urllib.parse.quote(v)
query.append(f"{k}={v}")
p = urllib.parse.ParseResult(scheme='viacoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return str(urllib.parse.urlunparse(p))
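# Illustrative example (not from the original source; "<addr>" must be a
# valid viacoin address, otherwise an empty string is returned):
#   create_bip21_uri('<addr>', 150000000, 'donation')
# would yield 'viacoin:<addr>?amount=1.5&message=donation'.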
def maybe_extract_bolt11_invoice(data: str) -> Optional[str]:
data = data.strip() # whitespaces
data = data.lower()
if data.startswith('lightning:ln'):
data = data[10:]
if data.startswith('ln'):
return data
return None
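# Illustrative examples (not from the original source): the 'lightning:'
# scheme prefix is stripped and the invoice is lower-cased.
# >>> maybe_extract_bolt11_invoice('LIGHTNING:LNTB20U1PEXAMPLE')
# 'lntb20u1pexample'
# >>> maybe_extract_bolt11_invoice('not an invoice') is None
# True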
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
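# Minimal usage sketch (illustrative only): call once at startup, after which
# uncaught exceptions inside any Thread.run() are routed to sys.excepthook
# (and hence to the crash reporter) instead of being printed by the threading
# module.
#
#   setup_thread_excepthook()
#   threading.Thread(target=some_worker).start()   # 'some_worker' is a placeholder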
def send_exception_to_crash_reporter(e: BaseException):
sys.excepthook(type(e), e, e.__traceback__)
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
_logger.exception('')
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
_logger.exception('')
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
_logger.exception('')
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def log_exceptions(func):
"""Decorator to log AND re-raise exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
self = args[0] if len(args) > 0 else None
try:
return await func(*args, **kwargs)
except asyncio.CancelledError as e:
raise
except BaseException as e:
mylogger = self.logger if hasattr(self, 'logger') else _logger
try:
mylogger.exception(f"Exception in {func.__name__}: {repr(e)}")
except BaseException as e2:
print(f"logging exception raised: {repr(e2)}... orig exc: {repr(e)} in {func.__name__}")
raise
return wrapper
def ignore_exceptions(func):
"""Decorator to silently swallow all exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except asyncio.CancelledError:
# note: with python 3.8, CancelledError no longer inherits Exception, so this catch is redundant
raise
except Exception as e:
pass
return wrapper
class TxMinedInfo(NamedTuple):
height: int # height of block that mined tx
conf: Optional[int] = None # number of confirmations, SPV verified (None means unknown)
timestamp: Optional[int] = None # timestamp of block that mined tx
txpos: Optional[int] = None # position of tx in serialized block
header_hash: Optional[str] = None # hash of block that mined tx
def make_aiohttp_session(proxy: Optional[dict], headers=None, timeout=None):
if headers is None:
headers = {'User-Agent': 'Electrum'}
if timeout is None:
# The default timeout is high intentionally.
# DNS on some systems can be really slow, see e.g. #5337
timeout = aiohttp.ClientTimeout(total=45)
elif isinstance(timeout, (int, float)):
timeout = aiohttp.ClientTimeout(total=timeout)
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path)
if proxy:
connector = ProxyConnector(
proxy_type=ProxyType.SOCKS5 if proxy['mode'] == 'socks5' else ProxyType.SOCKS4,
host=proxy['host'],
port=int(proxy['port']),
username=proxy.get('user', None),
password=proxy.get('password', None),
rdns=True,
ssl=ssl_context,
)
else:
connector = aiohttp.TCPConnector(ssl=ssl_context)
return aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)
class SilentTaskGroup(TaskGroup):
def spawn(self, *args, **kwargs):
# don't complain if group is already closed.
if self._closed:
raise asyncio.CancelledError()
return super().spawn(*args, **kwargs)
class NetworkJobOnDefaultServer(Logger):
"""An abstract base class for a job that runs on the main network
interface. Every time the main interface changes, the job is
restarted, and some of its internals are reset.
"""
def __init__(self, network: 'Network'):
Logger.__init__(self)
asyncio.set_event_loop(network.asyncio_loop)
self.network = network
self.interface = None # type: Interface
self._restart_lock = asyncio.Lock()
self._reset()
asyncio.run_coroutine_threadsafe(self._restart(), network.asyncio_loop)
register_callback(self._restart, ['default_server_changed'])
def _reset(self):
"""Initialise fields. Called every time the underlying
server connection changes.
"""
self.taskgroup = SilentTaskGroup()
async def _start(self, interface: 'Interface'):
self.interface = interface
await interface.taskgroup.spawn(self._start_tasks)
async def _start_tasks(self):
"""Start tasks in self.taskgroup. Called every time the underlying
server connection changes.
"""
raise NotImplementedError() # implemented by subclasses
async def stop(self):
unregister_callback(self._restart)
await self._stop()
async def _stop(self):
await self.taskgroup.cancel_remaining()
@log_exceptions
async def _restart(self, *args):
interface = self.network.interface
if interface is None:
return # we should get called again soon
async with self._restart_lock:
await self._stop()
self._reset()
await self._start(interface)
@property
def session(self):
s = self.interface.session
assert s is not None
return s
def create_and_start_event_loop() -> Tuple[asyncio.AbstractEventLoop,
asyncio.Future,
threading.Thread]:
def on_exception(loop, context):
"""Suppress spurious messages it appears we cannot control."""
SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
'SSL error in data received')
message = context.get('message')
if message and SUPPRESS_MESSAGE_REGEX.match(message):
return
loop.default_exception_handler(context)
loop = asyncio.get_event_loop()
loop.set_exception_handler(on_exception)
# loop.set_debug(1)
stopping_fut = asyncio.Future()
loop_thread = threading.Thread(target=loop.run_until_complete,
args=(stopping_fut,),
name='EventLoop')
loop_thread.start()
return loop, stopping_fut, loop_thread
class OrderedDictWithIndex(OrderedDict):
"""An OrderedDict that keeps track of the positions of keys.
Note: very inefficient to modify contents, except to add new items.
"""
def __init__(self):
super().__init__()
self._key_to_pos = {}
self._pos_to_key = {}
def _recalc_index(self):
self._key_to_pos = {key: pos for (pos, key) in enumerate(self.keys())}
self._pos_to_key = {pos: key for (pos, key) in enumerate(self.keys())}
def pos_from_key(self, key):
return self._key_to_pos[key]
def value_from_pos(self, pos):
key = self._pos_to_key[pos]
return self[key]
def popitem(self, *args, **kwargs):
ret = super().popitem(*args, **kwargs)
self._recalc_index()
return ret
def move_to_end(self, *args, **kwargs):
ret = super().move_to_end(*args, **kwargs)
self._recalc_index()
return ret
def clear(self):
ret = super().clear()
self._recalc_index()
return ret
def pop(self, *args, **kwargs):
ret = super().pop(*args, **kwargs)
self._recalc_index()
return ret
def update(self, *args, **kwargs):
ret = super().update(*args, **kwargs)
self._recalc_index()
return ret
def __delitem__(self, *args, **kwargs):
ret = super().__delitem__(*args, **kwargs)
self._recalc_index()
return ret
def __setitem__(self, key, *args, **kwargs):
is_new_key = key not in self
ret = super().__setitem__(key, *args, **kwargs)
if is_new_key:
pos = len(self) - 1
self._key_to_pos[key] = pos
self._pos_to_key[pos] = key
return ret
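# Illustrative usage (not from the original source): insertion order defines
# the positions; most mutating operations trigger a full reindex.
# >>> d = OrderedDictWithIndex()
# >>> d['a'] = 1; d['b'] = 2
# >>> d.pos_from_key('b')
# 1
# >>> d.value_from_pos(0)
# 1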
def multisig_type(wallet_type):
'''If wallet_type is mofn multi-sig, return [m, n],
otherwise return None.'''
if not wallet_type:
return None
match = re.match(r'(\d+)of(\d+)', wallet_type)
if match:
match = [int(x) for x in match.group(1, 2)]
return match
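# Illustrative examples (not from the original source):
# >>> multisig_type('2of3')
# [2, 3]
# >>> multisig_type('standard') is None
# True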
def is_ip_address(x: Union[str, bytes]) -> bool:
if isinstance(x, bytes):
x = x.decode("utf-8")
try:
ipaddress.ip_address(x)
return True
except ValueError:
return False
def list_enabled_bits(x: int) -> Sequence[int]:
"""e.g. 77 (0b1001101) --> (0, 2, 3, 6)"""
binary = bin(x)[2:]
rev_bin = reversed(binary)
return tuple(i for i, b in enumerate(rev_bin) if b == '1')
def resolve_dns_srv(host: str):
srv_records = dns.resolver.query(host, 'SRV')
# priority: prefer lower
# weight: tie breaker; prefer higher
srv_records = sorted(srv_records, key=lambda x: (x.priority, -x.weight))
def dict_from_srv_record(srv):
return {
'host': str(srv.target),
'port': srv.port,
}
return [dict_from_srv_record(srv) for srv in srv_records]
def randrange(bound: int) -> int:
"""Return a random integer k such that 1 <= k < bound, uniformly
distributed across that range."""
return ecdsa.util.randrange(bound)
class CallbackManager:
# callbacks set by the GUI
def __init__(self):
self.callback_lock = threading.Lock()
self.callbacks = defaultdict(list) # note: needs self.callback_lock
self.asyncio_loop = None
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
if self.asyncio_loop is None:
self.asyncio_loop = asyncio.get_event_loop()
assert self.asyncio_loop.is_running(), "event loop not running"
with self.callback_lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
# FIXME: if callback throws, we will lose the traceback
if asyncio.iscoroutinefunction(callback):
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
else:
self.asyncio_loop.call_soon_threadsafe(callback, event, *args)
callback_mgr = CallbackManager()
trigger_callback = callback_mgr.trigger_callback
register_callback = callback_mgr.register_callback
unregister_callback = callback_mgr.unregister_callback
_NetAddrType = TypeVar("_NetAddrType")
class NetworkRetryManager(Generic[_NetAddrType]):
"""Truncated Exponential Backoff for network connections."""
def __init__(
self, *,
max_retry_delay_normal: float,
init_retry_delay_normal: float,
max_retry_delay_urgent: float = None,
init_retry_delay_urgent: float = None,
):
self._last_tried_addr = {} # type: Dict[_NetAddrType, Tuple[float, int]] # (unix ts, num_attempts)
# note: these all use "seconds" as unit
if max_retry_delay_urgent is None:
max_retry_delay_urgent = max_retry_delay_normal
if init_retry_delay_urgent is None:
init_retry_delay_urgent = init_retry_delay_normal
self._max_retry_delay_normal = max_retry_delay_normal
self._init_retry_delay_normal = init_retry_delay_normal
self._max_retry_delay_urgent = max_retry_delay_urgent
self._init_retry_delay_urgent = init_retry_delay_urgent
def _trying_addr_now(self, addr: _NetAddrType) -> None:
last_time, num_attempts = self._last_tried_addr.get(addr, (0, 0))
# we add up to 1 second of noise to the time, so that clients are less likely
# to get synchronised and bombard the remote in connection waves:
cur_time = time.time() + random.random()
self._last_tried_addr[addr] = cur_time, num_attempts + 1
def _on_connection_successfully_established(self, addr: _NetAddrType) -> None:
self._last_tried_addr[addr] = time.time(), 0
def _can_retry_addr(self, peer: _NetAddrType, *,
now: float = None, urgent: bool = False) -> bool:
if now is None:
now = time.time()
last_time, num_attempts = self._last_tried_addr.get(peer, (0, 0))
if urgent:
delay = min(self._max_retry_delay_urgent,
self._init_retry_delay_urgent * 2 ** num_attempts)
else:
delay = min(self._max_retry_delay_normal,
self._init_retry_delay_normal * 2 ** num_attempts)
next_time = last_time + delay
return next_time < now
def _clear_addr_retry_times(self) -> None:
self._last_tried_addr.clear()
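# Illustrative sketch (not from the original source): with
# init_retry_delay_normal=1 and max_retry_delay_normal=60, the wait before a
# peer may be retried doubles per recorded attempt (1, 2, 4, ... capped at
# 60s); e.g. after three attempts _can_retry_addr() requires
# min(60, 1 * 2 ** 3) == 8 seconds to have passed since the last try.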
class MySocksProxy(aiorpcx.SOCKSProxy):
async def open_connection(self, host=None, port=None, **kwargs):
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader(loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
transport, _ = await self.create_connection(
lambda: protocol, host, port, **kwargs)
writer = asyncio.StreamWriter(transport, protocol, reader, loop)
return reader, writer
@classmethod
def from_proxy_dict(cls, proxy: dict = None) -> Optional['MySocksProxy']:
if not proxy:
return None
username, pw = proxy.get('user'), proxy.get('password')
if not username or not pw:
auth = None
else:
auth = aiorpcx.socks.SOCKSUserAuth(username, pw)
addr = aiorpcx.NetAddress(proxy['host'], proxy['port'])
if proxy['mode'] == "socks4":
ret = cls(addr, aiorpcx.socks.SOCKS4a, auth)
elif proxy['mode'] == "socks5":
ret = cls(addr, aiorpcx.socks.SOCKS5, auth)
else:
raise NotImplementedError # http proxy not available with aiorpcx
return ret
|
{
"content_hash": "29ee7e045335a30cb83ec74c38ea1891",
"timestamp": "",
"source": "github",
"line_count": 1378,
"max_line_length": 119,
"avg_line_length": 31.400580551523948,
"alnum_prop": 0.6024035128264387,
"repo_name": "vialectrum/vialectrum",
"id": "a954589398b0234dc4b29324a0285e09f392be6e",
"size": "44408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_ltc/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "839"
},
{
"name": "NSIS",
"bytes": "7496"
},
{
"name": "Python",
"bytes": "1895270"
},
{
"name": "Shell",
"bytes": "16219"
}
],
"symlink_target": ""
}
|
import mock
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers import xio
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger("cinder.volume.driver")
ISE_IP1 = '10.12.12.1'
ISE_IP2 = '10.11.12.2'
ISE_ISCSI_IP1 = '1.2.3.4'
ISE_ISCSI_IP2 = '1.2.3.5'
ISE_GID = 'isegid'
ISE_IQN = ISE_GID
ISE_WWN1 = ISE_GID + '1'
ISE_WWN2 = ISE_GID + '2'
ISE_WWN3 = ISE_GID + '3'
ISE_WWN4 = ISE_GID + '4'
ISE_TARGETS = [ISE_WWN1, ISE_WWN2, ISE_WWN3, ISE_WWN4]
ISE_INIT_TARGET_MAP = {'init_wwn1': ISE_TARGETS,
'init_wwn2': ISE_TARGETS}
VOLUME_SIZE = 10
NEW_VOLUME_SIZE = 20
VOLUME1 = {'id': '1', 'name': 'volume1',
'size': VOLUME_SIZE, 'volume_type_id': 'type1'}
VOLUME2 = {'id': '2', 'name': 'volume2',
'size': VOLUME_SIZE, 'volume_type_id': 'type2',
'provider_auth': 'CHAP abc abc'}
VOLUME3 = {'id': '3', 'name': 'volume3',
'size': VOLUME_SIZE, 'volume_type_id': None}
SNAPSHOT1 = {'name': 'snapshot1',
'volume_name': VOLUME1['name'],
'volume_type_id': 'type3'}
CLONE1 = {'id': '3', 'name': 'clone1',
'size': VOLUME_SIZE, 'volume_type_id': 'type4'}
HOST1 = 'host1'
HOST2 = 'host2'
ISCSI_CONN1 = {'initiator': 'init_iqn1',
'host': HOST1}
ISCSI_CONN2 = {'initiator': 'init_iqn2',
'host': HOST2}
FC_CONN1 = {'wwpns': ['init_wwn1', 'init_wwn2'],
'host': HOST1}
FC_CONN2 = {'wwpns': ['init_wwn3', 'init_wwn4'],
'host': HOST2}
ISE_HTTP_IP = 'http://' + ISE_IP1
ISE_VOLUME1_LOCATION = '/storage/volumes/volume1'
ISE_VOLUME1_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME1_LOCATION
ISE_VOLUME2_LOCATION = '/storage/volumes/volume2'
ISE_VOLUME2_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME2_LOCATION
ISE_VOLUME3_LOCATION = '/storage/volumes/volume3'
ISE_VOLUME3_LOCATION_URL = ISE_HTTP_IP + ISE_VOLUME3_LOCATION
ISE_SNAPSHOT_LOCATION = '/storage/volumes/snapshot1'
ISE_SNAPSHOT_LOCATION_URL = ISE_HTTP_IP + ISE_SNAPSHOT_LOCATION
ISE_CLONE_LOCATION = '/storage/volumes/clone1'
ISE_CLONE_LOCATION_URL = ISE_HTTP_IP + ISE_CLONE_LOCATION
ISE_ALLOCATION_LOCATION = '/storage/allocations/a1'
ISE_ALLOCATION_LOCATION_URL = ISE_HTTP_IP + ISE_ALLOCATION_LOCATION
ISE_GET_QUERY_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_XML.split())}
ISE_GET_QUERY_NO_GID_XML =\
"""<array>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
<capability value="49006" string="Clones" type="source"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_NO_GID_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_GID_XML.split())}
ISE_GET_QUERY_NO_CLONE_XML =\
"""<array>
<globalid>ABC12345</globalid>
<capabilities>
<capability value="3" string="Storage" type="source"/>
<capability value="49003" string="Volume Affinity"/>
<capability value="49004" string="Volume Quality of Service IOPS"/>
<capability value="49005" string="Thin Provisioning"/>
</capabilities>
<controllers>
<controller>
<ipaddress>%s</ipaddress>
<rank value="1"/>
</controller>
<controller>
<ipaddress>%s</ipaddress>
<rank value="0"/>
</controller>
</controllers>
</array>""" % (ISE_IP1, ISE_IP2)
ISE_GET_QUERY_NO_CLONE_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_QUERY_NO_CLONE_XML.split())}
ISE_GET_STORAGE_POOLS_XML =\
"""
<pools>
<pool>
<name>Pool 1</name>
<id>1</id>
<status value="0" string="Operational">
<details value="0x00000000">
<detail>None</detail>
</details>
</status>
<available total="60">
<byredundancy>
<raid-0>60</raid-0>
<raid-1>30</raid-1>
<raid-5>45</raid-5>
</byredundancy>
</available>
<used total="40">
<byredundancy>
<raid-0>0</raid-0>
<raid-1>40</raid-1>
<raid-5>0</raid-5>
</byredundancy>
</used>
<media>
<medium>
<health>100</health>
<tier value="4" string="Hybrid"/>
</medium>
</media>
<volumes>
<volume>
<globalid>volgid</globalid>
</volume>
<volume>
<globalid>volgid2</globalid>
</volume>
</volumes>
</pool>
</pools>
"""
ISE_GET_STORAGE_POOLS_RESP =\
{'status': 200,
'location': 'Pool location',
'content': " ".join(ISE_GET_STORAGE_POOLS_XML.split())}
ISE_GET_VOL_STATUS_NO_VOL_NODE_XML =\
"""<volumes></volumes>"""
ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL_STATUS_NO_VOL_NODE_XML.split())}
ISE_GET_VOL_STATUS_NO_STATUS_XML =\
"""<volumes>
<volume self="%s">
</volume>
</volumes>""" % (ISE_VOLUME1_LOCATION_URL)
ISE_GET_VOL_STATUS_NO_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL_STATUS_NO_STATUS_XML.split())}
ISE_GET_VOL1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
<size>10</size>
</volume>
</volumes>""" % (ISE_VOLUME1_LOCATION_URL)
ISE_GET_VOL1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_GET_VOL1_STATUS_XML.split())}
ISE_GET_VOL2_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_VOLUME2_LOCATION_URL)
ISE_GET_VOL2_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME2_LOCATION_URL,
'content': " ".join(ISE_GET_VOL2_STATUS_XML.split())}
ISE_GET_VOL3_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_VOLUME3_LOCATION_URL)
ISE_GET_VOL3_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_VOLUME3_LOCATION_URL,
'content': " ".join(ISE_GET_VOL3_STATUS_XML.split())}
ISE_GET_SNAP1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_SNAPSHOT_LOCATION_URL)
ISE_GET_SNAP1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_GET_SNAP1_STATUS_XML.split())}
ISE_GET_CLONE1_STATUS_XML =\
"""<volumes>
<volume self="%s">
<status value="0" string="Operational">
<details>
<detail>Prepared</detail>
</details>
</status>
</volume>
</volumes>""" % (ISE_CLONE_LOCATION_URL)
ISE_GET_CLONE1_STATUS_RESP =\
{'status': 200,
'location': 'u%s' % ISE_CLONE_LOCATION_URL,
'content': " ".join(ISE_GET_CLONE1_STATUS_XML.split())}
ISE_CREATE_VOLUME_XML = """<volume/>"""
ISE_CREATE_VOLUME_RESP =\
{'status': 201,
'location': ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_CREATE_VOLUME_XML.split())}
ISE_GET_IONETWORKS_XML =\
"""<chap>
<chapin value="0" string="disabled">
<username/>
<password/>
</chapin>
<chapout value="0" string="disabled">
<username/>
<password/>
</chapout>
</chap>"""
ISE_GET_IONETWORKS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_IONETWORKS_XML.split())}
ISE_GET_IONETWORKS_CHAP_XML =\
"""<chap>
<chapin value="1" string="disabled">
<username>abc</username>
<password>abc</password>
</chapin>
<chapout value="0" string="disabled">
<username/>
<password/>
</chapout>
</chap>"""
ISE_GET_IONETWORKS_CHAP_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_IONETWORKS_CHAP_XML.split())}
ISE_DELETE_VOLUME_XML = """<volumes/>"""
ISE_DELETE_VOLUME_RESP =\
{'status': 204,
'location': '',
'content': " ".join(ISE_DELETE_VOLUME_XML.split())}
ISE_GET_ALLOC_WITH_EP_XML =\
"""<allocations>
<allocation self="%s">
<volume>
<volumename>%s</volumename>
</volume>
<endpoints>
<hostname>%s</hostname>
</endpoints>
<lun>1</lun>
</allocation>
</allocations>""" %\
(ISE_ALLOCATION_LOCATION_URL, VOLUME1['name'], HOST1)
ISE_GET_ALLOC_WITH_EP_RESP =\
{'status': 200,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_GET_ALLOC_WITH_EP_XML.split())}
ISE_GET_ALLOC_WITH_NO_ALLOC_XML =\
"""<allocations self="%s"/>""" % ISE_ALLOCATION_LOCATION_URL
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP =\
{'status': 200,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_GET_ALLOC_WITH_NO_ALLOC_XML.split())}
ISE_DELETE_ALLOC_XML = """<allocations/>"""
ISE_DELETE_ALLOC_RESP =\
{'status': 204,
'location': '',
'content': " ".join(ISE_DELETE_ALLOC_XML.split())}
ISE_GET_HOSTS_NOHOST_XML =\
"""<hosts self="http://ip/storage/hosts"/>"""
ISE_GET_HOSTS_NOHOST_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_NOHOST_XML.split())}
ISE_GET_HOSTS_HOST1_XML =\
"""<hosts self="http://ip/storage/hosts">
<host self="http://ip/storage/hosts/1">
<name>%s</name>
<id>1</id>
<endpoints self="http://ip/storage/endpoints">
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_wwn1</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep2">
<globalid>init_wwn2</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep1">
<globalid>init_iqn1</globalid>
</endpoint>
</endpoints>
</host>
</hosts>""" % HOST1
ISE_GET_HOSTS_HOST1_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_HOST1_XML.split())}
ISE_GET_HOSTS_HOST2_XML =\
"""<hosts self="http://ip/storage/hosts">
<host self="http://ip/storage/hosts/2">
<name>%s</name>
<id>2</id>
<endpoints self="http://ip/storage/endpoints">
<endpoint self="http://ip/storage/endpoints/ep3">
<globalid>init_wwn3</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep4">
<globalid>init_wwn4</globalid>
</endpoint>
<endpoint self="http://ip/storage/endpoints/ep3">
<globalid>init_iqn2</globalid>
</endpoint>
</endpoints>
</host>
</hosts>""" % HOST2
ISE_GET_HOSTS_HOST2_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_HOSTS_HOST2_XML.split())}
ISE_CREATE_HOST_XML =\
"""<hosts self="http://ip/storage/hosts"/>"""
ISE_CREATE_HOST_RESP =\
{'status': 201,
'location': 'http://ip/storage/hosts/host1',
'content': " ".join(ISE_CREATE_HOST_XML.split())}
ISE_CREATE_ALLOC_XML =\
"""<allocations self="http://ip/storage/allocations"/>"""
ISE_CREATE_ALLOC_RESP =\
{'status': 201,
'location': ISE_ALLOCATION_LOCATION_URL,
'content': " ".join(ISE_CREATE_ALLOC_XML.split())}
ISE_GET_ENDPOINTS_XML =\
"""<endpoints self="http://ip/storage/endpoints">
<endpoint type="array" self="http://ip/storage/endpoints/isegid">
<globalid>isegid</globalid>
<protocol>iSCSI</protocol>
<array self="http://ip/storage/arrays/ise1">
<globalid>ise1</globalid>
</array>
<host/>
<allocations self="http://ip/storage/allocations">
<allocation self="%s">
<globalid>
a1
</globalid>
</allocation>
</allocations>
</endpoint>
<endpoint type="array" self="http://ip/storage/endpoints/isegid">
<globalid>isegid</globalid>
<protocol>Fibre Channel</protocol>
<array self="http://ip/storage/arrays/ise1">
<globalid>ise1</globalid>
</array>
<host/>
<allocations self="http://ip/storage/allocations">
<allocation self="%s">
<globalid>
a1
</globalid>
</allocation>
</allocations>
</endpoint>
</endpoints>""" % (ISE_ALLOCATION_LOCATION_URL,
ISE_ALLOCATION_LOCATION_URL)
ISE_GET_ENDPOINTS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_ENDPOINTS_XML.split())}
ISE_GET_CONTROLLERS_XML =\
"""<controllers self="http://ip/storage/arrays/controllers">
<controller>
<status/>
<ioports>
<ioport>
<ipaddresses>
<ipaddress>%s</ipaddress>
</ipaddresses>
<endpoint>
<globalid>isegid</globalid>
</endpoint>
</ioport>
</ioports>
<fcports>
<fcport>
<wwn>%s</wwn>
</fcport>
<fcport>
<wwn>%s</wwn>
</fcport>
</fcports>
</controller>
<controller>
<status/>
<ioports>
<ioport>
<ipaddresses>
<ipaddress>%s</ipaddress>
</ipaddresses>
<endpoint>
<globalid>isegid</globalid>
</endpoint>
</ioport>
</ioports>
<fcports>
<fcport>
<wwn>%s</wwn>
</fcport>
<fcport>
<wwn>%s</wwn>
</fcport>
</fcports>
</controller>
</controllers>""" % (ISE_ISCSI_IP1, ISE_WWN1, ISE_WWN2,
ISE_ISCSI_IP2, ISE_WWN3, ISE_WWN4)
ISE_GET_CONTROLLERS_RESP =\
{'status': 200,
'location': '',
'content': " ".join(ISE_GET_CONTROLLERS_XML.split())}
ISE_CREATE_SNAPSHOT_XML = """<snapshot/>"""
ISE_CREATE_SNAPSHOT_RESP =\
{'status': 201,
'location': ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_CREATE_SNAPSHOT_XML.split())}
ISE_PREP_SNAPSHOT_XML = """<snapshot/>"""
ISE_PREP_SNAPSHOT_RESP =\
{'status': 202,
'location': ISE_SNAPSHOT_LOCATION_URL,
'content': " ".join(ISE_PREP_SNAPSHOT_XML.split())}
ISE_MODIFY_VOLUME_XML = """<volume/>"""
ISE_MODIFY_VOLUME_RESP =\
{'status': 201,
'location': ISE_VOLUME1_LOCATION_URL,
'content': " ".join(ISE_MODIFY_VOLUME_XML.split())}
ISE_BAD_CONNECTION_RESP =\
{'status': 0,
'location': '',
'content': " "}
ISE_400_RESP =\
{'status': 400,
'location': '',
'content': ""}
ISE_GET_VOL_STATUS_404_XML = \
"""<response value="404" index="3">VOLUME not found.</response>"""
ISE_GET_VOL_STATUS_404_RESP =\
{'status': 404,
'location': '',
'content': " ".join(ISE_GET_VOL_STATUS_404_XML.split())}
ISE_400_INVALID_STATE_XML = \
"""<response value="400">Not in a valid state.</response>"""
ISE_400_INVALID_STATE_RESP =\
{'status': 400,
'location': '',
'content': " ".join(ISE_400_INVALID_STATE_XML.split())}
ISE_409_CONFLICT_XML = \
"""<response value="409">Conflict</response>"""
ISE_409_CONFLICT_RESP =\
{'status': 409,
'location': '',
'content': " ".join(ISE_409_CONFLICT_XML.split())}
DRIVER = "cinder.volume.drivers.xio.XIOISEDriver"
@mock.patch(DRIVER + "._opener", autospec=True)
class XIOISEDriverTestCase(object):
# Test cases for X-IO volume driver
def setUp(self):
super(XIOISEDriverTestCase, self).setUp()
# set good default values
self.configuration = mock.Mock()
self.configuration.san_ip = ISE_IP1
self.configuration.san_user = 'fakeuser'
self.configuration.san_password = 'fakepass'
self.configuration.iscsi_ip_address = ISE_ISCSI_IP1
self.configuration.driver_use_ssl = False
self.configuration.ise_completion_retries = 30
self.configuration.ise_connection_retries = 5
self.configuration.ise_retry_interval = 1
self.configuration.volume_backend_name = 'ise1'
self.driver = None
self.protocol = ''
self.connector = None
self.connection_failures = 0
self.hostgid = ''
self.use_response_table = 1
def setup_test(self, protocol):
self.protocol = protocol
# set good default values
if self.protocol == 'iscsi':
self.configuration.ise_protocol = protocol
self.connector = ISCSI_CONN1
self.hostgid = self.connector['initiator']
elif self.protocol == 'fibre_channel':
self.configuration.ise_protocol = protocol
self.connector = FC_CONN1
self.hostgid = self.connector['wwpns'][0]
def setup_driver(self):
        # this sets up the driver object with previously set configuration values
if self.configuration.ise_protocol == 'iscsi':
self.driver =\
xio.XIOISEISCSIDriver(configuration=self.configuration)
elif self.configuration.ise_protocol == 'fibre_channel':
self.driver =\
xio.XIOISEFCDriver(configuration=self.configuration)
elif self.configuration.ise_protocol == 'test_prot':
# if test_prot specified override with correct protocol
# used to bypass protocol specific driver
self.configuration.ise_protocol = self.protocol
self.driver = xio.XIOISEDriver(configuration=self.configuration)
else:
# Invalid protocol type
raise exception.Invalid()
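    # Minimal sketch (illustrative only, not part of the original tests):
    # assuming a protocol-specific subclass drives these helpers, a typical
    # sequence is to pick the transport, build the driver, then queue the
    # fake REST responses:
    #
    #   self.setup_test('iscsi')      # or 'fibre_channel'
    #   self.setup_driver()
    #   mock_req.side_effect = iter([ISE_GET_QUERY_RESP, ...])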
#################################
## UNIT TESTS ##
#################################
    def test_do_setup(self, mock_req):
        self.setup_driver()
        mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
        self.driver.do_setup(None)
def test_negative_do_setup_no_clone_support(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_CLONE_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_negative_do_setup_bad_globalid_none(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_NO_GID_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.do_setup, None)
def test_check_for_setup_error(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.driver.check_for_setup_error()
def test_negative_do_setup_bad_ip(self, mock_req):
# set san_ip to bad value
self.configuration.san_ip = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_negative_do_setup_bad_user_blank(self, mock_req):
# set san_user to bad value
self.configuration.san_login = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_negative_do_setup_bad_password_blank(self, mock_req):
# set san_password to bad value
self.configuration.san_password = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP])
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.check_for_setup_error)
def test_get_volume_stats(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_STORAGE_POOLS_RESP])
backend_name = self.configuration.volume_backend_name
if self.configuration.ise_protocol == 'iscsi':
protocol = 'iSCSI'
else:
protocol = 'fibre_channel'
exp_result = {}
exp_result = {'vendor_name': "X-IO",
'driver_version': "1.1.0",
'volume_backend_name': backend_name,
'reserved_percentage': 0,
'total_capacity_gb': 100,
'free_capacity_gb': 60,
'QoS_support': True,
'affinity': True,
'thin': False,
'pools': [{'pool_ise_name': "Pool 1",
'pool_name': "1",
'status': "Operational",
'status_details': "None",
'free_capacity_gb': 60,
'free_capacity_gb_raid_0': 60,
'free_capacity_gb_raid_1': 30,
'free_capacity_gb_raid_5': 45,
'allocated_capacity_gb': 40,
'allocated_capacity_gb_raid_0': 0,
'allocated_capacity_gb_raid_1': 40,
'allocated_capacity_gb_raid_5': 0,
'health': 100,
'media': "Hybrid",
'total_capacity_gb': 100,
'QoS_support': True,
'reserved_percentage': 0}],
'active_volumes': 2,
'storage_protocol': protocol}
act_result = self.driver.get_volume_stats(True)
self.assertDictMatch(exp_result, act_result)
def test_get_volume_stats_ssl(self, mock_req):
self.configuration.driver_use_ssl = True
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_STORAGE_POOLS_RESP])
self.driver.get_volume_stats(True)
def test_negative_get_volume_stats_bad_primary(self, mock_req):
self.configuration.ise_connection_retries = 1
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_BAD_CONNECTION_RESP,
ISE_GET_STORAGE_POOLS_RESP])
self.driver.get_volume_stats(True)
def test_create_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_RESP])
exp_result = {}
exp_result = {"provider_auth": ""}
act_result = self.driver.create_volume(VOLUME1)
self.assertDictMatch(exp_result, act_result)
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME1)
def test_create_volume_chap(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_CHAP_RESP])
exp_result = {}
exp_result = {"provider_auth": "CHAP abc abc"}
act_result = self.driver.create_volume(VOLUME1)
self.assertDictMatch(exp_result, act_result)
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME1)
def test_create_volume_type_none(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_CREATE_VOLUME_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.create_volume(VOLUME3)
def test_delete_volume(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP])
self.setup_driver()
self.driver.delete_volume(VOLUME1)
def test_delete_volume_none_existing(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.setup_driver()
self.driver.delete_volume(VOLUME2)
def test_initialize_connection_positive(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST2_RESP,
ISE_CREATE_HOST_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_CREATE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.setup_driver()
exp_result = {}
if self.configuration.ise_protocol == 'iscsi':
exp_result = {"driver_volume_type": "iscsi",
"data": {"target_lun": '1',
"volume_id": '1',
"access_mode": 'rw',
"target_discovered": False,
"target_iqn": ISE_IQN,
"target_portal": ISE_ISCSI_IP1 + ":3260"}}
elif self.configuration.ise_protocol == 'fibre_channel':
exp_result = {"driver_volume_type": "fibre_channel",
"data": {"target_lun": '1',
"volume_id": '1',
"access_mode": 'rw',
"target_discovered": True,
"initiator_target_map": ISE_INIT_TARGET_MAP,
"target_wwn": ISE_TARGETS}}
act_result =\
self.driver.initialize_connection(VOLUME1, self.connector)
self.assertDictMatch(exp_result, act_result)
def test_initialize_connection_positive_chap(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST2_RESP,
ISE_CREATE_HOST_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_CREATE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.setup_driver()
exp_result = {}
if self.configuration.ise_protocol == 'iscsi':
exp_result = {"driver_volume_type": "iscsi",
"data": {"target_lun": '1',
"volume_id": '2',
"access_mode": 'rw',
"target_discovered": False,
"target_iqn": ISE_IQN,
"target_portal": ISE_ISCSI_IP1 + ":3260",
'auth_method': 'CHAP',
'auth_username': 'abc',
'auth_password': 'abc'}}
elif self.configuration.ise_protocol == 'fibre_channel':
exp_result = {"driver_volume_type": "fibre_channel",
"data": {"target_lun": '1',
"volume_id": '2',
"access_mode": 'rw',
"target_discovered": True,
"initiator_target_map": ISE_INIT_TARGET_MAP,
"target_wwn": ISE_TARGETS}}
act_result =\
self.driver.initialize_connection(VOLUME2, self.connector)
self.assertDictMatch(exp_result, act_result)
def test_terminate_connection_positive(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_GET_CONTROLLERS_RESP])
self.driver.terminate_connection(VOLUME1, self.connector)
def test_terminate_connection_positive_noalloc(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_ALLOC_WITH_NO_ALLOC_RESP,
ISE_GET_CONTROLLERS_RESP])
self.driver.terminate_connection(VOLUME1, self.connector)
def test_negative_terminate_connection_bad_host(self, mock_req):
self.setup_driver()
test_connector = {}
if self.configuration.ise_protocol == 'iscsi':
test_connector['initiator'] = 'bad_iqn'
test_connector['host'] = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
test_connector['wwpns'] = 'bad_wwn'
test_connector['host'] = ''
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_HOSTS_HOST1_RESP,
ISE_GET_CONTROLLERS_RESP])
self.driver.terminate_connection(VOLUME1, test_connector)
def test_create_snapshot(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_snapshot(SNAPSHOT1)
def test_negative_create_snapshot_invalid_state_recover(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_400_INVALID_STATE_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_snapshot(SNAPSHOT1)
def test_negative_create_snapshot_invalid_state_norecover(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_400_INVALID_STATE_RESP])
self.configuration.ise_completion_retries = 1
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.create_snapshot, SNAPSHOT1)
def test_negative_create_snapshot_conflict(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_409_CONFLICT_RESP])
self.configuration.ise_completion_retries = 1
self.setup_driver()
self.assertRaises(exception.XIODriverException,
self.driver.create_snapshot, SNAPSHOT1)
def test_delete_snapshot(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_ALLOC_WITH_EP_RESP,
ISE_DELETE_ALLOC_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_DELETE_VOLUME_RESP])
self.setup_driver()
self.driver.delete_snapshot(SNAPSHOT1)
def test_clone_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_SNAP1_STATUS_RESP])
self.setup_driver()
self.driver.create_cloned_volume(CLONE1, VOLUME1)
def test_extend_volume(self, mock_req):
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.setup_driver()
self.driver.extend_volume(VOLUME1, NEW_VOLUME_SIZE)
def test_retype_volume(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
# New volume type
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "5",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT2', extra_specs)
specs = {'qos:minIOPS': '30',
'qos:maxIOPS': '3000',
'qos:burstIOPS': '10000'}
qos = qos_specs.create(ctxt, 'fake-qos2', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.setup_driver()
self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0)
def test_create_volume_from_snapshot(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
SNAPSHOT1['volume_type_id'] = type_ref['id']
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_SNAP1_STATUS_RESP,
ISE_PREP_SNAPSHOT_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_CREATE_SNAPSHOT_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.setup_driver()
self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1)
def test_manage_existing(self, mock_req):
ctxt = context.get_admin_context()
extra_specs = {"Feature:Pool": "1",
"Feature:Raid": "1",
"Affinity:Type": "flash",
"Alloc:Type": "thick"}
type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
specs = {'qos:minIOPS': '20',
'qos:maxIOPS': '2000',
'qos:burstIOPS': '5000'}
qos = qos_specs.create(ctxt, 'fake-qos', specs)
qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
VOLUME1['volume_type_id'] = type_ref['id']
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'})
def test_manage_existing_no_source_name(self, mock_req):
self.setup_driver()
if self.configuration.ise_protocol == 'iscsi':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP,
ISE_GET_IONETWORKS_RESP])
elif self.configuration.ise_protocol == 'fibre_channel':
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP,
ISE_MODIFY_VOLUME_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.manage_existing, VOLUME1, {})
def test_manage_existing_get_size(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
exp_result = 10
act_result = \
self.driver.manage_existing_get_size(VOLUME1,
{'source-name': 'a'})
self.assertEqual(exp_result, act_result)
def test_manage_existing_get_size_no_source_name(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.manage_existing_get_size, VOLUME1, {})
def test_unmanage(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL1_STATUS_RESP])
self.driver.unmanage(VOLUME1)
def test_negative_unmanage_no_volume_status_xml(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_NO_STATUS_RESP])
self.driver.unmanage(VOLUME1)
def test_negative_unmanage_no_volume_xml(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_NO_VOL_NODE_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.unmanage, VOLUME1)
def test_negative_unmanage_non_existing_volume(self, mock_req):
self.setup_driver()
mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
ISE_GET_VOL_STATUS_404_RESP])
self.assertRaises(exception.XIODriverException,
self.driver.unmanage, VOLUME1)
class XIOISEISCSIDriverTestCase(XIOISEDriverTestCase, test.TestCase):
def setUp(self):
super(XIOISEISCSIDriverTestCase, self).setUp()
self.setup_test('iscsi')
class XIOISEFCDriverTestCase(XIOISEDriverTestCase, test.TestCase):
def setUp(self):
super(XIOISEFCDriverTestCase, self).setUp()
self.setup_test('fibre_channel')
|
{
"content_hash": "94969499cbc4030817591139b9a32add",
"timestamp": "",
"source": "github",
"line_count": 1253,
"max_line_length": 79,
"avg_line_length": 39.38707102952913,
"alnum_prop": 0.5005268276868212,
"repo_name": "blueboxgroup/cinder",
"id": "37c1ee24131cf12b8c4fe2cc770c0795120aeb2d",
"size": "49990",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/test_xio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10024269"
},
{
"name": "Shell",
"bytes": "9905"
}
],
"symlink_target": ""
}
|
import unittest
from nose.tools import eq_
from redis_shard.hashring import HashRing
class TestHashRing(unittest.TestCase):
def test_remove_node(self):
replicas = 128
hash_ring_object = HashRing(
nodes=["redis01", "redis02"],
replicas=replicas,
)
hash_ring_object.remove_node("redis01")
eq_(hash_ring_object.nodes, ["redis02"])
eq_(list(hash_ring_object.ring.values()), ["redis02"] * replicas)
|
{
"content_hash": "cc174cda6343184e5d1d32de61ca847f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 26.38888888888889,
"alnum_prop": 0.6252631578947369,
"repo_name": "keakon/redis-shard",
"id": "575acf2bd2d75391fcd02616b512e42f27eab5b9",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hashring.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "39854"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.errors",
marshal="google.ads.googleads.v11",
manifest={"AssetSetErrorEnum",},
)
class AssetSetErrorEnum(proto.Message):
r"""Container for enum describing possible asset set errors.
"""
class AssetSetError(proto.Enum):
r"""Enum describing possible asset set errors."""
UNSPECIFIED = 0
UNKNOWN = 1
DUPLICATE_ASSET_SET_NAME = 2
__all__ = tuple(sorted(__protobuf__.manifest))
|
{
"content_hash": "08523932d33e3a8a8cfcf7cf915468a4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 23.954545454545453,
"alnum_prop": 0.6565464895635673,
"repo_name": "googleads/google-ads-python",
"id": "ad7f8c4d4686335402aa6dc6f3de2353f2c95261",
"size": "1127",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/errors/types/asset_set_error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
import glance.api.v2.schemas
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
class TestSchemasController(test_utils.BaseTestCase):
def setUp(self):
super(TestSchemasController, self).setUp()
self.controller = glance.api.v2.schemas.Controller()
def test_image(self):
req = unit_test_utils.get_fake_request()
output = self.controller.image(req)
self.assertEqual(output['name'], 'image')
expected = set(['status', 'name', 'tags', 'checksum', 'created_at',
'disk_format', 'updated_at', 'visibility', 'self',
'file', 'container_format', 'schema', 'id', 'size',
'direct_url', 'min_ram', 'min_disk', 'protected',
'locations'])
self.assertEqual(set(output['properties'].keys()), expected)
def test_images(self):
req = unit_test_utils.get_fake_request()
output = self.controller.images(req)
self.assertEqual(output['name'], 'images')
expected = set(['images', 'schema', 'first', 'next'])
self.assertEqual(set(output['properties'].keys()), expected)
expected = set(['{schema}', '{first}', '{next}'])
actual = set([link['href'] for link in output['links']])
self.assertEqual(actual, expected)
|
{
"content_hash": "9e1771c1f90e9c43ef9f88de779791f1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 43.774193548387096,
"alnum_prop": 0.5946941783345615,
"repo_name": "cloudbau/glance",
"id": "dfa2d0e7cc0264cec86844e7c3a73c56a7602c5c",
"size": "1994",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "glance/tests/unit/v2/test_schemas_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2489476"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
}
|
r'''This module contains a base class for modeling computation graphs.
Neural networks are really just a concise, computational way of describing a
mathematical model of a computation graph that operates on a particular set of
data.
At a high level, a neural network is a computation graph that describes a
parametric mapping
.. math::
F_\theta: \mathcal{S} \to \mathcal{T}
between a source space :math:`\mathcal{S}` and a target space
:math:`\mathcal{T}`, using parameters :math:`\theta`. For example, suppose we
are processing vectors representing the MNIST handwritten digits. We could think
of :math:`\mathcal{S} = \mathbb{R}^{28 \times 28} = \mathbb{R}^{784}` (i.e., the
space of all 28×28 images), and for classifying the MNIST digits we could think
of :math:`\mathcal{T} = \mathbb{R}^{10}`.
This mapping is assumed to be fairly complex. If it were not -- if you could
capture the mapping using a simple expression like :math:`F_{\{a\}}(x) = ax^2`
-- then we would just use the expression directly and not need to deal with an
entire network. So if the mapping is complex, we will do a couple of things to
make our problem tractable. First, we will assume some structure for
:math:`F_\theta`. Second, we will fit our model to some set of data that we have
obtained, so that our parameters :math:`\theta` are tuned to the problem at
hand.
Graph structure
---------------
.. image:: _static/feedforward_layers.svg
The mapping :math:`F_\theta` is implemented in neural networks by assuming a
specific, layered form. Computation nodes -- also called units or (sometimes)
neurons -- are arranged in a :math:`k+1` partite graph, with layer :math:`k`
containing :math:`n_k` nodes. The number of input nodes in the graph is referred
to as :math:`n_0`.
Most layers are connected together using a set of weights. A **weight matrix**
:math:`W^k \in \mathbb{R}^{n_{k-1} \times n_k}` specifies the strength of the
connection between nodes in layer :math:`k` and those in layer :math:`k-1` --
all other pairs of nodes are typically not connected. Each layer of nodes also
typically has a **bias vector** that determines the offset of each node from the
origin. Together, the parameters :math:`\theta` of the model are these :math:`k`
weight matrices and :math:`k` bias vectors (there are no weights or biases for
the input nodes in the graph).
'''
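# A small numeric sketch of the mapping described above (illustrative only;
# the layer sizes are assumptions, not part of this module): a 784-100-10
# feedforward net classifying MNIST digits has parameters
#   theta = (W^1, b^1, W^2, b^2),
# with W^1 in R^{784 x 100}, b^1 in R^100, W^2 in R^{100 x 10}, b^2 in R^10,
# for a total of 784*100 + 100 + 100*10 + 10 = 79510 learnable values.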
import climate
import downhill
import fnmatch
import gzip
import hashlib
import numpy as np
import pickle
import theano
import theano.tensor as TT
import time
import warnings
from . import layers
from . import losses
from . import trainer
logging = climate.get_logger(__name__)
class Network(object):
'''The network class encapsulates a network computation graph.
In addition to defining standard functionality for common types of
feedforward nets, there are also many options for specifying topology and
regularization, several of which must be provided to the constructor at
initialization time.
Parameters
----------
layers : sequence of int, tuple, dict, or :class:`Layer <layers.Layer>`
A sequence of values specifying the layer configuration for the network.
For more information, please see :ref:`creating-specifying-layers`.
weighted : bool, optional
If True, the network will require an additional input during training
that provides weights for the target outputs of the network; the weights
will be the last input argument to the network, and they must be the
same shape as the target output.
This can be particularly useful for recurrent networks, where the length
of each input sequence in a minibatch is not necessarily the same number
        of time steps, or for classifier networks where the prior probability
        of one class is significantly different from another's. The default is not
to use weighted outputs.
loss : str or :class:`Loss <losses.Loss>`
The name of a loss function to optimize when training this network
model.
sparse_input : bool
If True, create an input variable that can hold a sparse matrix.
Defaults to False, which assumes all arrays are dense.
Attributes
----------
loss : :class:`Loss <losses.Loss>`
A loss to be computed when optimizing this network model.
layers : list of :class:`Layer <layers.Layer>`
A list of the layers in this network model.
'''
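    # Minimal usage sketch (hypothetical layer sizes; 'mse' is the default
    # loss): Network(layers=(784, 100, 10)) builds a 784-unit input layer,
    # a 100-unit hidden layer, and a 10-unit output layer.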
def __init__(self, layers, loss='mse', weighted=False, sparse_input=False, **kwargs):
self._graphs = {} # cache of symbolic computation graphs
self._functions = {} # cache of callable feedforward functions
self.loss = losses.build(
loss, weighted=weighted, sparse_input=sparse_input, **kwargs)
self.layers = []
for i, layer in enumerate(layers):
self.add_layer(layer, is_output=i == len(layers) - 1)
logging.info('network has %d total parameters', self.num_params)
def add_layer(self, layer, is_output=False):
'''Add a layer to our network graph.
Parameters
----------
layer : int, tuple, dict, or :class:`Layer <layers.Layer>`
A value specifying the layer to add. For more information, please
see :ref:`creating-specifying-layers`.
is_output : bool, optional
True iff this is the output layer for the graph. This influences the
default activation function used for the layer: output layers in
most models have a linear activation, while output layers in
classifier networks default to a softmax activation.
'''
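        # Accepted forms, per the parsing below (values are illustrative):
        # an int gives a feedforward layer of that size; a tuple such as
        # (100, 'tanh') also sets the activation or layer class by name; a
        # dict such as {'form': 'feedforward', 'size': 100} overrides any of
        # the default keyword arguments.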
# if the given layer is a Layer instance, just add it and move on.
if isinstance(layer, layers.Layer):
self.layers.append(layer)
return
# for the first layer, create an 'input' layer.
if len(self.layers) == 0:
assert isinstance(layer, int), 'first layer must be an int'
self.layers.append(layers.build('input', size=layer, name='in'))
return
# here we set up some defaults for constructing a new layer.
act = getattr(self, 'DEFAULT_OUTPUT_ACTIVATION', 'linear')
form = 'feedforward'
kwargs = dict(
name='out' if is_output else 'hid{}'.format(len(self.layers)),
activation=act if is_output else 'relu',
inputs={self.layers[-1].output_name(): self.layers[-1].size},
size=layer,
)
# if layer is a tuple, assume that it contains one or more of the following:
# - a layers.Layer subclass to construct (type)
# - the name of a layers.Layer class (str)
# - the name of an activation function (str)
# - the number of units in the layer (int)
if isinstance(layer, (tuple, list)):
for el in layer:
try:
if issubclass(el, layers.Layer):
form = el.__name__
except TypeError:
pass
if isinstance(el, str):
if layers.Layer.is_registered(el):
form = el
else:
kwargs['activation'] = el
if isinstance(el, int):
kwargs['size'] = el
# if layer is a dictionary, try to extract a form for the layer, and
# override our default keyword arguments with the rest.
if isinstance(layer, dict):
layer = dict(layer)
if 'form' in layer:
form = layer.pop('form').lower()
kwargs.update(layer)
if isinstance(form, str) and form.lower() == 'bidirectional':
if not (isinstance(layer, dict) and 'name' in layer):
kwargs['name'] = 'bd{}{}'.format(
kwargs.get('worker', 'rnn'), len(self.layers))
if isinstance(form, str) and form.lower() == 'tied':
partner = kwargs.get('partner')
if isinstance(partner, str):
# if the partner is named, just get that layer.
partner = [l for l in self.layers if l.name == partner][0]
else:
# otherwise, we look backwards through our list of layers.
# any "tied" layer that we find increases a counter by one,
# and any "untied" layer decreases the counter by one. our
# partner is the first layer we find with count zero.
#
# this is intended to handle the hopefully common case of a
# (possibly deep) tied-weights autoencoder.
tied = 1
partner = None
for l in self.layers[::-1]:
tied += 1 if isinstance(l, layers.Tied) else -1
if tied == 0:
partner = l
break
assert partner is not None, \
'could not find tied layer partner for {} in {}'.format(
layer, self.layers)
kwargs['partner'] = partner
self.layers.append(layers.build(form, **kwargs))
def itertrain(self, train, valid=None, algo='rmsprop', subalgo='rmsprop',
save_every=0, save_progress=None, **kwargs):
'''Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : :class:`Dataset <downhill.dataset.Dataset>` or list
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
            training dataset. If it is a list of numpy arrays or a list of
callables, it will be converted to a ``downhill.Dataset`` and then
used as the training set.
valid : :class:`Dataset <downhill.dataset.Dataset>` or list, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algo : str, optional
An optimization algorithm to use for training our network. If not
provided, :class:`RMSProp <downhill.adaptive.RMSProp>` will be used.
subalgo : str, optional
An optimization algorithm to use for a trainer that requires a
"sub-algorithm," sugh as an unsupervised pretrainer. Defaults to
:class:`RMSProp <downhill.adaptive.RMSProp>`.
save_every : int or float, optional
If this is nonzero and ``save_progress`` is not None, then the model
being trained will be saved periodically. If this is a float, it is
treated as a number of minutes to wait between savings. If it is an
int, it is treated as the number of training epochs to wait between
savings. Defaults to 0.
save_progress : str, optional
            If this is not None, and ``save_every`` is nonzero, then save the
model periodically during training. This parameter gives the full
path of a file to save the model. If this name contains a "{}"
format specifier, it will be filled with the integer Unix timestamp
at the time the model is saved. Defaults to None.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
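        # Usage sketch (hypothetical arrays X, Y and keyword values):
        #   for tm, vm in net.itertrain([X, Y], algo='rmsprop', patience=5):
        #       print(tm['loss'])
        # Each iteration yields the latest training and validation monitors.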
def create_dataset(data, **kwargs):
name = kwargs.get('name', 'dataset')
s = '{}_batches'.format(name)
return downhill.Dataset(
data, name=name, batch_size=kwargs.get('batch_size', 32),
iteration_size=kwargs.get('iteration_size', kwargs.get(s)),
axis=kwargs.get('axis', 0))
# set up datasets ...
if valid is None:
valid = train
if not isinstance(valid, downhill.Dataset):
valid = create_dataset(valid, name='valid', **kwargs)
if not isinstance(train, downhill.Dataset):
train = create_dataset(train, name='train', **kwargs)
if 'algorithm' in kwargs:
warnings.warn(
'please use the "algo" keyword arg instead of "algorithm"',
DeprecationWarning)
algo = kwargs.pop('algorithm')
if isinstance(algo, (list, tuple)):
algo = algo[0]
# set up trainer ...
if isinstance(algo, str):
algo = algo.lower()
if algo == 'sample':
algo = trainer.SampleTrainer(self)
elif algo.startswith('layer') or algo.startswith('sup'):
algo = trainer.SupervisedPretrainer(subalgo, self)
elif algo.startswith('pre') or algo.startswith('unsup'):
algo = trainer.UnsupervisedPretrainer(subalgo, self)
else:
algo = trainer.DownhillTrainer(algo, self)
# set up check to save model ...
def needs_saving(elapsed, iteration):
if not save_progress:
return False
if isinstance(save_every, float):
return elapsed > 60 * save_every
if isinstance(save_every, int):
return iteration % save_every == 0
return False
# train it!
start = time.time()
for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):
yield monitors
now = time.time()
if i and needs_saving(now - start, i):
self.save(save_progress.format(int(now)))
start = now
def train(self, *args, **kwargs):
'''Train the network until the trainer converges.
All arguments are passed to :func:`itertrain`.
Returns
-------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
monitors = None
for monitors in self.itertrain(*args, **kwargs):
pass
return monitors
def _hash(self, **kwargs):
'''Construct a string key for representing a computation graph.
This key will be unique for a given network topology and set of keyword
arguments.
Returns
-------
key : str
A hash representing the computation graph for the current network.
'''
def add(s):
h.update(str(s).encode('utf-8'))
h = hashlib.md5()
# See discussions
# https://groups.google.com/forum/#!topic/theanets/nL6Nis29B7Q
add(sorted(kwargs.items(), key=lambda x: x[0]))
for l in self.layers:
add('{}{}{}'.format(l.__class__.__name__, l.name, l.size))
return h.hexdigest()
def build_graph(self, **kwargs):
'''Connect the layers in this network to form a computation graph.
Parameters
----------
noise : dict mapping str to float, optional
A dictionary that maps layer output names to standard deviation
values. For an output "layer:output" in the graph, white noise with
the given standard deviation will be added to the output. Defaults
to 0 for all layer outputs.
dropout : dict mapping str to float in [0, 1], optional
A dictionary that maps layer output names to dropout values. For an
output "layer:output" in the graph, the given fraction of units in
            the output will be randomly set to 0. Defaults to 0 for all layer
outputs.
Returns
-------
outputs : list of theano variables
A list of expressions giving the output of each layer in the graph.
updates : list of update tuples
A list of updates that should be performed by a theano function that
computes something using this graph.
'''
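        # Example kwargs (illustrative; this assumes layer output names follow
        # the "name:out" convention of Layer.output_name()):
        #   build_graph(noise={'hid1:out': 0.1}, dropout={'in:out': 0.3})
        # adds white noise to the first hidden output and randomly zeros 30%
        # of the input units.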
key = self._hash(**kwargs)
if key not in self._graphs:
noise = kwargs.get('noise')
if noise is None:
noise = {}
for i, l in enumerate(self.layers):
which = 'hidden_noise'
if i == 0:
which = 'input_noise'
if i == len(self.layers) - 1:
which = 'output_noise'
noise[l.output_name()] = kwargs.get(which, 0)
dropout = kwargs.get('dropout')
if dropout is None:
dropout = {}
for i, l in enumerate(self.layers):
which = 'hidden_dropouts'
if i == 0:
which = 'input_dropouts'
if i == len(self.layers) - 1:
which = 'output_dropouts'
dropout[l.output_name()] = kwargs.get(which, 0)
outputs, updates = dict(x=self.loss.input), []
for i, layer in enumerate(self.layers):
out, upd = layer.connect(outputs, noise, dropout)
outputs.update(out)
updates.extend(upd)
outputs['out'] = outputs[layer.output_name()]
self._graphs[key] = outputs, updates
return self._graphs[key]
@property
def params(self):
'''A list of the learnable theano parameters for this network.'''
return [p for l in self.layers for p in l.params]
@property
def num_params(self):
'''Number of parameters in the entire network model.'''
return sum(l.num_params for l in self.layers)
def find(self, layer, param):
'''Get a parameter from a layer in the network.
Parameters
----------
layer : int or str
The layer that owns the parameter to return.
If this is an integer, then 0 refers to the input layer, 1 refers
to the first hidden layer, 2 to the second, and so on.
If this is a string, the layer with the corresponding name, if any,
will be used.
param : int or str
Name of the parameter to retrieve from the specified layer, or its
index in the parameter list of the layer.
Raises
------
KeyError
If there is no such layer, or if there is no such parameter in the
specified layer.
Returns
-------
param : theano shared variable
A shared parameter variable from the indicated layer.
'''
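        # Example (parameter names are assumptions): net.find('hid1', 'w')
        # returns the weight matrix of the layer named "hid1", while
        # net.find(1, 0) returns the first parameter of the first hidden layer.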
for i, l in enumerate(self.layers):
if layer == i or layer == l.name:
return l.find(param)
raise KeyError(layer)
def feed_forward(self, x, **kwargs):
'''Compute a forward pass of all layers from the given input.
All keyword arguments are passed directly to :func:`build_graph`.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
        outputs : dict mapping str to ndarray (num-examples, num-units)
            The activation values of each layer output in the network when
            given input `x`. Each array contains one row per input example;
            the columns of each array correspond to units in the respective
            layer output. The "output" of the network is stored under the
            last layer's output name.
'''
key = self._hash(**kwargs)
if key not in self._functions:
outputs, updates = self.build_graph(**kwargs)
labels, exprs = list(outputs.keys()), list(outputs.values())
self._functions[key] = (labels, theano.function(
[self.loss.input], exprs, updates=updates))
labels, f = self._functions[key]
return dict(zip(labels, f(x)))
def predict(self, x):
'''Compute a forward pass of the inputs, returning the network output.
Parameters
----------
x : ndarray (num-examples, num-variables)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
Returns
-------
y : ndarray (num-examples, num-variables)
Returns the values of the network output units when given input `x`.
Rows in this array correspond to examples, and columns to output
variables.
'''
return self.feed_forward(x)[self.layers[-1].output_name()]
def score(self, x, y, w=None):
'''Compute R^2 coefficient of determination for a given labeled input.
Parameters
----------
x : ndarray (num-examples, num-inputs)
An array containing data to be fed into the network. Multiple
examples are arranged as rows in this array, with columns containing
the variables for each example.
        y : ndarray (num-examples, num-outputs)
            An array containing expected target data for the network. Multiple
            examples are arranged as rows in this array, with columns containing
            the variables for each example.
        w : ndarray (num-examples, num-outputs), optional
            An array of weights to apply to the squared residuals. Defaults to
            uniform weights.
        Returns
        -------
        r2 : float
            The R^2 coefficient of determination between the predictions of
            this network and its target outputs.
'''
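        # The weighted computation below implements
        #   R^2 = 1 - sum(w * (y - yhat)^2) / sum(w * (y - mean(y))^2).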
u = y - self.predict(x)
v = y - y.mean()
if w is None:
w = np.ones_like(u)
return 1 - (w * u * u).sum() / (w * v * v).sum()
def save(self, filename):
'''Save the state of this network to a pickle file on disk.
Parameters
----------
filename : str
Save the state of this network to a pickle file at the named path.
If this name ends in ".gz" then the output will automatically be
gzipped; otherwise the output will be a "raw" pickle.
'''
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'wb')
pickle.dump(self, handle, -1)
handle.close()
logging.info('%s: saved model', filename)
@classmethod
def load(cls, filename):
'''Load a saved network from disk.
Parameters
----------
filename : str
Load the state of a network from a pickle file at the named path. If
this name ends in ".gz" then the input will automatically be
gunzipped; otherwise the input will be treated as a "raw" pickle.
'''
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'rb')
model = pickle.load(handle)
handle.close()
logging.info('%s: loaded model', filename)
return model
def regularized_loss(self, **kwargs):
'''Return a variable representing the regularized loss for this network.
The regularized loss includes both the loss computation (the "error")
for the network as well as any regularizers that are in place.
Parameters
----------
weight_l1 : float, optional
Regularize the L1 norm of unit connection weights by this constant.
weight_l2 : float, optional
Regularize the L2 norm of unit connection weights by this constant.
hidden_l1 : float, optional
Regularize the L1 norm of hidden unit activations by this constant.
hidden_l2 : float, optional
Regularize the L2 norm of hidden unit activations by this constant.
contractive : float, optional
Regularize model using the Frobenius norm of the hidden Jacobian.
noise : float, optional
Standard deviation of desired noise to inject into input.
dropout : float in [0, 1], optional
Proportion of input units to randomly set to 0.
Returns
-------
loss : theano expression
A theano expression representing the loss of this network.
'''
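        # Example (illustrative coefficients):
        #   net.regularized_loss(weight_l2=1e-4, hidden_l1=0.1)
        # adds an L2 penalty on connection weights and an L1 sparsity penalty
        # on hidden activations to the base loss.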
outputs, _ = self.build_graph(**kwargs)
hiddens = [outputs[l.output_name()] for l in self.layers[1:-1]]
regularizers = dict(
weight_l1=(abs(w).mean() for l in self.layers
for w in l.params if w.ndim > 1),
weight_l2=((w * w).mean() for l in self.layers
for w in l.params if w.ndim > 1),
hidden_l1=(abs(h).mean() for h in hiddens),
hidden_l2=((h * h).mean() for h in hiddens),
contractive=(TT.sqr(TT.grad(h.mean(), self.loss.input)).mean()
for h in hiddens),
)
out = outputs[self.layers[-1].output_name()]
return self.loss(out) + sum(
kwargs[weight] * sum(expr)
for weight, expr in regularizers.items()
if kwargs.get(weight, 0) > 0)
def monitors(self, **kwargs):
'''Return expressions that should be computed to monitor training.
Returns
-------
monitors : list of (name, expression) pairs
A list of named monitor expressions to compute for this network.
'''
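        # Example (illustrative): monitors={'hid1:out': [0.1, 0.9]} adds two
        # monitors reporting how often that output's activations fall below
        # 0.1 and below 0.9; glob patterns also match parameter names.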
outputs, _ = self.build_graph(**kwargs)
out = outputs[self.layers[-1].output_name()]
monitors = [('err', self.loss(out))]
def parse_pattern(pattern):
'''Yield graph expressions that match the given pattern.'''
for name, expr in outputs.items():
if fnmatch.fnmatch(name, pattern):
yield name, expr
for l in self.layers:
for p in l.params:
if fnmatch.fnmatch(p.name, pattern):
yield p.name, p
def parse_levels(levels):
'''Yield named monitor callables.'''
if isinstance(levels, dict):
levels = levels.items()
if isinstance(levels, (int, float)):
levels = [levels]
for level in levels:
if isinstance(level, (tuple, list)):
label, call = level
yield ':{}'.format(label), call
if isinstance(level, (int, float)):
def call(expr):
return (expr < level).mean()
yield '<{}'.format(level), call
inputs = kwargs.get('monitors', {})
if isinstance(inputs, dict):
inputs = inputs.items()
for pattern, levels in inputs:
for name, expr in parse_pattern(pattern):
for key, value in parse_levels(levels):
monitors.append(('{}{}'.format(name, key), value(expr)))
return monitors
def updates(self, **kwargs):
'''Return expressions to run as updates during network training.
Returns
-------
updates : list of (parameter, expression) pairs
A list of named parameter update expressions for this network.
'''
_, updates = self.build_graph(**kwargs)
return updates
|
{
"content_hash": "76d3809da82ddf6ea84e1702aaf624c8",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 89,
"avg_line_length": 42.228070175438596,
"alnum_prop": 0.5902922032959423,
"repo_name": "devdoer/theanets",
"id": "e8ad6d5a927f23ee025e6e807f4e03ff9d0aadb5",
"size": "28910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theanets/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "227443"
}
],
"symlink_target": ""
}
|
import time
from random import randint
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
@pytest.fixture
def driver(request):
# 1) Chrome:
# wd = webdriver.Chrome()
# 2) Firefox:
# wd = webdriver.Firefox()
# 3) Edge:
wd = webdriver.Edge()
# print(wd.capabilities)
request.addfinalizer(wd.quit)
return wd
def test_register_and_login(driver):
wait = WebDriverWait(driver, 15)
password = str(randint(1000, 9999))
login = 'imiarek' + password
print(login + ':' + password)
# Go to registration form
driver.get("http://localhost/litecart/")
    row = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#box-account-login tr")))[4]
    link = row.find_element(By.CSS_SELECTOR, "a")
link.click()
# Fill the form
account_box = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#create-account.box")))[0]
account_box.find_element(By.CSS_SELECTOR, "input[name=firstname]").send_keys(login)
account_box.find_element(By.CSS_SELECTOR, "input[name=lastname]").send_keys(login)
account_box.find_element(By.CSS_SELECTOR, "input[name=address1]").send_keys('Line1')
account_box.find_element(By.CSS_SELECTOR, "input[name=postcode]").send_keys('12345')
account_box.find_element(By.CSS_SELECTOR, "input[name=city]").send_keys('CityN')
country_selector = account_box.find_element(By.CSS_SELECTOR, "#create-account.box select[name=country_code]")
selector = Select(country_selector)
selector.select_by_visible_text('United States')
state_selector = account_box.find_element(By.CSS_SELECTOR, "#create-account.box select[name=zone_code]")
wait.until(
EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#create-account.box select[name=zone_code] option")))
selector = Select(state_selector)
selector.select_by_visible_text('Kansas')
account_box.find_element(By.CSS_SELECTOR, "input[name=email]").send_keys(login + '@example.com')
account_box.find_element(By.CSS_SELECTOR, "input[name=phone]").send_keys(Keys.HOME + "5555555555")
account_box.find_element(By.CSS_SELECTOR, "input[name=password]").send_keys(password)
account_box.find_element(By.CSS_SELECTOR, "input[name=confirmed_password]").send_keys(password)
account_box.find_element(By.CSS_SELECTOR, "button[name=create_account]").click()
# 1st logout
account_links = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#navigation #box-account li a")))
account_links[3].click()
# Login
login_form = \
wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#navigation form[name=login_form]")))[0]
login_form.find_element(By.CSS_SELECTOR, "input[name=email]").send_keys(login + '@example.com')
login_form.find_element(By.CSS_SELECTOR, "input[name=password]").send_keys(password)
login_form.find_element(By.CSS_SELECTOR, "button[name=login]").click()
    # 2nd logout
account_links = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#navigation #box-account li a")))
account_links[3].click()
|
{
"content_hash": "65bcf42b443317cc4004b84ea280f162",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 119,
"avg_line_length": 42.0625,
"alnum_prop": 0.7105497771173849,
"repo_name": "gennadykr/selenium-training",
"id": "85d17e0ec26cba85440493a98670828cbf36bfd4",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selenium/python-example/test_litecart_window_add_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "529"
},
{
"name": "Python",
"bytes": "38347"
}
],
"symlink_target": ""
}
|
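# Codewars "Will there be enough space": given a bus capacity `cap`, the
# number of riders already on board `on`, and the number `wait`ing at the
# stop, return how many waiting people cannot board (0 if everyone fits).
# Example: enough(100, 60, 50) == 10, enough(10, 5, 5) == 0.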
enough = lambda cap, on, wait: max(on + wait - cap, 0)
|
{
"content_hash": "a4dbee1918dffa8f55eaaf126b42fcf3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 55,
"alnum_prop": 0.6181818181818182,
"repo_name": "RevansChen/online-judge",
"id": "724664884c578e389d7ef9cd6989bea3931619c7",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codewars/8kyu/will-there-be-enough-space/Python/solution1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask import render_template
import json
import os
import logdata.incoming
import logdata.predict
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
# Deprecated
@app.route('/logdata/<userid>/<float:latitude>/<float:longitude>/<int:weekday>/<int:hour>/<int:minutesQuant>')
def logInputData(userid, latitude, longitude, weekday, hour, minutesQuant):
# Log the user data
inputData = logdata.incoming.Data(
userid, latitude, longitude, weekday, hour, minutesQuant)
responseJson = inputData.generateResponseJson()
return str(responseJson)
# Deprecated
@app.route('/logdata/<userid>/predictlocation')
def predictedLocationData(userid):
responseJson = logdata.predict.locationPredict(userid)
return str(responseJson)
@app.route('/location-predict/api/v1/logdata/<userid>/<float:latitude>/<float:longitude>/<int:weekday>/<int:hour>/<int:minutesQuant>')
def logInputDataV1(userid, latitude, longitude, weekday, hour, minutesQuant):
# Log the user data
inputData = logdata.incoming.Data(
userid, latitude, longitude, weekday, hour, minutesQuant)
responseJson = inputData.generateResponseJson()
return str(responseJson)
@app.route('/location-predict/api/v1/predict-res/<userid>/<float:latitude>/<float:longitude>/<int:weekday>/<int:hour>/<int:minutesQuant>')
def predictedLocationDataV1(userid, latitude, longitude, weekday, hour, minutesQuant):
responseJson = logdata.predict.locationPredict(userid, latitude, longitude, weekday, hour, minutesQuant)
return str(responseJson)
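# Example request (hypothetical host and values):
#   GET /location-predict/api/v1/logdata/u1/12.97/77.59/2/14/3
# logs one observation (userid, latitude, longitude, weekday, hour,
# minutesQuant) and returns the JSON built by logdata.incoming.Data.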
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
{
"content_hash": "b8914cb18e690e8abb55031fcebab81a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 138,
"avg_line_length": 34.854166666666664,
"alnum_prop": 0.7381948595337716,
"repo_name": "adeekshith/location-predict-server",
"id": "fbb4f3297ad41aab4f0d7f59727e08ed30f7f028",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__main__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6269"
},
{
"name": "Python",
"bytes": "10040"
},
{
"name": "Shell",
"bytes": "3763"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('symposion_schedule', '0006_slot_title_override'),
]
operations = [
migrations.AddField(
model_name='presentation',
name='feedback_url',
field=models.URLField(null=True),
),
]
|
{
"content_hash": "402327f60d0d4d26a2250f754b5bd39a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 21.944444444444443,
"alnum_prop": 0.6050632911392405,
"repo_name": "pyohio/symposion",
"id": "67accb0de4406e51eda486af392cd523fe94635b",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/pyohio-2019",
"path": "symposion/schedule/migrations/0007_presentation_feedback_url.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "287626"
}
],
"symlink_target": ""
}
|