from servicediscovery.client.client import ServiceClient, RegisteredServiceClient
from servicediscovery.client.server import ServiceRegistry
__all__ = ['ServiceClient', 'RegisteredServiceClient', 'ServiceRegistry']
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_board', '0017_auto_20161225_0344'),
]
operations = [
migrations.AddField(
model_name='siteconfig',
name='mailchimp_api_key',
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name='siteconfig',
name='mailchimp_list_id',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='siteconfig',
name='mailchimp_username',
field=models.CharField(blank=True, max_length=20),
),
]
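# These fields are then applied with Django's standard migration command, e.g.:
#   python manage.py migrate job_board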
|
import tensorflow as tf
import numpy as np
def get_weights(name, shape, stddev, trainable = True):
return tf.get_variable('weights{}'.format(name), shape,
initializer = tf.random_normal_initializer(stddev = stddev),
trainable = trainable)
def get_biases(name, shape, value, trainable = True):
return tf.get_variable('biases{}'.format(name), shape,
initializer = tf.constant_initializer(value),
trainable = trainable)
def get_dim(target):
dim = 1
for d in target.get_shape()[1:].as_list():
dim *= d
return dim
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def linear_layer(x, in_dim, out_dim, l_id):
weights = get_weights(l_id, [in_dim, out_dim], 1.0/np.sqrt(float(in_dim)))
biases = get_biases(l_id, [out_dim], 0.0)
return tf.matmul(x, weights) + biases
def conv_layer(inputs, out_num, filter_width, filter_height, stride, l_id):
    # ** NOTICE: weight shape is [height, width, in_channels, out_channels] **
    weights = get_weights(l_id,
                          [filter_height, filter_width, inputs.get_shape()[-1], out_num],
                          0.02)
    biases = get_biases(l_id, [out_num], 0.0)
    conved = tf.nn.conv2d(inputs, weights,
                          strides=[1, stride, stride, 1],
                          padding='SAME')
    return tf.nn.bias_add(conved, biases)
def deconv_layer(inputs, out_shape, filter_width, filter_height, stride, l_id):
    # ** NOTICE: weight shape is [height, width, out_channels, in_channels] **
    weights = get_weights(l_id,
                          [filter_height, filter_width, out_shape[-1], inputs.get_shape()[-1]],
                          0.02)
    biases = get_biases(l_id, [out_shape[-1]], 0.0)
    deconved = tf.nn.conv2d_transpose(inputs, weights, output_shape=out_shape,
                                      strides=[1, stride, stride, 1])
    return tf.nn.bias_add(deconved, biases)
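
# A minimal usage sketch (assumed, not part of the original module): stacking
# these helpers into a tiny TF1-style network. The placeholder input, shapes
# and layer ids are illustrative assumptions.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 28, 28, 1])
    with tf.variable_scope('demo'):
        h = lrelu(conv_layer(images, out_num=8, filter_width=3,
                             filter_height=3, stride=2, l_id='_c1'))
        h = tf.reshape(h, [-1, get_dim(h)])
        logits = linear_layer(h, get_dim(h), 1, '_fc1')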
|
import unittest
import httpretty
import json
from six.moves.urllib.parse import urlparse, unquote
from pysnow.response import Response
from pysnow.client import Client
from pysnow.attachment import Attachment
from requests.exceptions import HTTPError
from pysnow.exceptions import (
ResponseError,
NoResults,
MultipleResults,
InvalidUsage,
MissingResult,
EmptyContent,
)
def get_serialized_result(dict_mock):
return json.dumps({"result": dict_mock})
def get_serialized_error(dict_mock):
return json.dumps({"error": dict_mock})
def qs_as_dict(url):
qs_str = urlparse(url).query
return dict(
(x[0], unquote(x[1])) for x in [x.split("=") for x in qs_str.split("&")]
)
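# e.g. qs_as_dict("https://host/api?a=1&b=x%20y") -> {"a": "1", "b": "x y"}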
class TestResourceRequest(unittest.TestCase):
"""Performs resource-request tests"""
def setUp(self):
self.error_message_body = {"message": "test_message", "detail": "test_details"}
self.record_response_get_dict = {
"sys_id": "98ace1a537ea2a00cf5c9c9953990e19",
"attr1": "foo",
"attr2": "bar",
}
self.record_response_get_one = [
{
"sys_id": "98ace1a537ea2a00cf5c9c9953990e19",
"attr1": "foo",
"attr2": "bar",
}
]
self.record_response_get_three = [
{
"sys_id": "37ea2a00cf5c9c995399098ace1a5e19",
"attr1": "foo1",
"attr2": "bar1",
},
{
"sys_id": "98ace1a537ea2a00cf5c9c9953990e19",
"attr1": "foo2",
"attr2": "bar2",
},
{
"sys_id": "a00cf5c9c9953990e1998ace1a537ea2",
"attr1": "foo3",
"attr2": "bar3",
},
]
self.record_response_create = {
"sys_id": "90e11a537ea2a00cf598ace9c99539c9",
"attr1": "foo_create",
"attr2": "bar_create",
}
self.record_response_update = {
"sys_id": "2a00cf5c9c99539998ace1a537ea0e19",
"attr1": "foo_updated",
"attr2": "bar_updated",
}
self.record_response_delete = {"status": "record deleted"}
self.client_kwargs = {
"user": "mock_user",
"password": "mock_password",
"instance": "mock_instance",
}
self.attachment = {
"sys_id": "attachment_sys_id",
"size_bytes": "512",
"file_name": "test1.txt",
}
self.attachment_path = "tests/data/attachment.txt"
self.base_path = "/api/now"
self.api_path = "/table/incident"
self.client = Client(**self.client_kwargs)
self.resource = self.client.resource(
base_path=self.base_path, api_path=self.api_path
)
self.mock_url_builder = self.resource._url_builder
self.attachment_upload_url = (
self.resource._base_url + self.resource._base_path + "/attachment/file"
)
self.mock_url_builder_base = self.resource._url_builder.get_url()
self.mock_url_builder_sys_id = self.mock_url_builder.get_appended_custom(
"/{0}".format(self.record_response_get_one[0]["sys_id"])
)
self.dict_query = {"sys_id": self.record_response_get_one[0]["sys_id"]}
self.get_fields = ["foo", "bar"]
def test_create_resource(self):
""":class:`Resource` object repr type should be string, and its path should be set to api_path + base_path """
r = self.client.resource(base_path=self.base_path, api_path=self.api_path)
resource_repr = type(repr(r))
self.assertEquals(resource_repr, str)
self.assertEquals(r._base_path, self.base_path)
self.assertEquals(r._api_path, self.api_path)
self.assertEquals(r.path, self.base_path + self.api_path)
@httpretty.activate
def test_response_headers(self):
"""Request response headers should be available in Response.headers property"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
status=200,
adding_headers={"x-test-1": "foo", "x-test-2": "bar"},
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertEqual(response.headers["x-test-1"], "foo")
self.assertEqual(response.headers["x-test-2"], "bar")
@httpretty.activate
def test_response_count(self):
""":prop:`count` of :class:`pysnow.Response` should raise an exception if count is set to non-integer"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertRaises(TypeError, setattr, response, "count", "foo")
self.assertRaises(TypeError, setattr, response, "count", True)
self.assertRaises(TypeError, setattr, response, "count", {"foo": "bar"})
@httpretty.activate
def test_response_error(self):
""":class:`pysnow.Response` should raise an exception if an error is encountered in the response body"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_error(self.error_message_body),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
expected_str = "Error in response. Message: %s, Details: %s" % (
self.error_message_body["message"],
self.error_message_body["detail"],
)
        with self.assertRaises(ResponseError) as cm:
            response.first()
        self.assertEquals(str(cm.exception), expected_str)
@httpretty.activate
def test_get_request_fields(self):
""":meth:`get_request` should return a :class:`pysnow.Response` object"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, fields=self.get_fields)
qs = qs_as_dict(response._response.request.url)
str_fields = ",".join(self.get_fields)
# List of fields should end up as comma-separated string
self.assertEquals(type(response), Response)
self.assertEquals(qs["sysparm_fields"], str_fields)
@httpretty.activate
def test_get_offset(self):
"""offset passed to :meth:`get` should set sysparm_offset in query"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
offset = 5
response = self.resource.get(self.dict_query, offset=offset)
qs = qs_as_dict(response._response.request.url)
self.assertEquals(int(qs["sysparm_offset"]), offset)
@httpretty.activate
def test_get_limit(self):
"""limit passed to :meth:`get` should set sysparm_limit in QS"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
limit = 2
response = self.resource.get(self.dict_query, limit=limit)
qs = qs_as_dict(response._response.request.url)
self.assertEquals(int(qs["sysparm_limit"]), limit)
@httpretty.activate
def test_get_one(self):
""":meth:`one` of :class:`pysnow.Response` should raise an exception if more than one match was found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
result = response.one()
self.assertEquals(result["sys_id"], self.record_response_get_one[0]["sys_id"])
@httpretty.activate
def test_get_all_empty(self):
""":meth:`all` generator of :class:`pysnow.Response` should return an empty list if there are no matches"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result([]),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
result = list(response.all())
self.assertEquals(result, [])
@httpretty.activate
def test_get_nocontent(self):
"""Result.one should raise EmptyContent for GET 202"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=202,
content_type="application/json",
)
result = self.resource.get(self.dict_query)
self.assertRaises(EmptyContent, result.one)
@httpretty.activate
def test_get_all_single(self):
"""Single items with all() using the stream parser should return a list containing the item"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_dict),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
result = list(response.all())[0]
self.assertEquals(result, self.record_response_get_dict)
@httpretty.activate
def test_get_buffer_missing_result_keys(self):
""":meth:`one` of :class:`pysnow.Response` should raise an exception if none of the expected keys
was found in the result"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=json.dumps({}),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertRaises(MissingResult, response.one)
@httpretty.activate
def test_get_stream_missing_result_keys(self):
""":meth:`one` of :class:`pysnow.Response` should raise an exception if none of the expected keys
was found in the result"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=json.dumps({}),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
self.assertRaises(MissingResult, response.first)
@httpretty.activate
def test_http_error_get_one(self):
""":meth:`one` of :class:`pysnow.Response` should raise an HTTPError exception if a
non-200 response code was encountered"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
status=500,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertRaises(HTTPError, response.one)
@httpretty.activate
def test_get_one_many(self):
""":meth:`one` of :class:`pysnow.Response` should raise an exception if more than one match was found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_three),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertRaises(MultipleResults, response.one)
@httpretty.activate
def test_get_one_empty(self):
""":meth:`one` of :class:`pysnow.Response` should raise an exception if no matches were found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result([]),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
self.assertRaises(NoResults, response.one)
@httpretty.activate
def test_get_one_or_none_empty(self):
""":meth:`one_or_none` of :class:`pysnow.Response` should return `None` if no matches were found """
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result([]),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query)
result = response.one_or_none()
self.assertEquals(result, None)
@httpretty.activate
def test_get_first_or_none_empty(self):
""":meth:`first_or_none` of :class:`pysnow.Response` should return None if no records were found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result([]),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
result = response.first_or_none()
self.assertEquals(result, None)
@httpretty.activate
def test_get_first_or_none(self):
""":meth:`first_or_none` of :class:`pysnow.Response` should return first match if multiple records were found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_three),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
result = response.first_or_none()
self.assertEquals(result, self.record_response_get_three[0])
@httpretty.activate
def test_get_first(self):
""":meth:`first` of :class:`pysnow.Response` should return first match if multiple records were found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_three),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
result = response.first()
self.assertEquals(result, self.record_response_get_three[0])
@httpretty.activate
def test_get_first_empty(self):
""":meth:`first` of :class:`pysnow.Response` should raise an exception if matches were found"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result([]),
status=200,
content_type="application/json",
)
response = self.resource.get(self.dict_query, stream=True)
self.assertRaises(NoResults, response.first)
@httpretty.activate
def test_create(self):
""":meth:`create` should return a dictionary of the new record"""
httpretty.register_uri(
httpretty.POST,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_create),
status=200,
content_type="application/json",
)
response = self.resource.create(self.record_response_create)
self.assertEquals(type(response.one()), dict)
self.assertEquals(response.one(), self.record_response_create)
@httpretty.activate
def test_update(self):
""":meth:`update` should return a dictionary of the updated record"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
httpretty.register_uri(
httpretty.PUT,
self.mock_url_builder_sys_id,
body=get_serialized_result(self.record_response_update),
status=200,
content_type="application/json",
)
response = self.resource.update(self.dict_query, self.record_response_update)
result = response.one()
self.assertEquals(type(result), dict)
self.assertEquals(self.record_response_update["attr1"], result["attr1"])
@httpretty.activate
def test_update_invalid_payload(self):
""":meth:`update` should raise an exception if payload is of invalid type"""
self.assertRaises(InvalidUsage, self.resource.update, self.dict_query, "foo")
self.assertRaises(InvalidUsage, self.resource.update, self.dict_query, False)
self.assertRaises(InvalidUsage, self.resource.update, self.dict_query, 1)
self.assertRaises(
InvalidUsage, self.resource.update, self.dict_query, ("foo", "bar")
)
self.assertRaises(
InvalidUsage, self.resource.update, self.dict_query, ["foo", "bar"]
)
@httpretty.activate
def test_delete(self):
""":meth:`delete` should return a dictionary containing status"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
httpretty.register_uri(
httpretty.DELETE,
self.mock_url_builder_sys_id,
body=get_serialized_result(self.record_response_delete),
status=204,
content_type="application/json",
)
result = self.resource.delete(self.dict_query)
self.assertEquals(type(result), dict)
self.assertEquals(result["status"], "record deleted")
@httpretty.activate
def test_delete_chained(self):
""":meth:`Response.delete` should return a dictionary containing status"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
httpretty.register_uri(
httpretty.DELETE,
self.mock_url_builder_sys_id,
body=get_serialized_result(self.record_response_delete),
status=204,
content_type="application/json",
)
result = self.resource.get(query={}).delete()
self.assertEquals(type(result), dict)
self.assertEquals(result["status"], "record deleted")
@httpretty.activate
def test_custom(self):
""":meth:`custom` should return a :class:`pysnow.Response` object"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
method = "GET"
response = self.resource.request(method)
self.assertEquals(response._response.request.method, method)
self.assertEquals(type(response), Response)
@httpretty.activate
def test_custom_with_headers(self):
"""Headers provided to :meth:`custom` should end up in the request"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
headers = {"foo": "bar"}
response = self.resource.request("GET", headers=headers)
self.assertEquals(response._response.request.headers["foo"], headers["foo"])
@httpretty.activate
def test_custom_with_path(self):
"""path_append passed to :meth:`custom` should get appended to the request path"""
path_append = "/foo"
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base + path_append,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
response = self.resource.request("GET", path_append="/foo")
self.assertEquals(response._response.status_code, 200)
@httpretty.activate
def test_custom_with_path_invalid(self):
""":meth:`custom` should raise an exception if the provided path is invalid"""
self.assertRaises(
InvalidUsage, self.resource.request, "GET", path_append={"foo": "bar"}
)
self.assertRaises(
InvalidUsage, self.resource.request, "GET", path_append="foo/"
)
self.assertRaises(InvalidUsage, self.resource.request, "GET", path_append=True)
@httpretty.activate
def test_response_repr(self):
""":meth:`get` should result in response obj repr describing the response"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
response = self.resource.get(query={})
response_repr = repr(response)
self.assertEquals(response_repr, "<Response [200 - GET]>")
def test_attachment_non_table(self):
"""Accessing `Resource.attachments` from a non-table API should fail"""
resource = self.client.resource(base_path=self.base_path, api_path="/invalid")
self.assertRaises(InvalidUsage, getattr, resource, "attachments")
def test_attachment_type(self):
"""`Resource.attachments` should be of type Attachment"""
attachment_type = type(self.resource.attachments)
self.assertEqual(attachment_type, Attachment)
def test_get_record_link(self):
"""`Resource.get_record_link()` should return full URL to the record"""
record_link = self.resource.get_record_link("98ace1a537ea2a00cf5c9c9953990e19")
self.assertEqual(record_link, self.mock_url_builder_sys_id)
@httpretty.activate
def test_get_response_item(self):
"""Accessing the response as a dict should work"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
response = self.resource.get(query={})
self.assertEquals(
response["sys_id"], self.record_response_get_one[0].get("sys_id")
)
@httpretty.activate
def test_get_buffered_first(self):
"""Using Response.first() without stream=True should fail"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
response = self.resource.get(query={})
self.assertRaises(InvalidUsage, response.first)
@httpretty.activate
def test_response_update(self):
"""Using Response.update should update the queried record"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
httpretty.register_uri(
httpretty.PUT,
self.mock_url_builder_sys_id,
body=get_serialized_result(self.record_response_update),
status=200,
content_type="application/json",
)
response = self.resource.get(query={}).update(self.record_response_update)
self.assertEqual(self.record_response_update["sys_id"], response["sys_id"])
@httpretty.activate
def test_response_upload(self):
"""Using Response.upload() should attach the file to the queried record and return metadata"""
httpretty.register_uri(
httpretty.GET,
self.mock_url_builder_base,
body=get_serialized_result(self.record_response_get_one),
status=200,
content_type="application/json",
)
httpretty.register_uri(
httpretty.POST,
self.attachment_upload_url,
body=get_serialized_result(self.attachment),
status=201,
content_type="application/json",
)
response = self.resource.get(query={}).upload(file_path=self.attachment_path)
self.assertEqual(self.attachment["file_name"], response["file_name"])
|
from __future__ import print_function
import json
import boto3
print('Loading function')
# def lambda_handler(event, context):
# #print("Received event: " + json.dumps(event, indent=2))
# # print("value1 = " + event['key1'])
# # print("value2 = " + event['key2'])
# # print("value3 = " + event['key3'])
# # return event['key1'] # Echo back the first key value
# return {"Hello":"*"*100}
# #raise Exception('Something went wrong')
def send_update_thing():
    client = boto3.client('iot-data', region_name='us-east-1')
    # Change topic, qos and payload as needed
    # response = client.publish(
    #     topic='$aws/things/rasberry_pi_test/shadow/update',
    #     qos=1,
    #     payload=json.dumps({"foo": "bar"}))
    # return response
    # update_thing_shadow expects the bare thing name, not the shadow topic path
    response = client.update_thing_shadow(
        thingName='rasberry_pi_test',
        payload=json.dumps({"foo": "bar"}))
    return response
def lambda_handler(event, context):
s3 = boto3.resource('s3')
bucket = "rasberrypi.api.data"
key = "smart_pillow_user.json"
obj = s3.Object(bucket, key)
    result = obj.get()['Body'].read().decode('utf-8')
    # json.loads is safer than eval() for parsing untrusted bucket contents
    result = json.loads(result)
    # send_update_thing()
    return result
|
from __future__ import unicode_literals
import pytest
from dtypes.stack import Stack
@pytest.fixture
def base_stack():
return Stack([1, 2, 3])
def test_construct_from_iterable_valid(base_stack):
expected_output = "(1, 2, 3)"
assert base_stack.__str__() == expected_output
def test_construct_from_nested_iterable_valid():
arg = ([1, 2, 3], 'string')
expected_output = "([1, 2, 3], u'string')"
assert Stack(arg).__str__() == expected_output
def test_construct_from_string_valid():
arg = "string"
expected_output = "(u's', u't', u'r', u'i', u'n', u'g')"
assert Stack(arg).__str__() == expected_output
def test_construct_empty_valid():
expected_output = "()"
assert Stack().__str__() == expected_output
def test_construct_from_none_fails():
with pytest.raises(TypeError):
Stack(None)
def test_construct_from_single_integer_fails():
with pytest.raises(TypeError):
Stack(2)
def test_push(base_stack):
base_stack.push(4)
assert base_stack.__str__() == "(4, 1, 2, 3)"
def test_pop(base_stack):
assert base_stack.pop() == 1
assert base_stack.__str__() == "(2, 3)"
def test_pop_after_multi_push(base_stack):
for x in range(10):
base_stack.push(x)
assert base_stack.pop() == 9
|
import os
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
SECRET_KEY = '6()*&j-ww(=j1&etsd%ws053m#wi_a+oas_hp1y*#4bp7!+w&y'
INTERNAL_IPS = ['127.0.0.1']  # Django expects a list of addresses, not a bare string
EMAIL_HOST = 'mail.stowers.org'
EMAIL_PORT = 25
DEFAULT_FROM_EMAIL = 'contact@track'
EMAIL_DOMAIN = 'stowers.org'
DEFAULT_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
LOCAL_APPS = (
'roasted',
'bootstrapform',
#'debug_toolbar',
'contact_form',
)
INSTALLED_APPS = DEFAULT_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'tracks.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "templates"),
)
WSGI_APPLICATION = 'tracks.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "static"),
#'/var/www/static/',
)
|
import dill as pickle
import pandas as pd
import numpy as np
import time
from datetime import datetime
"""
This module is responsible for processing each json object into the different
features necessary for the prediction model module so that the random forest
ensemble can make predictions on newly downloaded tweets, or tweets store in
a mongo database, so long as they are in the form of json objects in a list
"""
def process_tweet(tweet):
"""
Args:
tweet (json): single tweet object downloaded from twitter's api
Returns:
        vectorized_tweet (1d numpy array): vectorized tweet in the format
            needed for predictions and further processing, as used by the
            first, earlier version of the prediction pipeline
"""
profile_use_background_image = \
tweet['user']['profile_use_background_image']
geo_enabled = tweet['user']['geo_enabled']
verified = tweet['user']['verified']
followers_count = tweet['user']['followers_count']
default_profile_image = tweet['user']['default_profile_image']
listed_count = tweet['user']['listed_count']
statuses_count = tweet['user']['statuses_count']
friends_count = tweet['user']['friends_count']
favourites_count = tweet['user']['favourites_count']
favorite_count = tweet['favorite_count']
num_hashtags = len(tweet['entities']['hashtags'])
num_mentions = len(tweet['entities']['user_mentions'])
retweet_count = tweet['retweet_count']
tweet_date = convert_created_time_to_datetime(tweet['created_at'])
account_creation_date = \
convert_created_time_to_datetime(tweet['user']['created_at'])
time_difference = tweet_date - account_creation_date
account_age = time_difference.days+1
followers_friends = 1 if 2*followers_count > friends_count else 0
has_30_followers = 1 if followers_count >= 30 else 0
favorited_by_another = 1 if favourites_count > 0 else 0
has_hashtagged = 1 if num_hashtags > 0 else 0
has_mentions = 1 if num_mentions > 0 else 0
user_id = tweet['user']['id']
text = tweet['text']
screen_name = tweet['user']['screen_name']
user_vector = np.array([profile_use_background_image, geo_enabled,
verified, followers_count, default_profile_image,
listed_count, statuses_count, friends_count,
favourites_count, favorite_count, num_hashtags,
num_mentions, retweet_count, account_age,
followers_friends, has_30_followers,
favorited_by_another, has_hashtagged,
has_mentions])
text_vector = np.array([user_id, text, screen_name])
return np.hstack((user_vector, text_vector))
def process_tweet_v2(tweet):
"""
Args:
tweet (json): single tweet object downloaded from twitter's api
Returns:
        vectorized_tweet (1d numpy array): the row format needed for
            predictions, including the additional features that compare
            behavior with network information (i.e. tweets per follower,
            likes per friend, etc.)
"""
profile_use_background_image = \
tweet['user']['profile_use_background_image']
geo_enabled = tweet['user']['geo_enabled']
verified = tweet['user']['verified']
followers_count = tweet['user']['followers_count']
default_profile_image = tweet['user']['default_profile_image']
listed_count = tweet['user']['listed_count']
statuses_count = tweet['user']['statuses_count']
friends_count = tweet['user']['friends_count']
favourites_count = tweet['user']['favourites_count']
favorite_count = tweet['favorite_count']
num_hashtags = len(tweet['entities']['hashtags'])
num_mentions = len(tweet['entities']['user_mentions'])
retweet_count = tweet['retweet_count']
tweet_date = convert_created_time_to_datetime(tweet['created_at'])
account_creation_date = \
convert_created_time_to_datetime(tweet['user']['created_at'])
time_difference = tweet_date - account_creation_date
account_age = time_difference.days+1
followers_friends = 1 if 2*followers_count > friends_count else 0
has_30_followers = 1 if followers_count >= 30 else 0
favorited_by_another = 1 if favourites_count > 0 else 0
has_hashtagged = 1 if num_hashtags > 0 else 0
has_mentions = 1 if num_mentions > 0 else 0
user_id = tweet['user']['id']
text = tweet['text']
screen_name = tweet['user']['screen_name']
tweets_followers \
= -999 if followers_count == 0 else statuses_count / followers_count
tweets_friends \
= -999 if friends_count == 0 else statuses_count / friends_count
likes_followers \
= -999 if followers_count == 0 else favourites_count / followers_count
likes_friends \
= -999 if friends_count == 0 else favourites_count / friends_count
user_vector = np.array([profile_use_background_image, geo_enabled,
verified, followers_count, default_profile_image,
listed_count, statuses_count, friends_count,
favourites_count, favorite_count, num_hashtags,
num_mentions, retweet_count, account_age,
followers_friends, has_30_followers,
favorited_by_another, has_hashtagged,
has_mentions, tweets_followers, tweets_friends,
likes_followers, likes_friends])
text_vector = np.array([user_id, text, screen_name])
return np.hstack((user_vector, text_vector))
def convert_created_time_to_datetime(datestring):
"""
Args:
datestring (str): a string object either as a date or
a unix timestamp
Returns:
a pandas datetime object
"""
if len(datestring) == 30:
return pd.to_datetime(datestring)
else:
return pd.to_datetime(datetime.fromtimestamp(int(datestring[:10])))
if __name__ == "__main__":
start = time.time()
    with open('data/test_tweet_scrape.pkl', 'rb') as f:  # pickle files must be opened in binary mode
tweet_list = pickle.load(f)
print("load pkl file: ", time.time() - start)
start = time.time()
processed_tweets = np.array([process_tweet_v2(tweet)
for tweet in tweet_list])
print("process tweets tweets: ", time.time() - start)
|
import unittest
from dart.model.dataset import Column, DataFormat, Dataset, DatasetData, DataType, RowFormat, FileFormat, LoadType
from dart.model.exception import DartValidationException
from dart.schema.base import default_and_validate
from dart.schema.dataset import dataset_schema
class TestDatasetSchema(unittest.TestCase):
def test_dataset_schema(self):
columns = [Column('c1', DataType.VARCHAR, 50), Column('c2', DataType.BIGINT)]
num_header_rows = None
df = DataFormat(FileFormat.PARQUET, RowFormat.NONE, num_header_rows)
ds = Dataset(data=DatasetData(name='test-dataset',
table_name='test_dataset_table',
load_type=LoadType.INSERT,
location='s3://bucket/prefix',
data_format=df,
columns=columns,
tags=[]))
obj_before = ds.to_dict()
obj_after = default_and_validate(ds, dataset_schema()).to_dict()
# num_header_rows should have been defaulted to 0, making these unequal
self.assertNotEqual(obj_before, obj_after)
def test_dataset_schema_invalid(self):
with self.assertRaises(DartValidationException) as context:
columns = [Column('c1', DataType.VARCHAR, 50), Column('c2', DataType.BIGINT)]
df = DataFormat(FileFormat.PARQUET, RowFormat.NONE)
location = None
ds = Dataset(data=DatasetData(name='test-dataset', table_name='test_dataset_table', load_type=LoadType.INSERT,
location=location, data_format=df, columns=columns, tags=[]))
# should fail because location is required
default_and_validate(ds, dataset_schema())
self.assertTrue(isinstance(context.exception, DartValidationException))
if __name__ == '__main__':
unittest.main()
|
"""Creates a passphrase through PRNG dicerolls.
Needs the number of words and at least one word list.
"""
import random
import argparse
import csv
import os
import sys
__author__ = "Elliott Fawcett"
__copyright__ = "Copyright (c) 2014, elliottcf"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Elliott Fawcett"
__status__ = "Development"
parser = argparse.ArgumentParser(prog='DiceRollPW', description='Generates a passphrase from word lists.')
parser.add_argument('length', type=int, help='The number of words in the passphrase')
parser.add_argument('word_list_input', nargs='+', help='Word list files (csv or space-delimited)')
args = parser.parse_args()
passphrase = []
passphrase_length = args.length
word_list = []
for filename in args.word_list_input:
with open(os.path.normpath(filename),'r') as fo:
sniffer = csv.Sniffer()
dialect = sniffer.sniff(fo.read(32))
        # Move the current position back to the start of the file
        fo.seek(0, 0)
#dialect.delimiter has the delimiter
list_delimiter = dialect.delimiter
filereader = csv.reader(fo, delimiter=list_delimiter, strict=True)
try:
            for row in filereader:
                if row:  # skip blank rows
                    word_list.extend(row)
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (filename, filereader.line_num, e))
word_list = filter(None, word_list)
word_list_length = len(word_list)
number_gen = random.SystemRandom()
printing_pw=True
while (printing_pw):
for ppword in xrange(passphrase_length):
random_word = number_gen.randint(0,word_list_length-1)
passphrase.append(word_list[random_word])
    print ' - '.join(passphrase)
    print "Your passphrase is: " + ''.join(passphrase)
    print "Type 'r' then 'Enter' to generate another passphrase"
    print "Press 'Enter' to quit"
s = raw_input()
if (s != 'r'):
printing_pw=False
else:
passphrase[:]=[]
print "Next:"
"""
The MIT License (MIT)
Copyright (c) 2014 elliottcf
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
|
def loop1(x):
a = 0
    for i in range(x):
        a += i
return a
print(loop1(1))
print(loop1(2))
print(loop1(3))
print(loop1(4))
print(loop1(5))
print(loop1(6))
print(loop1(7))
|
import io
import types
import beretta
import kyoto.conf
import kyoto.utils.berp
try:
# Python 2.x
file = file
except NameError:
# Python 3.x
file = io.IOBase
def send(source):
if isinstance(source, file):
while source:
chunk = source.read(kyoto.conf.settings.READ_CHUNK_SIZE)
if not chunk:
break
yield kyoto.utils.berp.pack(chunk)
source.close()
elif isinstance(source, types.GeneratorType):
for chunk in source:
yield kyoto.utils.berp.pack(chunk)
else:
raise ValueError("Stream must be file-like or generator object")
yield b"\x00\x00\x00\x00"
def receive(connection, server=True):
receive_buffer = b""
while connection:
message = connection.recv(kyoto.conf.settings.READ_CHUNK_SIZE)
if message:
receive_buffer += message
while len(receive_buffer) >= 4:
                try:
                    _, message, tail = kyoto.utils.berp.unpack(receive_buffer)
                except ValueError:
                    break  # incomplete packet; recv() more bytes before retrying
except kyoto.utils.berp.MaxBERPSizeError as exception:
if server:
exception = (":error", (":protocol", 3, "MaxBERPSizeError", str(exception), []))
exception = kyoto.utils.berp.pack(beretta.encode(exception))
connection.sendall(exception)
raise
else:
receive_buffer = tail
yield message
else:
break
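
# A hedged usage sketch (not from the original module): stream a file over a
# connected socket as length-prefixed BERP frames, assuming `sock` is a socket
# object and `handle` is a hypothetical consumer:
#
#     with open("payload.bin", "rb") as f:
#         for frame in send(f):
#             sock.sendall(frame)
#     for message in receive(sock):
#         handle(message)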
|
from inspect import isgeneratorfunction
def is_generator(obj):
return callable(obj) and (
isgeneratorfunction(obj) or isgeneratorfunction(getattr(obj, "__call__"))
)
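
# Illustrative checks (assumed, not from the original source):
#   def gen():
#       yield 1
#   is_generator(gen)         # True: a plain generator function
#   is_generator(lambda: 1)   # False: callable, but not a generator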
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class StepTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.studio.v1.flows("FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.engagements("FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.steps.list()
self.holodeck.assert_has_request(Request(
'get',
'https://studio.twilio.com/v1/Flows/FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Engagements/FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Steps',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"previous_page_url": null,
"next_page_url": null,
"url": "https://studio.twilio.com/v1/Flows/FWaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Engagements/FNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Steps?PageSize=50&Page=0",
"page": 0,
"first_page_url": "https://studio.twilio.com/v1/Flows/FWaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Engagements/FNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Steps?PageSize=50&Page=0",
"page_size": 50,
"key": "steps"
},
"steps": []
}
'''
))
actual = self.client.studio.v1.flows("FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.engagements("FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.steps.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.studio.v1.flows("FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.engagements("FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.steps("FTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://studio.twilio.com/v1/Flows/FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Engagements/FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Steps/FTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "FTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"flow_sid": "FWaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"engagement_sid": "FNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"name": "incomingRequest",
"context": {},
"transitioned_from": "Trigger",
"transitioned_to": "Ended",
"date_created": "2017-11-06T12:00:00Z",
"date_updated": null,
"url": "https://studio.twilio.com/v1/Flows/FWaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Engagements/FNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Steps/FTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"step_context": "https://studio.twilio.com/v1/Flows/FWaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Engagements/FNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Steps/FTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Context"
}
}
'''
))
actual = self.client.studio.v1.flows("FWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.engagements("FNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.steps("FTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
|
"""Commands: "!notifications on/off", "!addnotification", "!delnotification"."""
import json
from twisted.internet import reactor
from bot.commands.abstract.command import Command
from bot.paths import NOTIFICATIONS_FILE
from bot.utilities.permission import Permission
from bot.utilities.tools import is_call_id_active
class Notifications(Command):
"""Sends notifications.
    Send out notifications from a list at a set interval, and
    add or remove notifications from the list.
"""
perm = Permission.Moderator
def __init__(self, bot):
"""Initialize variables."""
self.responses = bot.config.responses["Notifications"]
        self.active = False  # Whether notifications start on or off should be user-configurable.
self.callID = None
self.listindex = 0
self.notification_interval = bot.config.config["notification_interval"]
with open(NOTIFICATIONS_FILE.format(bot.root), encoding="utf-8") as file:
self.notifications = json.load(file)
"""If notifications are enabled by default, start the threading."""
if self.active:
self.callID = reactor.callLater(
self.notification_interval, self.write_notification, bot
)
def raise_list_index(self):
"""Raise the listindex by 1 if it's exceeding the list's length reset the index.
Maybe randomizing the list after each run could make sense?
"""
self.listindex += 1
if self.listindex >= len(self.notifications):
self.listindex = 0
def write_notification(self, bot):
"""Write a notification in chat."""
if not self.active:
return
elif len(self.notifications) == 0:
self.active = False
bot.write(self.responses["empty_list"]["msg"])
return
"""Only write notifications if the bot is unpaused."""
if not bot.pause:
bot.write(self.notifications[self.listindex])
self.raise_list_index()
"""Threading to keep notifications running, if class active."""
self.callID = reactor.callLater(
self.notification_interval, self.write_notification, bot
)
def addnotification(self, bot, arg):
"""Add a new notification to the list."""
if arg not in self.notifications:
self.notifications.append(arg)
with open(
NOTIFICATIONS_FILE.format(bot.root), "w", encoding="utf-8"
) as file:
json.dump(self.notifications, file, indent=4)
bot.write(self.responses["notification_added"]["msg"])
else:
bot.write(self.responses["notification_exists"]["msg"])
def delnotification(self, bot, arg):
"""Add a new notification to the list."""
if arg in self.notifications:
self.notifications.remove(arg)
with open(
NOTIFICATIONS_FILE.format(bot.root), "w", encoding="utf-8"
) as file:
json.dump(self.notifications, file, indent=4)
bot.write(self.responses["notification_removed"]["msg"])
else:
bot.write(self.responses["notification_not_found"]["msg"])
def match(self, bot, user, msg, tag_info):
"""Match if a user is a trusted mod or admin and wants to turn notifications on or off.
Or if they want add or remove a notification from the list.
"""
if user in bot.config.trusted_mods or bot.get_permission(user) == 3:
if msg.lower().startswith("!notifications on") or msg.lower().startswith(
"!notifications off"
):
return True
            elif (
                msg.lower().startswith("!addnotification ")
                or msg.lower().startswith("!delnotification ")
            ) and len(msg.split(" ")) > 1:
return True
return False
def run(self, bot, user, msg, tag_info):
"""Start/stop notifications or add/remove notifications from the list."""
if msg.lower().startswith("!notifications on"):
if not self.active:
self.active = True
self.callID = reactor.callLater(
self.notification_interval, self.write_notification, bot
)
bot.write(self.responses["notifications_activate"]["msg"])
else:
bot.write(self.responses["notifications_already_on"]["msg"])
elif msg.lower().startswith("!notifications off"):
if is_call_id_active(self.callID):
self.callID.cancel()
if self.active:
self.active = False
bot.write(self.responses["notifications_deactivate"]["msg"])
else:
bot.write(self.responses["notifications_already_off"]["msg"])
elif msg.lower().startswith("!addnotification "):
self.addnotification(bot, msg.split(" ", 1)[1])
elif msg.lower().startswith("!delnotification "):
self.delnotification(bot, msg.split(" ", 1)[1])
def close(self, bot):
"""Close the game."""
if is_call_id_active(self.callID):
self.callID.cancel()
self.active = False
|
import pyglet
from pyglet.gl import *
import rabbyt
import sys
def create_shadow(sprite, texture, x=0, y=0, rot=0, red=0, green=0, blue=0,
alpha=1):
shadow = rabbyt.Sprite(texture, scale=sprite.scale)
shadow.rgb = red, green, blue
shadow.alpha = sprite.attrgetter('alpha') * alpha
shadow.x = sprite.attrgetter('x') + x
shadow.y = sprite.attrgetter('y') + y
shadow.rot = sprite.attrgetter('rot') + rot
return shadow
def load_tileable_texture(name):
image = pyglet.resource.texture(name)
return pyglet.image.TileableTexture.create_for_image(image)
def save_screenshot(name='screenshot.png', format='RGB'):
image = pyglet.image.get_buffer_manager().get_color_buffer().image_data
image.format = format
image.save(name)
class MyWindow(pyglet.window.Window):
def __init__(self, **kwargs):
super(MyWindow, self).__init__(**kwargs)
self.set_exclusive_mouse(self.fullscreen)
self.set_exclusive_keyboard(self.fullscreen)
rabbyt.set_default_attribs()
glClearColor(1, 1, 1, 0)
self.background = load_tileable_texture('background.png')
self.ship_texture = pyglet.resource.texture('ship.png')
self.ship = rabbyt.Sprite(self.ship_texture,
alpha=rabbyt.ease(0, 1, dt=2),
rot=rabbyt.lerp(end=60, dt=1,
extend='extrapolate'))
self.shadow = create_shadow(self.ship, self.ship_texture, x=20, y=-30,
alpha=0.5)
self.time = 0.
def step(self, dt):
self.time += dt
rabbyt.set_time(self.time)
def on_draw(self):
self.clear()
glColor4f(1, 1, 1, 1)
self.background.blit_tiled(0, 0, 0, self.width, self.height)
glPushMatrix()
glTranslatef(self.width // 2, self.height // 2, 0)
self.shadow.render()
self.ship.render()
glPopMatrix()
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.ESCAPE:
self.on_close()
if symbol == pyglet.window.key.F12:
save_screenshot()
def main():
fullscreen = '--fullscreen' in sys.argv
window = MyWindow(fullscreen=fullscreen)
pyglet.clock.schedule_interval(window.step, 1. / 60.)
pyglet.app.run()
if __name__ == '__main__':
main()
|
import numpy as np
import numpy.ma as ma
import pylab as plt
import cmocean as cm
from scipy import linalg
from scipy.interpolate import interp2d,interp1d
from scipy.optimize import curve_fit,leastsq,least_squares
from scipy.optimize import minimize
from scipy import ndimage
from scipy.stats import pearsonr
from scipy import stats
import cartopy.crs as ccrs
from trackeddy.physics import *
from trackeddy.printfunc import *
def fit_ellipse(x,y,diagnostics=False):
'''
**************** fit_ellipse *****************
Fitting of an ellipse to an array of positions.
    Function translated from Matlab to Python by Josue Martinez Moreno,
the original source:
Copyright (c) 2003, Ohad Gal
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
For more information go to the main source:
https://www.mathworks.com/matlabcentral/fileexchange/3215-fit-ellipse?requestedDomain=www.mathworks.com
Notes:
Args:
x,y (array): Coordinates of the datapoints to fit an ellipse.
        diagnostics (boolean|list): Used to display all the statistics and plots to identify bugs.
Returns:
        ellipse_t (dict) - Dictionary of parameters completely describing the fitted ellipse.
        status (boolean) - True if and only if the fit corresponds to an ellipse.
Usage:
R = np.arange(0,2*pi, 0.01)
x = 1.5*np.cos(R) + 2 + 0.1*np.random.rand(len(R))
y = np.sin(R) + 1. + 0.1*np.random.rand(len(R))
ellipse,status=fit_ellipse(x,y,diagnostics=False)
'''
orientation_tolerance = 1e-3
x=x[:]
y=y[:]
mean_x=np.mean(x)
mean_y=np.mean(y)
xp=x-mean_x
yp=y-mean_y
X=np.array([xp**2,xp*yp,yp**2,xp,yp]).T
a=np.sum(X,axis=0)
b=np.dot(X.T,X)
    x2 = np.linalg.lstsq(b.T, a.T, rcond=None)  # explicit rcond avoids the numpy FutureWarning
res=[np.linalg.norm(ii) for ii in b-a*x2[0]]
r2 = np.mean(1 - res / (a.T.size * a.T.var()))
a,b,c,d,e=x2[0]
if b == 0 and a<c:
anglexaxis_rad=0
elif b==0 and c<a:
anglexaxis_rad=np.pi/2
else:
anglexaxis_rad = np.arctan((c-a-np.sqrt((a-c)**2+b**2))/b)
if ( min(abs(b/a),abs(b/c)) > orientation_tolerance ):
#TODO: Replace this non sign definite orientation_rad for anglexaxis
# which is a sign definite.
orientation_rad = 1/2 * np.arctan(b/(c-a))
cos_phi = np.cos( orientation_rad )
sin_phi = np.sin( orientation_rad )
a,b,c,d,e = [a*cos_phi**2 - b*cos_phi*sin_phi + c*sin_phi**2,0,a*sin_phi**2 + b*cos_phi*sin_phi + \
c*cos_phi**2,d*cos_phi - e*sin_phi,d*sin_phi + e*cos_phi]
mean_x,mean_y=cos_phi*mean_x - sin_phi*mean_y,sin_phi*mean_x + cos_phi*mean_y
else:
orientation_rad = 0
cos_phi = np.cos(orientation_rad)
sin_phi = np.sin(orientation_rad)
test = a*c
if test>0 :#and r2>0.8 and r2<1:
detect='Ellipse'
status=True
elif test==0:
detect='Parabola'
status=False
else:
detect='Hyperbola'
status=False
if status==True:
# final ellipse parameters
X0 = mean_x - (d/2)/a
Y0 = mean_y - (e/2)/c
F = 1 + (d**2)/(4*a) + (e**2)/(4*c)
a = np.sqrt(abs(F/a))
b = np.sqrt(abs(F/c))
long_axis = 2*max(a,b)
short_axis = 2*min(a,b)
# rotate the axes backwards to find the center point of the original TILTED ellipse
R = np.array([[ cos_phi,sin_phi],[-sin_phi,cos_phi ]])
P_in = np.dot(R, np.array([X0,Y0]))
X0_in = P_in[0]
Y0_in = P_in[1]
ver_line = np.array([ [X0,X0], [Y0-1*b, Y0+1*b]])
horz_line = np.array([ [X0-1*a,X0+1*a], [Y0,Y0] ])
new_ver_line = np.dot(R,ver_line)
new_horz_line = np.dot(R,horz_line)
### TODO Rotate the axis when the slope is positive.
theta_r = np.linspace(0,2*np.pi,len(y));
ellipse_x_r = X0 + a*np.cos(theta_r)
ellipse_y_r = Y0 + b*np.sin(theta_r)
rotated_ellipse = np.dot(R, np.array([ellipse_x_r,ellipse_y_r]))
# pack ellipse into a structure
ellipse_t = {'a':a,'b':b,'phi':anglexaxis_rad,'X0':X0,'Y0':Y0,\
'X0_in':X0_in,'Y0_in':Y0_in,'long_axis':long_axis,\
'short_axis':short_axis,'minoraxis':new_horz_line,\
'majoraxis':new_ver_line,'ellipse':rotated_ellipse\
,'status':'Cool'}
else:
# report an empty structure
ellipse_t = {'a':'','b':'','phi':'','X0':'','Y0':'',\
'X0_in':'','Y0_in':'','long_axis':'',\
'short_axis':'','minoraxis':'',\
'majoraxis':'','ellipse':'',\
'status':detect}
if (("ellipse" in diagnostics) or ("all" in diagnostics) or (True in diagnostics)) and status==True:
# draw
plt.plot( x,y,'b',label='data');
plt.plot( new_ver_line[0],new_ver_line[1],'k',label='minor axis' )
plt.plot( new_horz_line[0],new_horz_line[1],'b',label='major axis')
plt.plot( rotated_ellipse[0],rotated_ellipse[1],'r',label='Fitted ellipse $R^2$=%f' %r2 )
plt.legend(loc=1)
plt.show()
plt.plot(ellipse_x_r,ellipse_y_r,'m')
plt.plot(horz_line[0],horz_line[1])
plt.plot(ver_line[0],ver_line[1])
plt.show()
return ellipse_t,status,r2
def PolyArea(x,y):
'''
*************** PolyArea *******************
    Calculate the area of a polygon.
Notes:
Args:
x,y (array): Coordinates of the datapoints to fit an ellipse.
Returns:
        area (float) - Area enclosed by the polygon.
Usage:
R = np.arange(0,2*pi, 0.01)
x = 1.5*np.cos(R) + 2 + 0.1*np.random.rand(len(R))
y = np.sin(R) + 1. + 0.1*np.random.rand(len(R))
area=PolyArea(x,y)
'''
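    # Shoelace formula: area = 0.5 * |x . roll(y, 1) - y . roll(x, 1)|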
area=0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
return area
def eccentricity(a,b):
'''
*************** eccentricity *******************
    Calculate the eccentricity of an ellipse.
Notes:
Args:
        a (float): Major axis of an ellipse
        b (float): Minor axis of an ellipse
Returns:
eccen (float) - Eccentricity of the ellipsoid with parameters a,b.
Usage:
a=0.5
b=0.3
eccen=eccentricity(a,b)
'''
a=abs(a)
b=abs(b)
if b>a:
b1=a
a=b
b=b1
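    # e = sqrt(1 - b^2 / a^2), with a the semi-major and b the semi-minor axis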
eccen=np.sqrt(1-(abs(b)**2/abs(a)**2))
return eccen
def find2l(arrayx,arrayy,valuex,valuey):
'''
*************** find2l *******************
Find values in two list of values.
Notes:
Args:
        arrayx (list|array): Array in which to find the index closest to valuex.
        arrayy (list|array): Array in which to find the index closest to valuey.
        valuex (int|float): Value to look for in arrayx.
        valuey (int|float): Value to look for in arrayy.
    Returns:
        idx,idy (int) - Indexes of the closest values.
Usage:
arrayx=[0,1,2,3]
arrayy=[4,5,6,7]
valuex=2.2
valuey=6.6
indexes=find2l(arrayx,arrayy,valuex,valuey)
'''
idx=(np.abs(arrayx-valuex)).argmin()
idy=(np.abs(arrayy-valuey)).argmin()
return idx,idy
def find(array,value):
'''
*************** find *******************
Find values in a list of values.
Notes:
Args:
        array (list|array): Array in which to look for the value (exact match).
        value (int|float): Value to look for in array.
    Returns:
        idx - Index of the matching value.
Usage:
array=[0,1,2,3]
value=2.2
idx=find(array,value)
'''
idx=int(np.mean(np.where(array==value)))
return idx
def find2D(array,value):
'''
*************** find2D *******************
Find values in a 2D array of values.
Notes:
Args:
        array (list|array): 2D array in which to look for the value (exact match).
        value (int|float): Value to look for in array.
    Returns:
        yp,xp - Indexes of the matching values.
Usage:
array=[[0,1,2,3],[0,1,4,3]]
value=2
idx,idy=find2D(array,value)
'''
yp,xp=np.where(array==value)
return yp,xp
def contourmaxvalue(var,x,y,levels,date=''):
'''
*************** contourmaxvalue *******************
    Find the maximum value inside a specific contour.
    Notes:
    Args:
        var (array): 3D matrix representing a surface (np.shape(var)=(date,len(x),len(y))).
        x (list|array): Contains the coordinate X of the grid of var.
        y (list|array): Contains the coordinate Y of the grid of var.
        levels (list): Level of the extracted contour.
        date (int): Used if len(np.shape(var))==3 (i.e. var contains a time dimension).
    Returns:
        coord (list) - Location of the max value in the grid.
    Usage:
        center_eddy=contourmaxvalue(sshnan,lon,lat,levels,date)
'''
    if len(np.shape(var))==3:
if levels[0]>0:
sshextrem=np.nanmax(var[date,:,:])
else:
sshextrem=np.nanmin(var[date,:,:])
indexes=find2D(var[date,:,:],sshextrem)
else:
if levels[0]>0:
sshextrem=np.nanmax(var)
else:
sshextrem=np.nanmin(var)
indexes=find2D(var[:,:],sshextrem)
coord=[x[indexes[1][0]],y[indexes[0][0]],sshextrem,indexes[1][0],indexes[0][0]]
return coord
def centroidvalue(contcoordx,contcoordy,var,x,y,levels,date,threshold=1):
'''
*************** centroidvalue *******************
    Find the centroid inside a specific contour.
Notes:
Args:
contcoordx (list|array): Contains the coordinates in X of the contour of the field var.
contcoordy (list|array): Contains the coordinates in Y of the contour of the field var.
var (array): 3D Matrix representing a surface (np.shape(var)=(date,len(x),len(y))).
        x (list|array): Contains the coordinate X of the grid of var.
        y (list|array): Contains the coordinate Y of the grid of var.
levels (list): Level of the extracted contour.
date (int): Used if len(var)==3 (i.e. Var contains a time dimension).
Returns:
coord (list) - Location of the centroid in the grid.
Usage:
        center_eddy=centroidvalue(contcoordx,contcoordy,sshnan,lon,lat,levels,date)
'''
if len(np.shape(var))==3:
var=var.filled(0)
var=np.abs(var)
sum_T=np.nansum(var[date,:,:])
sum_X=np.nansum(var[date,:,:],axis=0)
sum_Y=np.nansum(var[date,:,:],axis=1)
XM=0
for ii in range(len(sum_X)):
XM=XM+(sum_X[ii]*x[ii])
YM=0
for ii in range(len(sum_Y)):
YM=YM+(sum_Y[ii]*y[ii])
xcpos=XM/sum_T
ycpos=YM/sum_T
else:
var=var.filled(0)
var=np.abs(var)
sum_T=np.nansum(var)
sum_X=np.nansum(var,axis=0)
sum_Y=np.nansum(var,axis=1)
XM=0
for ii in range(len(sum_X)):
XM=XM+(sum_X[ii]*x[ii])
YM=0
for ii in range(len(sum_Y)):
YM=YM+(sum_Y[ii]*y[ii])
xcpos=XM/sum_T
ycpos=YM/sum_T
coord=np.asarray([xcpos,ycpos])
return coord
def adjust1Gaus(x,y):
'''
*************** adjust1Gaus *******************
    Fit one Gaussian to a curve.
    Notes:
    Args:
        x(list|array): Coordinates in x of data.
        y(list|array): Data to be adjusted with one Gaussian.
    Returns:
        gausfit(list|array) - Fitted data.
Usage:
x=np.arange(-5,5,0.1)
x0=0
a=3
sigma=2
gaussian=gaus(x,a,x0,sigma)
gaussianfit=adjust1Gaus(x,gaussian)
'''
gauss_fit = lambda p, x: p[0]*(1/np.sqrt(2*np.pi*(p[2]**2)))*np.exp(-(x-p[1])**2/(2*p[2]**2)) #1d Gaussian func
e_gauss_fit = lambda p, x, y: (gauss_fit(p,x) -y) #1d Gaussian fit
    v0 = [1, 10, 1]  # initial guesses (amplitude, centre, sigma); gauss_fit uses only three parameters
    out = leastsq(e_gauss_fit, v0[:], args=(x, y), maxfev=2000, full_output=1)  # Gauss fit
v = out[0] #fit parameters out
covar = out[1] #covariance matrix output
gausfit = gauss_fit(v,x)
return gausfit
def twoD_Paraboloid(coords, amplitude, xo, yo, a, b,offset):
'''
    *************** twoD_Paraboloid *******************
    Build a 2D paraboloid.
    Notes:
        Remember to do g.ravel().reshape(len(y),len(x)) for plotting purposes.
    Args:
        coords [x,y,amplitude,xo,yo] (list|array): Coordinates in x and y plus the fixed amplitude and centre.
        amplitude (float): Amplitude of the paraboloid (taken from coords[2]).
        xo , yo (float): Centre of the paraboloid (taken from coords[3] and coords[4]).
        a , b (float): Curvature of the paraboloid along x and y.
        offset (float): Paraboloid offset.
Returns:
g.ravel() (list|array) - Gaussian surface in a list.
Usage:
Check scan_eddym function.
'''
x=coords[0]
y=coords[1]
amplitude = coords[2]
xo = float(coords[3])
yo = float(coords[4])
#a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
#b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
#c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
#g =offset - amplitude * (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2))
    g = -amplitude*(((x-xo)/a)**2+((y-yo)/b)**2) + offset
return g.ravel()
def twoD_Gaussian(coords, sigma_x, sigma_y, theta, slopex=0, slopey=0, offset=0):
'''
*************** twoD_Gaussian *******************
Build a 2D gaussian.
Notes:
        Remember to do g.ravel().reshape(len(y),len(x)) for plotting purposes.
    Args:
        coords [x,y,amplitude,xo,yo] (list|array): Coordinates in x and y plus the fixed amplitude and centre.
        amplitude (float): Amplitude of the Gaussian (taken from coords[2]).
        xo , yo (float): Centre of the Gaussian (taken from coords[3] and coords[4]).
        sigma_x,sigma_y (float): Deviation.
        theta (float): Orientation.
        slopex,slopey,offset (float): Optional slope and offset terms (currently unused).
Returns:
g.ravel() (list|array) - Gaussian surface in a list.
Usage:
Check scan_eddym function.
'''
x=coords[0]
y=coords[1]
amplitude = coords[2]
xo = float(coords[3])
yo = float(coords[4])
#print(sigma_y,sigma_x,sigma_y/sigma_x)
    if sigma_x != 0 and sigma_y != 0:
#a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
#b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
#c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
#g = amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
cos_phi = np.cos(theta)
sin_phi = np.sin(theta)
a = (cos_phi**2)/(2*sigma_x**2) + (sin_phi**2)/(2*sigma_y**2)
b = (np.sin(2*theta))/(4*sigma_x**2) - (np.sin(2*theta))/(4*sigma_y**2)
c = (sin_phi**2)/(2*sigma_x**2) + (cos_phi**2)/(2*sigma_y**2)
g = amplitude*np.exp(-(a*(x-xo)**2 + 2*b*(x-xo)*(y-yo) + c*(y-yo)**2))
else:
g = (x-xo)*0 + (y-yo)*0
return g.ravel()
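# Hedged usage sketch of twoD_Gaussian (synthetic grid, illustrative values;
# not part of the original module):
#   x = np.linspace(-5, 5, 100)
#   y = np.linspace(-5, 5, 80)
#   X, Y = np.meshgrid(x, y)
#   coords = (X, Y, 1.0, 0.0, 0.0)                # amplitude 1, centred at (0,0)
#   g = twoD_Gaussian(coords, 2.0, 1.0, np.pi/4)  # sigma_x, sigma_y, theta
#   surface = g.reshape(len(y), len(x))           # undo .ravel() for plotting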
def gaussian2Dresidual(popt, coords, varm):
g=twoD_Gaussian(coords,*popt).reshape(np.shape(varm))
residual = np.exp(np.abs(np.float128(np.nanmean(abs(varm - g))))) - 1
#print('Residual:',np.nanmean(residual))
return residual
def paraboloid2Dresidual(popt, coords, varm):
    # use the paraboloid model here; calling twoD_Gaussian was a copy-paste slip
    residual = np.exp(np.abs(varm - twoD_Paraboloid(coords, *popt))) - 1
    return residual
def correlation_coefficient(data, data1):
product = np.mean((data - data.mean()) * (data1 - data1.mean()))
    stds = data.std() * data1.std()
if stds == 0:
return 0
else:
product = product/stds
return product
def fit2Dcurve(var,values,level,initial_guess='',date='',mode='gaussian',diagnostics=False):
'''
    *************** fit2Dcurve *******************
    Fit a 2D surface (Gaussian or paraboloid) to the data.
    Notes:
    Args:
        var (array): Field to fit.
        values (list): [x, y, amplitude, xo, yo] grid coordinates plus the fixed amplitude and centre.
        level (float): Level of the extracted contour.
        initial_guess (list): Initial parameters for the optimisation.
        date (int): Time index, used if var contains a time dimension.
        mode (str): 'gaussian' (default), 'parabolic', 'best' or 'both'.
        diagnostics (bool|list): Used to display all the statistics and plots to identify bugs.
    Returns:
        fitdict, R2 - Fitted parameters and the correlation of the fit.
    Usage:
        fitdict, R2 = fit2Dcurve(var, values, level, date=0)
'''
if type(diagnostics) != list:
diagnostics=[diagnostics]
Lon, Lat = np.meshgrid(values[0], values[1])
coords=(Lon,Lat,values[2],values[3],values[4])
if date!='':
varm=var[date,:,:]*1
else:
varm=var*1
mask=ma.getmask(varm[:,:])
if initial_guess=='':
initial_guess = [1,1,0,0,0,0]
if mode == 'parabolic':
popt, pcov, infodict,mesg,ier = leastsq(paraboloid2Dresidual, initial_guess,\
args=(coords, varm.ravel()),full_output=True)#,\
fitdict = popt
fitted_curve = twoD_Paraboloid(coords, *fitdict)
elif mode == 'best':
popt, pcov = leastsq(paraboloid2Dresidual, initial_guess, args=(coords, varm.ravel()))
popt, pcov = leastsq(gaussian2Dresidual, initial_guess, args=(coords, varm.ravel()))
fitdict = popt
elif mode == 'both':
popt, pcov = leastsq(gaussian2Dresidual, initial_guess, args=(coords, varm.ravel()))
popt, pcov = leastsq(paraboloid2Dresidual, initial_guess, args=(coords, varm.ravel()))
fitdict = popt
else:
#print("\n ----Fit----")
#pdb.set_trace()
        res = minimize(gaussian2Dresidual, initial_guess, args=(coords, varm),
                       method='SLSQP', options={'ftol': 1e-12, 'disp': False})  # SLSQP takes ftol, not xtol
fitdict = res.x
#popt, pcov, infodict,mesg,ier = leastsq(gaussian2Dresidual, initial_guess,\
# args=(coords, varm.ravel()),\
# xtol=1e-10,maxfev=1000000,
# full_output=True)
#fitdict = popt
fitted_curve = twoD_Gaussian(coords, *fitdict)
fittedata=fitted_curve.reshape(len(values[1]), len(values[0]))
try:
R2=correlation_coefficient(varm.ravel(),fittedata.ravel())
except:
R2=0
if type(varm) == ma.core.MaskedArray:
o=sum(sum(varm.filled(0)))
g=sum(sum(ma.masked_array(fittedata, varm.mask).filled(0)))
if o*0.9 <= g and g <= o*1.1:
R2=0
if ("2dfit" in diagnostics) or ("all" in diagnostics) or (True in diagnostics):
print('R^2 2D fitting:',R2)
        if mode not in ('parabolic', 'best', 'both'):  # res only exists for the gaussian path
            print('OPT steps:', res.nfev)
print(" |sigmaX|sigmaY|Theta|slopeX|slopeY|")
print("initial guess|" + ''.join(str(e)+'|' for e in initial_guess))
print("Fit. |" + ''.join(str(e)+'|' for e in fitdict))
f, (ax1, ax2,ax3) = plt.subplots(1, 3,figsize=(25,7),sharey=True)
p=ax1.pcolormesh(values[0],values[1],varm,vmin=varm.min(),vmax=varm.max())
ax1.set_title('Original Field')
ax1.axis('equal')
plt.colorbar(p,ax=ax1)
p=ax2.pcolormesh(values[0], values[1],fittedata,vmin=varm.min(),vmax=varm.max())
ax2.set_title('2D Gauss Fit')
ax2.axis('equal')
plt.colorbar(p,ax=ax2)
p=ax3.pcolormesh(values[0], values[1],varm-fittedata,\
cmap=cm.cm.balance)
ax3.axis('equal')
ax3.set_title('Difference between Fit & Original')
plt.colorbar(p,ax=ax3)
plt.show()
plt.close()
return fitdict,R2
def rsquard(y,yfit):
'''
*************** rsquard *******************
    Calculate the squared Pearson coefficient.
    Notes:
        Make sure the x grid coincides at least in indexes for y and yfit.
    Args:
        y(list|array): Original data.
        yfit(list|array): Fitted data.
    Returns:
        R2 (float): Squared Pearson coefficient.
Usage:
x=np.arange(-5,5,0.1)
x0=0
a=3
sigma=2
gaussian=gaus(x,a,x0,sigma)+gaus(x,a-2,x0+2,sigma-1)
gaussianfit=adjustMGaus(x,gaussian)
R2=rsquard(gaussian,gaussianfit)
'''
#yhat=yfit # or [p(z) for z in x]
#ybar = np.sum(y)/len(y) # or sum(y)/len(y)
#ssreg = np.sum((y-yhat)**2) # or sum([ (yihat - ybar)**2 for yihat in yhat])
#sstot = np.sum((y - ybar)**2) # or sum([ (yi - ybar)**2 for yi in y])
#R2 = 1 - ssreg / sstot
R2=pearsonr(y,yfit)[0]**2
return R2
def ellipsefit(y,yfit,ellipsrsquarefit=0.85,diagnostics=False):
'''
*************** ellipsefit *******************
Check the fitness of an ellipse in a curve.
Notes:
Args:
y(list|array): Original data.
        yfit(list|array): Ellipse adjusted to contour.
        ellipsrsquarefit (float): (0 < ellipsrsquarefit < 1)
            Pearson coefficient used to validate an ellipse.
        diagnostics (boolean): Used to display all the
            statistics and plots to identify bugs.
    Returns:
        Rsquard (float) - Fitness of the ellipse.
        check (boolean) - True if the ellipse fit is greater than ellipsrsquarefit.
Usage:
Check scan_eddym function.
'''
    if type(diagnostics) != list:
        diagnostics=[diagnostics]
    x=range(0,len(y))
f=interp1d(x, y)
xnew=np.linspace(0,len(y)-1,len(yfit))
eddy2fit=f(xnew)
indxed=find(yfit,yfit.max())
indxrd=find(eddy2fit,eddy2fit.max())
if yfit[indxed]==yfit[indxed+1] and yfit[indxed]==yfit[indxed-1]:
indxed=find(yfit,yfit.min())
indxrd=find(eddy2fit,eddy2fit.min())
eddy2fit=list(eddy2fit)*2
eddyfitdisplace=np.zeros(len(yfit))
for ii in range(len(yfit)):
eddyfitdisplace[ii]=eddy2fit[indxrd-indxed+ii]
Rsquard=rsquard(eddyfitdisplace,yfit)
if ("ellipse" in diagnostics) or ("all" in diagnostics) or (True in diagnostics):
plt.figure()
plt.title('Ellipse Fit')
plt.plot(yfit,'-b')
plt.plot(eddyfitdisplace,'-r')
plt.text(0, np.mean(yfit), str(round(Rsquard,2)))
plt.show()
print(sum(yfit),sum(eddyfitdisplace))
if Rsquard>=ellipsrsquarefit:# and Rsquard < 1:
check=True
else:
check=False
return Rsquard,check
def extractprofeddy(axis,field,lon,lat,n,gaus='One',kind='linear',gaussrsquarefit=0.65,varname='',diagnostics=False,plotprofile=False):
'''
*************** extractprofeddy *******************
    Extract the profile along a segment.
    Notes:
        Primarily used to extract the major or minor axis of an ellipse.
    Args:
        axis (list): Coordinates of a segment.
        field (array): Surface where the profile will be extracted.
        lon,lat (array|list): Coordinates of the field.
        n (int): Number of desired divisions in the segment.
        gaus (Default:One|None|Multiple): Adjustment applied to the segment.
        kind (Default:linear|cubic|etc): Type of interpolation inside the
            segment (for more information check scipy.interpolate.interp2d).
        gaussrsquarefit (float): (0 < gaussrsquarefit < 1)
            Pearson coefficient used to validate a Gaussian.
        varname (str): Name of the variable, just used for plots.
        diagnostics (boolean): Used to display all the
            statistics and plots to identify bugs.
        plotprofile (boolean): If True, also return the Gaussian fit.
    Returns:
        y (array) - Data extracted along the segment.
        gausfit (array) - Gaussian fit of the profile (only if plotprofile==True).
        check (boolean) - True if the Gaussian fit is greater than gaussrsquarefit.
Usage:
Check scan_eddym function.
'''
try:
fieldnan=field.filled(0)
except:
field[~np.isfinite(field)]=0
fieldnan=field
if type(diagnostics) != list:
diagnostics=[diagnostics]
#fieldnan=field
#print(nanmax(field),nanmin(field))
ycoord=np.linspace(axis[1,0],axis[1,1],n)
xcoord=np.linspace(axis[0,0],axis[0,1],n)
field2interp=interp2d(lon, lat, fieldnan[:,:], kind=kind)
field_interp = field2interp(xcoord,ycoord)
axisdata=np.zeros([n])
for ii in range(n):
axisdata[ii]=field_interp[ii,ii]
n = len(axisdata) #the number of data
x = np.array(range(n))
y = axisdata
p = np.poly1d(np.polyfit(x, y, 1))
slope, intercept, r_value, p_value, std_err = stats.linregress(y,p(x))
linearfit = rsquard(y,p(x))
    if gaus=='None':
        Rsquared=1
        gausfit=y  # no fit requested; fall back to the raw profile
else:
if gaus=='One':
gausfit=adjust1Gaus(x,y)
elif gaus=='Multiple':
gausfit=adjustMGaus(x,y)
else:
print('Select a gausian method to adjust.')
return
Rsquared = rsquard(y,gausfit)
#print('R2_gauss: ',Rsquared)
#print('test 746 geo:',Rsquared)
if Rsquared >= gaussrsquarefit and linearfit < 0.5:
check=True
else:
check=False
if ("all" in diagnostics) or ("gauss" in diagnostics) or (True in diagnostics):
plt.plot(xcoord,ycoord)
plt.title('Gauss_fit')
plt.pcolormesh(xcoord,ycoord,field_interp)
plt.show()
print('std',varname,' vs fit',Rsquared)
plt.plot(x,y,'b+:',label='data')
plt.plot(x,gausfit,'ro:',label='fit')
plt.legend()
plt.title('Fit for Time Constant')
plt.xlabel('Position (n)')
plt.ylabel(varname)
plt.show()
if plotprofile==True:
return y,gausfit,check
else:
return y,check
def eddylandcheck(contour,lon,lat,var,diagnostics=False):
'''
*************** eddylandcheck *******************
    Check if the contour is surrounded by land.
    Notes:
    Args:
        contour (array): Contour coordinates as (n,2) [x,y] pairs.
        lon,lat (array|list): Coordinates of the field.
        var (array): Field used to detect land (NaNs or masked values).
        diagnostics (bool|list): Used to display all the
            statistics and plots to identify bugs.
    Returns:
        checkland (boolean) - False if half or more of the contour lies on land.
    Usage:
'''
if type(diagnostics) != list:
diagnostics=[diagnostics]
land = {'True':{},'False':{}}
lcount = 0
fcount = 0
checkland = True
for ii in range(0,len(contour[:,0])):
idxcheck,idycheck = find2l(lon,lat,contour[ii,0],contour[ii,1])
if idxcheck == len(lon):
ar=0
elif idycheck == len(lat):
ar=0
else:
ar=1
checkarea=var[idycheck-ar:idycheck+ar,idxcheck-ar:idxcheck+ar]
if np.isnan(checkarea).any() and type(var)!=ma.core.MaskedArray:
land['True'][str(lcount)] = {'x':idxcheck,'y':idycheck}
lcount=lcount+1
elif type(var)==ma.core.MaskedArray and checkarea.mask.any() :
land['True'][str(lcount)] = {'x':idxcheck,'y':idycheck}
lcount=lcount+1
else:
land['False'][str(fcount)] = {'x':idxcheck,'y':idycheck}
fcount=fcount+1
if len(land['True']) >= len(contour[:,0])/2:
checkland=False
if ("landcheck" in diagnostics) or ("all" in diagnostics) or (True in diagnostics):
plt.figure()
plt.pcolormesh(lon,lat,var,cmap=cm.cm.deep)
plt.plot(contour[:,0],contour[:,1])
for key,item in land.items():
for key1,item1 in item.items():
if key == 'True':
plt.plot(lon[item1['x']],lat[item1['y']],'or',label='Invalid land grid points ($n_i = %d$)' %lcount if key1 == str(len(item.keys())-1) else "")
else:
plt.plot(lon[item1['x']],lat[item1['y']],'og',label='Valid grid points ($n_v = %d$)' %(len(contour[:,0])-lcount) if key1 == str(len(item.keys())-1) else "")
plt.title('Land check $n_g = %d$' % len(contour[:,0]) )
plt.legend()
return checkland
def reconstruct_syntetic(varshape,lon,lat,eddytd,mode='gaussian',rmbfit=False,usefullfit=False,diagnostics=False,one_time=None,debug=False):
'''
*************** reconstruct_syntetic *******************
    Reconstruct the synthetic field using the Gaussian
    parameters saved in the dictionary of eddies.
    Notes:
    Args:
        varshape (tuple): Shape of the output field (time,len(lat),len(lon)).
        lon,lat (array|list): Coordinates of the field.
        eddytd (dict): Tracked-eddy dictionary containing the fitted parameters.
        mode (str): 'gaussian' (default) or 'parabolic'.
    Returns:
        fieldfit (array) - Reconstructed synthetic field.
    Usage:
'''
if debug==True:
print("\n ******* Reconstruct ******")
pdb.set_trace()
Lon,Lat=np.meshgrid(lon,lat)
fieldfit=np.zeros(varshape)
if type(diagnostics) != list:
diagnostics=[diagnostics]
pp = Printer();
keys=tuple(eddytd.keys())
loop_len=len(keys)
for xx in range(0,loop_len):
key=keys[xx]
counter=0
if one_time!=None and type(one_time)==int:
timeloop=[one_time]
else:
timeloop=range(0,len(eddytd[key]['time']))
for tt in timeloop:
ttt=eddytd[key]['time'][tt]
level=eddytd[key]['level'][tt]
maxposition=[Lon,Lat,eddytd[key]['position_maxvalue'][counter][2],\
eddytd[key]['position_maxvalue'][counter][0],\
eddytd[key]['position_maxvalue'][counter][1]]
curvefit=eddytd[key]['2dgaussianfit'][counter]
if isinstance(curvefit, np.float64):
curvefit=eddytd[key]['2dgaussianfit']
#Remove the slope and constant in the reconstruction of the eddy.
if mode == 'parabolic':
fittedcurve=twoD_Paraboloid(maxposition, *curvefit[:])
#print(level)
if level>0:
fittedcurve[fittedcurve<0]=0
else:
fittedcurve[fittedcurve>0]=0
elif mode == 'best':
print('Work in progress')
elif mode == 'both':
print('Work in progress')
else:
#if usefullfit==False:
# curvefit[-1]=0
# curvefit[-2]=0
# curvefit[-3]=0
#print(gaussfit)
fittedcurve=twoD_Gaussian(maxposition, *curvefit)
#print(fittedcurve[0])
if np.isnan(fittedcurve[0]):
#or (curvefit[0]/curvefit[1]+curvefit[1]/curvefit[0])/2>1.7:
#or curvefit[0]/curvefit[1]>1.7 or curvefit[1]/curvefit[0]>1.7:
fittedcurve=np.zeros(np.shape(fittedcurve))
else:
fieldfit[ttt,:,:]=fieldfit[ttt,:,:]+fittedcurve.reshape(len(lat),len(lon))
#print(fieldfit[ttt,:,:])
#plt.pcolormesh(fieldfit[ttt,:,:])
#plt.show()
counter=counter+1
if ("reconstruct" in diagnostics) or ("all" in diagnostics) or (True in diagnostics):
ax = plt.axes(projection=ccrs.PlateCarree())
print('key: ',key,'Level: ',level)
plt.pcolormesh(lon[::5],lat[::5],fieldfit[0,::5,::5])
ax.coastlines()
plt.colorbar()
plt.show()
pp.timepercentprint(0,loop_len,1,xx,key)
return fieldfit
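# Hedged sketch of the eddytd layout assumed by reconstruct_syntetic (keys
# taken from the lookups above; values illustrative):
#   eddytd = {'eddyn_0': {
#       'time': [0, 1, ...],                        # time indexes (ttt)
#       'level': [0.1, 0.1, ...],                   # contour level per step
#       'position_maxvalue': [[x0, y0, amp], ...],  # centre and amplitude
#       '2dgaussianfit': [[sigma_x, sigma_y, theta, ...], ...]}}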
def insideness_contour(data,center,levels,mask=False,maskopt=None,diagnostics=False):
    '''
    *************** insideness_contour *******************
    Mask everything outside the connected region (contour) that contains
    the given center, so only the eddy of interest survives.
    Notes:
    Args:
        data (array): 2D field to mask.
        center (list): [row,col] index of the eddy center.
        levels (int|list): Contour level(s) defining the region.
        mask (boolean): If True, also return the markers used as mask.
        maskopt (None|'maskomax'|'contour'|'forcefit'): Masking strategy.
        diagnostics (bool|list): Used to display all the
            statistics and plots to identify bugs.
    Returns:
        maskeddata (masked array) - Masked field (and markers if mask==True).
    '''
if type(diagnostics) != list:
diagnostics=[diagnostics]
data_rmove=np.array(np.zeros(np.shape(data)))
    # normalise levels to a two-element list so the comparisons here and
    # below work for an int or single-element list as well (Python 3 would
    # otherwise raise a TypeError when comparing a list to 0)
    if type(levels)==int:
        levels=[levels,levels]
    elif len(levels)==1:
        levels=[levels[0],levels[0]]
    if levels[1]<0:
        data_rmove[data<levels[1]]=1
    else:
        data_rmove[data>levels[0]]=1
    markers, features = ndimage.label(data_rmove)
    markers = np.asarray(markers)
if markers.max()!=1 and maskopt==None:
markers=markers*0
returnmasked=True
elif markers.max()!=1 and maskopt=='maskomax':
plt.pcolormesh(markers)
plt.colorbar()
plt.title('1')
plt.show()
#TODO: Look into how to remove only the other maximum value
print(features,np.shape(markers))
        for ii in range(features):
            if ii != markers[center[0], center[1]-1]:
                markers[markers == ii] = 1
#markers=markers-markers[center[0],center[1]-1]
plt.pcolormesh(markers)
plt.colorbar()
plt.title('2')
plt.show()
returnmasked=True
elif markers.max()!=1 and (maskopt=='contour' or maskopt=='forcefit'):
if center[1]==np.shape(markers)[1]:
markers[markers!=markers[center[0],center[1]-1]]=0
elif center[0]==np.shape(markers)[0]:
markers[markers!=markers[center[0]-1,center[1]]]=0
else:
markers[markers!=markers[center[0],center[1]]]=0
markers=markers.max()-markers
returnmasked=True
elif maskopt=='contour' or maskopt=='forcefit':
markers=1-markers
returnmasked=True
else:
markers=markers*0
returnmasked=True
if levels[0]<0 and maskopt!='forcefit':
markers[data>0]=1
elif levels[0]>0 and maskopt!='forcefit':
markers[data<0]=1
elif levels[0]<0 and maskopt=='forcefit':
markers[np.multiply(data<levels[1]/2, data>levels[1]-levels[0]/4)]=0
data[np.multiply(data<levels[1]/2, data>levels[1]-levels[0]/4)]=0
elif levels[0]>0 and maskopt=='forcefit':
markers[np.multiply(data>levels[0]/2, data<levels[0]-levels[0]/4)]=0
data[np.multiply(data>levels[0]/2, data<levels[0]-levels[0]/4)]=0
maskeddata=ma.masked_array(data, markers)
if ("contours" in diagnostics) or ("all" in diagnostics) or (True in diagnostics):
print('markerceter:',markers[center[0],center[1]])
f, (ax1, ax2,ax3) = plt.subplots(1, 3,figsize=(15,7), sharey=True)
ax1.pcolormesh(data)
ax1.set_title('original data')
m=ax2.pcolormesh(markers)
plt.colorbar(m,ax=ax2)
msk=ax2.plot(center[1],center[0],'or')
ax2.set_title('identified eddy mask')
ax3.pcolormesh(maskeddata)
ax3.set_title('masked data')
plt.show()
if mask==True:
return maskeddata,markers
else:
return maskeddata
def gaussareacheck(values,level,areaparms,gauss2dfit,contour_area,contour_x=None,contour_y=None):
#print('gauss check')
Lon, Lat = np.meshgrid(values[0], values[1])
coords=(Lon,Lat,values[2],values[3],values[4])
fitted_curve = twoD_Gaussian(coords, *gauss2dfit)
fittedata = fitted_curve.reshape(len(values[1]),len(values[0]))
#fittedata = ma.masked_array(fittedata, mask)
    try:
        # both branches were identical; contour levels must be increasing,
        # and [level, np.inf] works for positive and negative levels alike
        CS=plt.contour(values[0],values[1],fittedata,levels=[level,np.inf])
plt.close()
CONTS=CS.allsegs[0][0]
areastatus = checkscalearea(areaparms,np.mean(CONTS[:,0]),np.mean(CONTS[:,1]),CONTS[:,0],CONTS[:,1])
    except Exception:
        return False,0
if areastatus['ellipse'] == None:
test=False
elif (contour_area*1.5 > areastatus['ellipse']) and areastatus['status']: #and area[1]!=0:
test=True
else:
test=False
#print('---------',test)
return test,areastatus['ellipse']
def checkgaussaxis2D(a,b,a_g,b_g):
#print('a',a,a_g,'b',b,b_g)
if a*2<a_g or b*2<b_g:
return False
else:
return True
def check_closecontour(contour,lon_contour,lat_contour,var):
    if len(contour[:,0]) <= 8:  # too few points to form a meaningful closed contour
return False
elif contour[0,0] != contour[-1,0] and contour[0,1] !=contour[-1,1]:
return False
else:
checkland=eddylandcheck(contour,lon_contour,lat_contour,var)
if checkland == False:
return False
else:
return True
|
"""Generate a python model equivalent to the generated verilog"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
import chips_c
import sys
import math
import register_map
from chips.compiler.exceptions import StopSim, BreakSim, ChipsAssertionFail
from chips.compiler.exceptions import NoProfile
from utils import calculate_jumps
from chips_c import bits_to_float, float_to_bits, bits_to_double, double_to_bits, add, subtract
from chips_c import greater, greater_equal, unsigned_greater, unsigned_greater_equal
from chips_c import shift_left, shift_right, unsigned_shift_right
def to_32_signed(a):
if a & 0x80000000:
return a | (~0xffffffff)
return a
def to_64_signed(a):
if a & 0x8000000000000000:
return a | (~0xffffffffffffffff)
return a
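# Illustrative examples of the two's-complement helpers above:
#   to_32_signed(0xffffffff) == -1
#   to_32_signed(0x7fffffff) == 2147483647
#   to_64_signed(0xffffffffffffffff) == -1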
def generate_python_model(
debug,
input_file,
name,
instructions,
allocator,
inputs,
outputs,
profile=False
):
instructions, initial_memory_contents = calculate_jumps(instructions, True)
input_files = set(
[i["file_name"] for i in instructions if "file_read" == i["op"]]
)
output_files = set(
[i["file_name"] for i in instructions if
i["op"].endswith("file_write")]
)
# map input numbers to port models
numbered_inputs = {}
for number, input_name in allocator.input_names.iteritems():
if input_name in inputs:
numbered_inputs[number] = inputs[input_name]
numbered_outputs = {}
for number, output_name in allocator.output_names.iteritems():
if output_name in outputs:
numbered_outputs[number] = outputs[output_name]
return PythonModel(
debug,
instructions,
initial_memory_contents,
input_files,
output_files,
numbered_inputs,
numbered_outputs,
profile,
)
class PythonModel:
"""create a python model equivalent to the generated verilog"""
def __init__(
self,
debug,
instructions,
memory_content,
input_files,
output_files,
inputs, outputs,
profile=False
):
self.debug = debug
self.profile = profile
self.instructions = instructions
self.memory_content = memory_content
self.input_file_names = input_files
self.output_file_names = output_files
self.inputs = inputs
self.outputs = outputs
self.breakpoints = {}
def simulation_reset(self):
"""reset the python model"""
self.program_counter = 0
self.register_hi = 0
self.register_hib = 0
self.carry = 0
self.memory = self.memory_content
self.registers = {}
self.address = 0
self.write_state = "wait_ack"
self.read_state = "wait_stb"
self.a_lo = 0
self.b_lo = 0
self.a_hi = 0
self.b_hi = 0
self.max_stack = 0
self.timer = 0
self.clock = 0
self.files = {}
self.input_files = {}
for file_name in self.input_file_names:
file_ = open(file_name)
self.input_files[file_name] = file_
self.output_files = {}
for file_name in self.output_file_names:
file_ = open(file_name, "w")
self.output_files[file_name] = file_
def get_line(self):
trace = self.instructions[self.program_counter]["trace"]
return trace.lineno
def get_file(self):
trace = self.instructions[self.program_counter]["trace"]
return trace.filename
def get_profile(self):
if not self.profile:
raise NoProfile
return self.files
def get_registers(self):
return self.registers
def get_memory(self):
return self.memory
def get_instruction(self):
return self.instructions[self.program_counter]
def get_program_counter(self):
return self.program_counter
def set_breakpoint(self, f, l):
lines = self.breakpoints.get(f, {})
lines[l] = True
self.breakpoints[f] = lines
def clear_breakpoint(self, f, l):
lines = self.breakpoints.get(f, {})
lines.pop(l)
self.breakpoints[f] = lines
def step_into(self):
"""run until a different line (e.g jump into functions)"""
l = self.get_line()
f = self.get_file()
while(l == self.get_line() and f == self.get_file()):
self.simulation_step()
def step_over(self):
"""run until the next line (e.g. skip over functions)"""
l = self.get_line()
f = self.get_file()
        # the original compared the bound method itself (always truthy);
        # keep stepping while on or behind the current line, or while inside
        # a function in another file
        while self.get_line() <= l or self.get_file() != f:
            self.simulation_step()
def simulation_step(self):
"""execute the python simulation by one step"""
l = self.get_line()
f = self.get_file()
if f in self.breakpoints:
if l in self.breakpoints[f]:
raise BreakSim
instruction = self.instructions[self.program_counter]
current_stack = self.registers.get(register_map.tos, 0)
self.max_stack = max([current_stack, self.max_stack])
if self.profile:
trace = instruction.get("trace", "-")
lines = self.files.get(trace.filename, {})
lines[trace.lineno] = lines.get(trace.lineno, 0) + 1
self.files[trace.filename] = lines
if "literal" in instruction:
literal = instruction["literal"]
if "label" in instruction:
literal = instruction["label"]
# read operands
#
a = instruction.get("a", 0)
b = instruction.get("b", 0)
z = instruction.get("z", 0)
operand_b = self.registers.get(b, 0)
operand_a = self.registers.get(a, 0)
this_instruction = self.program_counter
self.program_counter += 1
wait = False
result = None
if instruction["op"] == "stop":
self.program_counter = this_instruction
wait = True
for file_ in self.input_files.values():
file_.close()
for file_ in self.output_files.values():
file_.close()
raise StopSim
elif instruction["op"] == "literal":
if literal & 0x8000:
result = -65536 | literal
else:
result = literal
result &= 0xffffffff
elif instruction["op"] == "addl":
if literal & 0x8000:
sext = -65536 | literal
else:
sext = literal
result = sext + operand_a
result &= 0xffffffff
elif instruction["op"] == "literal_hi":
if literal & 0x8000:
sext = -65536 | literal
else:
sext = literal
result = (sext << 16) | (operand_a & 0x0000ffff)
result &= 0xffffffff
elif instruction["op"] == "store":
self.memory[operand_a] = operand_b
elif instruction["op"] == "load":
result = self.memory.get(operand_a, 0)
elif instruction["op"] == "call":
result = this_instruction + 1
self.program_counter = literal
elif instruction["op"] == "return":
self.program_counter = operand_a
elif instruction["op"] == "a_lo":
result = self.a_lo
self.a_lo = operand_a
elif instruction["op"] == "b_lo":
result = self.b_lo
self.b_lo = operand_a
elif instruction["op"] == "a_hi":
result = self.a_hi
self.a_hi = operand_a
elif instruction["op"] == "b_hi":
result = self.b_hi
self.b_hi = operand_a
elif instruction["op"] == "not":
result = (~operand_a) & 0xffffffff
elif instruction["op"] == "int_to_long":
if operand_a & 0x80000000:
result = 0xffffffff
else:
result = 0
elif instruction["op"] == "int_to_float":
f = float(to_32_signed(self.a_lo))
self.a_lo = float_to_bits(f)
elif instruction["op"] == "float_to_int":
i = bits_to_float(self.a_lo)
if math.isnan(i):
self.a_lo = 0
else:
self.a_lo = int(i) & 0xffffffff
elif instruction["op"] == "long_to_double":
double = float(to_64_signed(chips_c.join_words(self.a_hi, self.a_lo)))
if math.isnan(double):
self.a_hi = 0
self.a_lo = 0
else:
self.a_hi = chips_c.high_word(double_to_bits(double))
self.a_lo = chips_c.low_word(double_to_bits(double))
elif instruction["op"] == "double_to_long":
bits = int(bits_to_double(chips_c.join_words(self.a_hi, self.a_lo)))
bits &= 0xffffffffffffffff
self.a_hi = chips_c.high_word(bits)
self.a_lo = chips_c.low_word(bits)
elif instruction["op"] == "float_to_double":
f = bits_to_float(self.a_lo)
bits = double_to_bits(f)
self.a_hi = chips_c.high_word(bits)
self.a_lo = chips_c.low_word(bits)
elif instruction["op"] == "double_to_float":
f = bits_to_double(chips_c.join_words(self.a_hi, self.a_lo))
self.a_lo = float_to_bits(f)
elif instruction["op"] == "add":
            total = add(operand_a, operand_b, 0)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "add_with_carry":
            total = add(operand_a, operand_b, self.carry)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "subtract":
            total = subtract(operand_a, operand_b, 1)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "subtract_with_carry":
            total = subtract(operand_a, operand_b, self.carry)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "multiply":
lw = operand_a * operand_b
self.carry = chips_c.high_word(lw)
result = chips_c.low_word(lw)
elif instruction["op"] == "divide":
a = operand_a
b = operand_b
result = chips_c.divide(a, b)
elif instruction["op"] == "unsigned_divide":
a = operand_a
b = operand_b
result = chips_c.unsigned_divide(a, b)
elif instruction["op"] == "modulo":
a = operand_a
b = operand_b
result = chips_c.modulo(a, b)
elif instruction["op"] == "unsigned_modulo":
a = operand_a
b = operand_b
result = chips_c.unsigned_modulo(a, b)
elif instruction["op"] == "long_divide":
a = chips_c.join_words(self.a_hi, self.a_lo)
b = chips_c.join_words(self.b_hi, self.b_lo)
quotient = chips_c.long_divide(a, b)
self.a_hi = chips_c.high_word(quotient)
self.a_lo = chips_c.low_word(quotient)
elif instruction["op"] == "long_modulo":
a = chips_c.join_words(self.a_hi, self.a_lo)
b = chips_c.join_words(self.b_hi, self.b_lo)
remainder = chips_c.long_modulo(a, b)
self.a_hi = chips_c.high_word(remainder)
self.a_lo = chips_c.low_word(remainder)
elif instruction["op"] == "unsigned_long_divide":
a = chips_c.join_words(self.a_hi, self.a_lo)
b = chips_c.join_words(self.b_hi, self.b_lo)
quotient = chips_c.unsigned_long_divide(a, b)
self.a_hi = chips_c.high_word(quotient)
self.a_lo = chips_c.low_word(quotient)
elif instruction["op"] == "unsigned_long_modulo":
a = chips_c.join_words(self.a_hi, self.a_lo)
b = chips_c.join_words(self.b_hi, self.b_lo)
remainder = chips_c.unsigned_long_modulo(a, b)
self.a_hi = chips_c.high_word(remainder)
self.a_lo = chips_c.low_word(remainder)
elif instruction["op"] == "carry":
result = self.carry
elif instruction["op"] == "or":
result = operand_a | operand_b
elif instruction["op"] == "and":
result = operand_a & operand_b
elif instruction["op"] == "xor":
result = operand_a ^ operand_b
elif instruction["op"] == "shift_left":
total = shift_left(operand_a, operand_b, 0)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "shift_left_with_carry":
total = shift_left(operand_a, operand_b, self.carry)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "shift_right":
total = shift_right(operand_a, operand_b)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "unsigned_shift_right":
total = unsigned_shift_right(operand_a, operand_b, 0)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "shift_right_with_carry":
total = unsigned_shift_right(operand_a, operand_b, self.carry)
result = total.lo
self.carry = total.hi
elif instruction["op"] == "greater":
result = greater(operand_a, operand_b)
elif instruction["op"] == "greater_equal":
result = greater_equal(operand_a, operand_b)
elif instruction["op"] == "unsigned_greater":
result = unsigned_greater(operand_a, operand_b)
elif instruction["op"] == "unsigned_greater_equal":
result = unsigned_greater_equal(operand_a, operand_b)
elif instruction["op"] == "equal":
result = operand_a == operand_b
elif instruction["op"] == "not_equal":
result = operand_a != operand_b
elif instruction["op"] == "jmp_if_false":
if operand_a == 0:
self.program_counter = literal
elif instruction["op"] == "jmp_if_true":
if operand_a != 0:
self.program_counter = literal
elif instruction["op"] == "goto":
self.program_counter = literal
elif instruction["op"] == "timer_low":
result = self.clock&0xffffffff
elif instruction["op"] == "timer_high":
result = self.clock>>32
elif instruction["op"] == "file_read":
value = self.input_files[instruction["filename"]].getline()
result = value
elif instruction["op"] == "float_file_write":
self.output_files[instruction["file_name"]].write(
"%.7f\n" %
bits_to_float(operand_a))
elif instruction["op"] == "unsigned_file_write":
self.output_files[instruction["file_name"]].write(
"%i\n" %
operand_a)
elif instruction["op"] == "file_write":
self.output_files[instruction["file_name"]].write(
"%i\n" %
to_32_signed(operand_a))
elif instruction["op"] == "read":
if operand_a not in self.inputs:
result = 0
else:
input_ = self.inputs[operand_a]
if input_.src_rdy and input_.dst_rdy:
result = input_.q
input_.next_dst_rdy = False
else:
input_.next_dst_rdy = True
wait = True
elif instruction["op"] == "ready":
if operand_a not in self.inputs:
operand_a = 0
else:
input_ = self.inputs[operand_a]
if input_.src_rdy:
result = 1
else:
result = 0
elif instruction["op"] == "output_ready":
if operand_a not in self.outputs:
operand_a = 0
else:
output_ = self.outputs[operand_a]
if output_.dst_rdy:
result = 1
else:
result = 0
elif instruction["op"] == "write":
if operand_a not in self.outputs:
pass
else:
output_ = self.outputs[operand_a]
if output_.src_rdy and output_.dst_rdy:
output_.next_src_rdy = False
else:
output_.q = operand_b
output_.next_src_rdy = True
wait = True
elif instruction["op"] == "float_add":
a = operand_a
b = operand_b
float_ = bits_to_float(a)
floatb = bits_to_float(b)
result = float_to_bits(float_ + floatb)
elif instruction["op"] == "float_subtract":
a = operand_a
b = operand_b
float_ = bits_to_float(a)
floatb = bits_to_float(b)
result = float_to_bits(float_ - floatb)
elif instruction["op"] == "float_multiply":
a = operand_a
b = operand_b
float_ = bits_to_float(a)
floatb = bits_to_float(b)
result = float_to_bits(float_ * floatb)
elif instruction["op"] == "float_divide":
a = operand_a
b = operand_b
float_ = bits_to_float(a)
floatb = bits_to_float(b)
try:
result = float_to_bits(float_ / floatb)
except ZeroDivisionError:
result = float_to_bits(float("nan"))
elif instruction["op"] == "long_float_add":
double = bits_to_double(chips_c.join_words(self.a_hi, self.a_lo))
doubleb = bits_to_double(chips_c.join_words(self.b_hi, self.b_lo))
self.a_hi = chips_c.high_word(double_to_bits(double + doubleb))
self.a_lo = chips_c.low_word(double_to_bits(double + doubleb))
elif instruction["op"] == "long_float_subtract":
double = bits_to_double(chips_c.join_words(self.a_hi, self.a_lo))
doubleb = bits_to_double(chips_c.join_words(self.b_hi, self.b_lo))
self.a_hi = chips_c.high_word(double_to_bits(double - doubleb))
self.a_lo = chips_c.low_word(double_to_bits(double - doubleb))
elif instruction["op"] == "long_float_multiply":
double = bits_to_double(chips_c.join_words(self.a_hi, self.a_lo))
doubleb = bits_to_double(chips_c.join_words(self.b_hi, self.b_lo))
self.a_hi = chips_c.high_word(double_to_bits(double * doubleb))
self.a_lo = chips_c.low_word(double_to_bits(double * doubleb))
elif instruction["op"] == "long_float_divide":
double = bits_to_double(chips_c.join_words(self.a_hi, self.a_lo))
doubleb = bits_to_double(chips_c.join_words(self.b_hi, self.b_lo))
try:
self.a_hi = chips_c.high_word(double_to_bits(double / doubleb))
self.a_lo = chips_c.low_word(double_to_bits(double / doubleb))
except ZeroDivisionError:
self.a_hi = chips_c.high_word(double_to_bits(float("nan")))
self.a_lo = chips_c.low_word(double_to_bits(float("nan")))
elif instruction["op"] == "long_float_file_write":
long_word = chips_c.join_words(self.a_hi, self.a_lo)
self.output_files[instruction["file_name"]].write(
"%.16f\n" %
bits_to_double(long_word))
elif instruction["op"] == "long_file_write":
long_word = chips_c.join_words(self.a_hi, self.a_lo)
self.output_files[instruction["file_name"]].write(
"%f\n" %
long_word)
elif instruction["op"] == "assert":
if operand_a == 0:
raise ChipsAssertionFail(
instruction["file"],
instruction["line"])
elif instruction["op"] == "report":
print "%d (report (int) at line: %s in file: %s)" % (
to_32_signed(self.a_lo),
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "long_report":
print "%d (report (long) at line: %s in file: %s)" % (
to_64_signed(chips_c.join_words(self.a_hi, self.a_lo)),
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "float_report":
print "%f (report (float) at line: %s in file: %s)" % (
bits_to_float(self.a_lo),
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "long_float_report":
print "%s (report (double) at line: %s in file: %s)" % (
bits_to_double(chips_c.join_words(self.a_hi, self.a_lo)),
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "unsigned_report":
print "%d (report (unsigned) at line: %s in file: %s)" % (
self.a_lo,
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "long_unsigned_report":
print "%d (report (unsigned long) at line: %s in file: %s)" % (
chips_c.join_words(self.a_hi, self.a_lo),
instruction["line"],
instruction["file"],
)
elif instruction["op"] == "wait_clocks":
if self.timer == operand_a:
wait = False
self.timer = 0
else:
wait = True
self.timer += 1
else:
print "Unknown machine instruction", instruction["op"]
sys.exit(-1)
# Write data back
if result is not None:
self.registers[z] = result
        # a waiting instruction stalls: re-execute it on the next step
        if wait:
            self.program_counter = this_instruction
self.clock += 1
|
'''
Compute the smallest common multiple of a set of numbers
Status: Accepted
'''
from math import gcd
def main():
"""Read input and print output"""
while True:
try:
product = 1
for term in [int(i) for i in input().split()]:
common = gcd(product, term)
product *= term // common
print(product)
except EOFError:
break
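# Worked example of the gcd-based lcm step above: for the input "4 6",
# product starts at 1 -> gcd(1,4)=1, product=4 -> gcd(4,6)=2,
# product = 4 * (6 // 2) = 12, the least common multiple.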
if __name__ == '__main__':
main()
|
'''
setup tools
'''
from setuptools import setup, find_packages
setup(
name='kibana-logger',
version=":versiontools:kibana_logger:",
description="module to simply log in syslog with CEE/json format for analysing by kibana",
long_description="",
keywords='kibana, syslog, cee, json',
author='Allan SIMON',
author_email='allan.simon@supinfo.com',
url='https://github.com/allan-simon/python-kibana-logger',
license='MIT',
include_package_data=True,
packages=find_packages(),
classifiers=[
"Programming Language :: Python",
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
setup_requires=[
'versiontools >= 1.8',
],
)
|
"""
Test for pytimeseries library
"""
from transformer import transformer
from AR import AR
from AR import AR_Ridge_2
from AR import AR_Lasso
from AR import AR_ElasticNet
from HoltWinters import HoltWinters
import pandas
import matplotlib.pyplot
ts = pandas.Series.from_csv('champagne.csv', index_col = 0, header = 0)
model = HoltWinters(alpha = 0.9, beta = False, gamma = False)
result = model.predict(ts)
model_2 = HoltWinters(alpha = 0.9, beta = 0.1, gamma = False)
result_2 = model_2.predict(ts)
matplotlib.pyplot.plot(ts)
matplotlib.pyplot.plot(result)
matplotlib.pyplot.plot(result_2)
matplotlib.pyplot.show()
|
from django.contrib.auth.decorators import user_passes_test
from stronghold import conf, utils
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class LoginRequiredMiddleware(MiddlewareMixin):
"""
    Restrict access to users for which STRONGHOLD_USER_TEST_FUNC returns
    True. The default is to check whether the user is authenticated.
    A view is deemed public if the @public decorator is applied to it.
    A view is also deemed public if it is listed in the STRONGHOLD_PUBLIC_URLS
    setting in the Django settings;
    each entry in STRONGHOLD_PUBLIC_URLS must be a valid compiled regex.
"""
def __init__(self, *args, **kwargs):
if MiddlewareMixin != object:
super(LoginRequiredMiddleware, self).__init__(*args, **kwargs)
self.public_view_urls = getattr(conf, "STRONGHOLD_PUBLIC_URLS", ())
def process_view(self, request, view_func, view_args, view_kwargs):
        if (
            utils.is_view_func_public(view_func)
            or self.is_public_url(request.path_info)
            or (conf.STRONGHOLD_USER_TEST_FUNC(request.user)
                and conf.STRONGHOLD_REQUEST_TEST_FUNC(request, view_func, view_args, view_kwargs))
        ):
return None
decorator = user_passes_test(conf.STRONGHOLD_USER_TEST_FUNC)
return decorator(view_func)(request, *view_args, **view_kwargs)
def is_public_url(self, url):
return any(public_url.match(url) for public_url in self.public_view_urls)
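# Hedged configuration sketch (settings.py; patterns illustrative): entries in
# STRONGHOLD_PUBLIC_URLS must be pre-compiled regexes, since is_public_url()
# calls .match() on each one.
#
#   import re
#   STRONGHOLD_PUBLIC_URLS = (
#       re.compile(r'^/about/'),
#       re.compile(r'^/static/'),
#   )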
|
from mitmproxy.net.http import Headers
from mitmproxy.net.http import multipart
import pytest
def test_decode():
boundary = 'somefancyboundary'
headers = Headers(
content_type='multipart/form-data; boundary=' + boundary
)
content = (
"--{0}\n"
"Content-Disposition: form-data; name=\"field1\"\n\n"
"value1\n"
"--{0}\n"
"Content-Disposition: form-data; name=\"field2\"\n\n"
"value2\n"
"--{0}--".format(boundary).encode()
)
form = multipart.decode(headers, content)
assert len(form) == 2
assert form[0] == (b"field1", b"value1")
assert form[1] == (b"field2", b"value2")
boundary = 'boundary茅莽'
headers = Headers(
content_type='multipart/form-data; boundary=' + boundary
)
result = multipart.decode(headers, content)
assert result == []
headers = Headers(
content_type=''
)
assert multipart.decode(headers, content) == []
def test_encode():
data = [("file".encode('utf-8'), "shell.jpg".encode('utf-8')),
("file_size".encode('utf-8'), "1000".encode('utf-8'))]
headers = Headers(
content_type='multipart/form-data; boundary=127824672498'
)
content = multipart.encode(headers, data)
assert b'Content-Disposition: form-data; name="file"' in content
assert b'Content-Type: text/plain; charset=utf-8\r\n\r\nshell.jpg\r\n\r\n--127824672498\r\n' in content
    assert b'1000\r\n\r\n--127824672498--\r\n' in content
assert len(content) == 252
with pytest.raises(ValueError, match=r"boundary found in encoded string"):
multipart.encode(headers, [("key".encode('utf-8'), "--127824672498".encode('utf-8'))])
boundary = 'boundary茅莽'
headers = Headers(
content_type='multipart/form-data; boundary=' + boundary
)
result = multipart.encode(headers, data)
assert result == b''
|
import os
import re
from django import template
from django.conf import settings
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.templatetags.staticfiles import static
register = template.Library()
DEFAULT_HTML_TAGS = {
'.css': '<link rel="stylesheet" href="{}">',
'.js': '<script src="{}"></script>'
}
HTML_TAGS = getattr(settings, 'GRUNTED_ASSETS_HTML_TAGS', DEFAULT_HTML_TAGS)
DEFAULT_HTML_TAGS_INLINE = {
'.css': '<style>{}\n</style>',
'.js': '<script>{}\n</script>'
}
HTML_TAGS_INLINE = getattr(settings, 'GRUNTED_ASSETS_HTML_TAGS_INLINE',
DEFAULT_HTML_TAGS_INLINE)
DEFAULT_DIRS = {
True: '.tmp',
False: 'dist'
}
CURRENT_STATIC_DIR = getattr(settings, 'GRUNTED_ASSETS_DIR',
DEFAULT_DIRS[settings.DEBUG])
def find_asset(filename):
# TODO: cache this?
filename_re = re.compile(filename, re.IGNORECASE)
found_files = []
    for finder in finders.get_finders():
        # 'xyz' is the ignore_patterns argument of finder.list(); matching
        # nothing real, it effectively ignores no files
        for file in finder.list('xyz'):
if file[0].startswith(CURRENT_STATIC_DIR):
if filename_re.match(os.path.basename(file[0])):
found_files.append(file)
if not found_files:
raise IOError('Could not find any file matching {} in {}'.format(
filename, CURRENT_STATIC_DIR))
if len(found_files) > 1:
raise IOError('Found more than one file matching {} in {}: {}'.format(
filename,
CURRENT_STATIC_DIR,
', '.join([f[0] for f in found_files])))
return found_files[0]
@register.simple_tag
def link_asset(filename_re):
"""
The `{% link_asset "<filename_re>" %}` tag is used to get a specific asset
from either the development or production asset output folders (by default
`.tmp` and `dist` respectively). You can use a filename regex that will
match both the file in dev as in production, like for example:
`'tail.*\.js'`, matching your `tail.js` in development and
`tail.f23r0df0se.js` in production.
Raises an error when zero or multiple files are found.
"""
asset = find_asset(filename_re)[0]
base, ext = os.path.splitext(asset)
if ext not in HTML_TAGS.keys():
raise IOError('Found a file matching "{}" ({}), but no known html tag '
'found for this extension "{}"'.format(filename_re,
asset,
ext))
return HTML_TAGS[ext].format(static(asset))
@register.simple_tag
def inline_asset(filename_re):
"""
The `{% inline_asset "<filename_re>" %}` tag is used to inline a specific
asset. File finding is implemented the same as the `link_asset` tag does.
Raises an error when zero or multiple files are found.
"""
asset, storage = find_asset(filename_re)
base, ext = os.path.splitext(asset)
if ext not in HTML_TAGS_INLINE.keys():
raise IOError('Found a file matching "{}" ({}), but no known inline '
'html tag found for the extension "{}"'.format(filename_re,
asset,
ext))
return HTML_TAGS_INLINE[ext].format(storage.open(asset).read())
@register.simple_tag
def asset_path(filename_re):
"""Return just the path, so you can use it in other tags."""
asset, storage = find_asset(filename_re)
return static(asset)
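# Hedged template usage sketch (Django template syntax; the {% load %} name
# depends on this module's file name, so it is an assumption here):
#   {% load grunted_assets %}
#   {% link_asset 'main.*\.css' %}
#   {% inline_asset 'critical.*\.js' %}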
|
from frontend import app
from flask import render_template
@app.route('/')
def index():
return render_template('index.html')
|
import argparse
import requests
import sys
import re
import warnings
import multiprocessing as mp
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import xmltodict
def get_desc():
desc = 'Convert between various gene or protein IDs'
return desc
def parse_args(test_args=None, subparsers=None):
# desc
desc = get_desc()
epi = """DESCRIPTION:
Convert between various gene or protein IDs using entrez API requests.
Entrez requests can be done in parallel.
IDs:
If the IDs are provided in a table, select the column delimiter and
column number (1-indexed).
The list of IDs can be provided via STDIN by using `STDIN`
"""
if subparsers:
parser = subparsers.add_parser('convert', description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
else:
parser = argparse.ArgumentParser(description=desc, epilog=epi,
formatter_class=argparse.RawTextHelpFormatter)
# args
io = parser.add_argument_group('Input/Output')
io.add_argument('IDs', metavar='IDs', type=str,
help='A file containing IDs. See description.')
io.add_argument('-m', '--method', default='Accession2Taxonomy',
choices=['Accession2Taxonomy',
'EntrezGene2Uniprot',
'Uniprot2EntrezGene',
'HGNC2EntrezGene',
'EntrezTranGene2EntrezGene'],
help='ID conversion method. (default: %(default)s)')
io.add_argument('-s', '--sep', default='\t',
help='Column separator (default: %(default)s)')
io.add_argument('-c', '--column', type=int, default=1,
help='Column containing the IDs (default: %(default)s)')
io.add_argument('-o', '--outfile', default='-',
help='Output file name; "-" if to STDOUT (default: %(default)s)')
io.add_argument('-e', '--email', default='dummyemail@dummybunny.info',
help='user email address (default: %(default)s)')
misc = parser.add_argument_group('Misc')
misc.add_argument('-p', '--procs', type=int, default=1,
help='Number of processors to use (default: %(default)s)')
# running test args
if test_args:
args = parser.parse_args(test_args)
return args
def main(args=None):
# Input
if args is None:
args = parse_args()
# reading IDs
IDs = read_IDs(args.IDs, sep=args.sep, column=args.column)
# conversion
IDs = get_conversion(IDs, args.email, args.method, procs=args.procs)
# writing IDs
write_IDs(IDs, args.outfile)
def write_IDs(IDs, outfile='-'):
"""writing out IDs
"""
if outfile == '-':
outF = sys.stdout
else:
outF = open(outfile, 'w')
header = '\t'.join(['orig_ID', 'new_ID'])
outF.write(header + '\n')
for x in IDs:
outF.write('\t'.join(x) + '\n')
if outfile != '-':
outF.close()
def get_conversion(IDs, email, method, procs=1):
conv = Conversion(email)
    if procs < 2:
        procs = 1  # Pool(None) would mean "use all cores", not serial execution
    pool = mp.Pool(processes=procs)
if method.lower() == 'accession2taxonomy':
IDs = pool.map(conv.convert_accession_to_taxid, IDs)
elif method.lower() == 'entrezgene2uniprot':
IDs = pool.map(conv.convert_entrez_to_uniprot, IDs)
    elif method.lower() == 'uniprot2entrezgene':
        IDs = pool.map(conv.convert_uniprot_to_entrez, IDs)
    elif method.lower() == 'hgnc2entrezgene':
        IDs = pool.map(conv.convert_hgnc_to_entrez, IDs)
    elif method.lower() == 'entreztrangene2entrezgene':
        IDs = pool.map(conv.convert_ensembl_to_entrez, IDs)
else:
msg = 'Method "{}" not recognized'
raise IOError(msg.format(method))
return IDs
def read_IDs(infile, sep='\t', column=1):
"""Reading in IDs
"""
column = column - 1
if infile == 'STDIN':
inF = sys.stdin
else:
inF = open(infile, 'r')
IDs = []
for line in inF:
line = line.rstrip().split(sep)
IDs.append(line[column])
if infile != 'STDIN':
inF.close()
return IDs
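# Hedged CLI sketch (script and file names illustrative): convert accession
# IDs taken from column 2 of a tab-separated table, with 4 parallel
# entrez requests:
#   python convert.py ids.tsv -c 2 -m Accession2Taxonomy -p 4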
class Conversion(object):
def __init__(self, email):
"""email is required
"""
self.params = {}
self.email = email
self.params['tool'] = 'PyEntrez'
if re.match(r"[^@]+@[^@]+\.[^@]+", self.email):
pass
else:
raise ValueError("Enter a valid Email Address")
self.params["email"] = email
self.options = urlencode(self.params, doseq=True)
def convert_ensembl_to_entrez(self, ensembl):
"""Convert Ensembl Id to Entrez Gene Id
"""
        if 'ENST' not in ensembl:
            raise IndexError('not an Ensembl transcript ID: {}'.format(ensembl))
        # Submit request to NCBI eutils/Gene database
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(ensembl)
r = requests.get(server, headers={"Content-Type": "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
# Process Request
response = r.text
info = xmltodict.parse(response)
try:
geneId = info['eSearchResult']['IdList']['Id']
except TypeError:
sys.stderr.write('WARNING: No Entrez ID for "{}"\n'.format(ensembl))
geneId = None
return [ensembl, geneId]
def convert_hgnc_to_entrez(self, hgnc):
"""Convert HGNC Id to Entrez Gene Id
"""
entrezdict = {}
server = "http://rest.genenames.org/fetch/hgnc_id/{0}".format(hgnc)
r = requests.get(server, headers={ "Content-Type" : "application/json"})
if not r.ok:
r.raise_for_status()
sys.exit()
response = r.text
info = xmltodict.parse(response)
try:
for data in info['response']['result']['doc']['str']:
if data['@name'] == 'entrez_id':
entrezdict[data['@name']] = data['#text']
if data['@name'] == 'symbol':
entrezdict[data['@name']] = data['#text']
except KeyError:
sys.stderr.write('WARNING: No Entrez ID for "{}"\n'.format(hgnc))
entrezdict = None
try:
entrezdict = entrezdict['entrez_id']
except (KeyError, TypeError):
entrezdict = None
return [hgnc, entrezdict]
def convert_entrez_to_uniprot(self, entrez):
"""Convert Entrez Id to Uniprot Id
"""
server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez)
r = requests.get(server, headers={ "Content-Type" : "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
response = r.text
info = xmltodict.parse(response)
try:
data = info['uniprot']['entry']['accession'][0]
except TypeError:
data = info['uniprot']['entry'][0]['accession'][0]
return [entrez, data]
def convert_uniprot_to_entrez(self, uniprot):
"""Convert Uniprot Id to Entrez Id
"""
# Submit request to NCBI eutils/Gene Database
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?" + self.options + "&db=gene&term={0}".format(uniprot)
r = requests.get(server, headers={ "Content-Type" : "text/xml"})
if not r.ok:
r.raise_for_status()
sys.exit()
# Process Request
response = r.text
info = xmltodict.parse(response)
        geneId = info['eSearchResult']['IdList']['Id']
        # xmltodict returns a plain string for a single hit and a list for
        # several; if more than one result is returned, check which Entrez Id
        # maps back to the same uniprot Id entered
        if isinstance(geneId, list):
            for x in geneId:
                c = self.convert_entrez_to_uniprot(x)[1]
                if c.lower() == uniprot.lower():
                    return [uniprot, x]
            return [uniprot, None]  # no candidate mapped back to the query
        else:
            return [uniprot, geneId]
def convert_accession_to_taxid(self, accessionid):
"""Convert Accession Id to Tax Id
"""
# Submit request to NCBI eutils/Taxonomy Database
server = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?" + self.options + "&db=nuccore&id={0}&retmode=xml".format(accessionid)
r = requests.get(server, headers={ "Content-Type" : "text/xml"})
if not r.ok:
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
sys.stderr.write('WARNING: No taxonomy ID for "{}"\n'.format(accessionid))
return [accessionid, None]
# Process Request
response = r.text
records = xmltodict.parse(response)
try:
for i in records['GBSet']['GBSeq']['GBSeq_feature-table']['GBFeature']['GBFeature_quals']['GBQualifier']:
for key, value in i.items():
if value == 'db_xref':
taxid = i['GBQualifier_value']
taxid = taxid.split(':')[1]
return [accessionid, taxid]
        except (KeyError, TypeError):
for i in records['GBSet']['GBSeq']['GBSeq_feature-table']['GBFeature'][0]['GBFeature_quals']['GBQualifier']:
for key, value in i.items():
if value == 'db_xref':
taxid = i['GBQualifier_value']
taxid = taxid.split(':')[1]
return [accessionid, taxid]
return [accessionid, None]
|
import os
import sys
import setuptools
from distutils.command.clean import clean as _clean
from distutils.command.build import build as _build
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.build_ext import build_ext as _build_ext
try:
import multiprocessing
assert multiprocessing
except ImportError:
pass
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(filename):
with open(os.path.join(os.getcwd(),
'requirements',
filename)) as fp:
return filter(None, [strip_comments(l)
for l in fp.readlines()])
setup_ext = {}
if os.path.isfile('gulpfile.js'):
    # if gulpfile.js exists, build/minify the front-end assets
def gulp_build(done=[]):
if not done:
if os.system('npm install '
'--disturl=https://npm.taobao.org/dist '
'--registry=https://registry.npm.taobao.org'):
sys.exit(1)
if os.system('bower install'):
sys.exit(1)
if os.system('gulp build'):
sys.exit(1)
done.append(1)
def gulp_clean(done=[]):
if not done:
if os.system('npm install '
'--disturl=https://npm.taobao.org/dist '
'--registry=https://registry.npm.taobao.org'):
sys.exit(1)
if os.system('gulp clean'):
sys.exit(1)
done.append(1)
class build(_build):
sub_commands = _build.sub_commands[:]
# force to build ext
for ix, (name, checkfunc) in enumerate(sub_commands):
if name == 'build_ext':
sub_commands[ix] = (name, lambda self: True)
class build_ext(_build_ext):
def run(self):
gulp_build()
_build_ext.run(self)
class sdist(_sdist):
def run(self):
gulp_build()
_sdist.run(self)
class clean(_clean):
def run(self):
_clean.run(self)
gulp_clean()
setup_ext = {'cmdclass': {'sdist': sdist,
'clean': clean,
'build': build,
'build_ext': build_ext}}
setup_params = dict(
name="qsapp-suibe",
url="http://wiki.yimiqisan.com/",
version='1.0',
author="qisan",
author_email="qisanstudio@gmail.com",
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=reqs('install.txt'))
setup_params.update(setup_ext)
if __name__ == '__main__':
setuptools.setup(**setup_params)
|
""" _______
|_TO_DO_|
Be sure to do powerups for guns and stuff
Be sure to start work on a light bike game
SPACE FIGHTERS! Version 0.1 BETA!
"""
import pygame, sys, time, random
from pygame.locals import *
pygame.init()
disappear = 0
WHITE = (255,255,255)
S1speed = 3
S2speed = 3
STARTTIME = time.time()
YELLOW = (255,255,0)
RED = (255, 0 ,0)
BLUE = (0, 0, 255)
ASTROIDCOLOR = (169, 169, 169)
BACKGROUND = (0,0,0)
WINDOWWIDTH = 1350
WINDOWHEIGHT = 400
ASTNUM = 25
windowSurface = pygame.display.set_mode([WINDOWWIDTH, WINDOWHEIGHT])
pygame.display.set_caption('CHANGE THIS LATER')
astspeed = []
powerPill = []
PPnum = 9
astroid = []
power = []
speedKeep = []
for i in range(0,ASTNUM):
Ay = random.randrange(0,WINDOWHEIGHT-100)
Ax = random.randrange(60,WINDOWWIDTH-100)#where the astroid is
astroid.append(pygame.Rect(Ax,Ay,70,60))
astspeed.append(random.randrange(1,5))
AstS = random.randrange(20,80)
astroid[i].size = (AstS,AstS)#speaks for itself
for i in range (0,PPnum):#power pill stuff
Px = random.randrange(100,WINDOWWIDTH-100)
Py = random.randrange(0, WINDOWHEIGHT)
powerPill.append(pygame.Rect(Px,Py,5,5))
# pygame.draw.rect(windowSurface,WHITE, powerPill[i])
    power.append(1)  # 1 = pill still active; the original bare `power.append`
                     # was a no-op and left power shorter than powerPill
Ship1 = pygame.Rect(20, 200,10, 10)
Ship2 = pygame.Rect(WINDOWWIDTH - 30,200,10,10)
pygame.key.set_repeat(50,50)
while True:#Game loop. everything after this has to be indented
currenttime = time.time()
elapsedtime = currenttime -STARTTIME
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
        if event.type == KEYDOWN:
pygame.draw.rect(windowSurface, BACKGROUND, Ship2)
pygame.draw.rect(windowSurface, BACKGROUND, Ship1)
key = pygame.key.name(event.key)
if(key == 'd'):
Ship1.left += S1speed
elif(key == 's'):
Ship1.bottom += S1speed
if Ship1.bottom >= WINDOWHEIGHT:
Ship1.bottom = WINDOWHEIGHT-2
elif(key == 'a'):
Ship1.left -= S1speed
if Ship1.left <= 0:
Ship1.left = 0
elif(key == 'w'):
Ship1.bottom -= S1speed
if Ship1.top <= 0:
Ship1.top = 0
if(key == 'right'):
Ship2.left += S2speed
if Ship2.right >= WINDOWWIDTH:
Ship2.right = WINDOWWIDTH - 2
elif(key == 'down'):
Ship2.bottom += S2speed
if Ship2.bottom >= WINDOWHEIGHT:
Ship2.bottom = WINDOWHEIGHT-2
elif(key == 'left'):
Ship2.left -= S2speed
elif(key == 'up'):
Ship2.bottom -= S2speed
if Ship2.top <= 0:
Ship2.top = WINDOWHEIGHT - 400
print(S1speed)
if Ship1.right >= WINDOWWIDTH:
# initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
        myfont = pygame.font.SysFont(0,90)#settings for the font itself
        # render text
        label = myfont.render("RED SHIP WINS!!!", 1,RED)#("text", antialias, color)
        textrec = label.get_rect()
        textrec.top=WINDOWHEIGHT//2#putting the textrec on the screen
        textrec.left=200#above
        windowSurface.blit(label,textrec)#putting the text rec onto the surface
pygame.display.update()
time.sleep(5)
pygame.quit()
sys.exit()
if Ship2.left <= 0:
# initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
        myfont2 = pygame.font.SysFont(0,90)#settings for the font itself
        # render text
        label = myfont2.render("YELLOW SHIP WINS!!!", 1,YELLOW)#("text", antialias, color)
        textrec = label.get_rect()
        textrec.top=200#putting the textrec on the screen
        textrec.left=200#above
        windowSurface.blit(label,textrec)#putting the text rec onto the surface
pygame.display.update()
time.sleep(5)
pygame.quit()
sys.exit()
pygame.draw.rect(windowSurface, YELLOW, Ship2)
pygame.draw.rect(windowSurface, RED, Ship1)
#print(astroid.left)
pygame.display.update()
#============================================================#
powerloc = Ship1.collidelist(powerPill)
if powerloc >= 0:
if power[powerloc] == 1:
S1speed += 1
power[powerloc] = 0
pygame.draw.rect(windowSurface,BACKGROUND,powerPill[powerloc])
'''for i in range(0,len(powerPill)):
if power[i] > 0:
pygame.draw.rect(windowSurface,WHITE, powerPill[i])
'''
for i in range(0,ASTNUM):
pygame.draw.rect(windowSurface, BACKGROUND, astroid[i])
if astroid[i].top >= WINDOWHEIGHT:
astroid[i].bottom = 0
astroid[i].bottom += astspeed[i]
pygame.draw.rect(windowSurface, ASTROIDCOLOR,astroid[i])
if Ship1.collidelist(astroid) > -1:
pygame.draw.rect(windowSurface, BACKGROUND, Ship1)
Ship1.left = 0
if Ship2.collidelist(astroid) > -1:
pygame.draw.rect(windowSurface, BACKGROUND, Ship2)
Ship2.right = WINDOWWIDTH
    # for i in range(0,PPnum):
    #     pygame.draw.rect(windowSurface, WHITE, powerPill[i])
time.sleep(.05)
|
from __future__ import division, absolute_import, print_function, unicode_literals
from oucfeed.crawler import util
from oucfeed.crawler.newsspider import NewsSpider
class Spider(NewsSpider):
"""继续教育学院
这个网站的列表页有一个和其他的不太一样,日期时间那里,看上去很明显
难以用同一个XPath提取,因此放弃从列表页提取日期时间
注意这个网站使用了<base>标签,相对url的解析需参考其中的url
"""
name = "院系/继续"
list_urls = [
"http://web.ouc.edu.cn/jxjy/xydt/list.htm",
"http://web.ouc.edu.cn/jxjy/tzgg/list.htm",
"http://web.ouc.edu.cn/jxjy/xlzs/list.htm",
"http://web.ouc.edu.cn/jxjy/pxxm/list.htm",
"http://web.ouc.edu.cn/jxjy/jxyj/list.htm",
"http://web.ouc.edu.cn/jxjy/gzzd/list.htm",
"http://web.ouc.edu.cn/jxjy/zyxz/list.htm",
]
list_extract_scope = "#tpl_w33"
list_extract_field = {
'link': "a",
#'datetime': ".//div/text()[last()] | .//span[@class=' articlelist1_issuetime ']/text()",
'category': "//div[@frag='窗口32']",
'title': "a",
}
item_url_pattern = r"http://web.ouc.edu.cn/jxjy/.*/page\.htm"
item_extract_scope = "[frag='窗口2']"
item_extract_field = {
'datetime': ".biaoti12_red:nth-child(1)",
'title': ".biaoti",
'content': "td.article",
}
datetime_format = "%Y-%m-%d"
def process_link(self, link):
return util.normalize_url(link, "http://web.ouc.edu.cn/jxjy/")
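# Illustrative only, assuming util.normalize_url resolves a relative href
# urljoin-style against the given base (hypothetical input/output):
#     Spider().process_link("tzgg/page.htm")
#     # -> "http://web.ouc.edu.cn/jxjy/tzgg/page.htm"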
|
from django.contrib import admin
class DepositWithdrawalFilter(admin.SimpleListFilter):
"""
A simple filter to select deposits, withdrawals or empty transactions
"""
title = 'Transaction type'
parameter_name = 'amount'
def lookups(self, request, model_admin):
"""
Tuples with values for url and display term
"""
return (
('positive', 'Deposit'),
('negative', 'Withdrawal'),
('empty', 'Empty')
)
def queryset(self, request, queryset):
if self.value() == 'positive':
return queryset.filter(amount__gt=0)
if self.value() == 'negative':
return queryset.filter(amount__lt=0)
if self.value() == 'empty':
return queryset.filter(amount=0)
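# A minimal usage sketch (the Transaction model and admin module here are
# hypothetical, not part of this file): list_filter hooks the filter into
# the admin changelist sidebar.
#
#     from django.contrib import admin
#     from .models import Transaction
#
#     @admin.register(Transaction)
#     class TransactionAdmin(admin.ModelAdmin):
#         list_filter = (DepositWithdrawalFilter,)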
|
import weakref
class Signal:
def __init__(self):
self.__receivers = []
def connect(self, receiver, weak=True):
lookup_key = self.__make_id(receiver)
if weak:
receiver = weakref.ref(receiver, self.disconnect)
if lookup_key in self.__receivers:
return
self.__receivers.append((lookup_key, receiver))
def disconnect(self, receiver=None):
lookup_key = self.__make_id(receiver)
for index in range(len(self.__receivers)):
r_key, _ = self.__receivers[index]
if r_key == lookup_key:
del self.__receivers[index]
break
    @staticmethod
    def __make_id(target):
        # Bound methods need a composite key (instance id, function id);
        # 'im_func'/'im_self' are the Python 2 attribute names,
        # '__func__'/'__self__' the Python 3 ones.
        if hasattr(target, 'im_func'):
            return id(target.im_self), id(target.im_func)
        if hasattr(target, '__func__'):
            return id(target.__self__), id(target.__func__)
        return id(target)
    def __lived_receivers(self):
        receivers = []
        for _, receiver in self.__receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference; a dead target yields None.
                receiver = receiver()
            if receiver is not None:
                receivers.append(receiver)
        return receivers
def __call__(self, *args, **kwargs):
for receiver in self.__lived_receivers():
receiver(*args, **kwargs)
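if __name__ == '__main__':
    # Minimal usage sketch: connect a receiver and fire the signal.
    # weak=False keeps a strong reference; a weak reference to this local
    # function would be collected immediately.
    sig = Signal()

    def on_event(value):
        print('received', value)

    sig.connect(on_event, weak=False)
    sig(42)  # prints: received 42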
|
import pytest
from protolite import encoder
class decoding(object):
message_foo = dict([
(1, dict([
('type', 'string'),
('name', 'body'),
('scope', 'optional'),
])),
(2, dict([
('type', 'string'),
('name', 'messages'),
('scope', 'repeated'),
])),
])
message_bar = dict([
(1, dict([
('type', 'enum'),
('name', 'type'),
('scope', 'optional'),
])),
(4, dict([
('type', 'embedded'),
('name', 'message_foo'),
('message', message_foo),
('scope', 'optional'),
])),
])
message_baz = dict([
(1, dict([
('type', 'embedded'),
('name', 'message_bar'),
('message', message_bar),
('scope', 'optional'),
])),
(3, dict([
('type', 'uint64'),
('name', 'baz_id'),
('scope', 'optional'),
])),
])
message_sna = dict([
(1, dict([
('type', 'enum'),
('name', 'type'),
('scope', 'optional'),
])),
(8, dict([
('type', 'embedded'),
('name', 'message_baz'),
('message', message_baz),
('scope', 'optional'),
])),
])
foo = dict([
(1, dict([
('type', 'uint64'),
('name', 'foo_id'),
('scope', 'optional'),
])),
(2, dict([
('type', 'bool'),
('name', 'is_foo'),
('scope', 'optional'),
])),
(3, dict([
('type', 'uint32'),
('name', 'foo_count'),
('scope', 'optional'),
])),
(305, dict([
('type', 'int32'),
('name', 'foo_value'),
('scope', 'optional'),
])),
])
bar = dict([
(1, dict([
('type', 'uint64'),
('name', 'bar_id'),
('scope', 'optional'),
])),
(2, dict([
('type', 'float'),
('name', 'bar_value'),
('scope', 'optional'),
])),
(3, dict([
('type', 'double'),
('name', 'bar_result'),
('scope', 'optional'),
])),
(5, dict([
('type', 'embedded'),
('name', 'foos'),
('message', foo),
('scope', 'repeated'),
])),
])
sna = dict([
(1, dict([
('type', 'uint64'),
('name', 'sna_ids'),
('scope', 'repeated'),
])),
(2, dict([
('type', 'double'),
('name', 'snas'),
('scope', 'repeated'),
])),
(3, dict([
('type', 'float'),
('name', 'foos'),
('scope', 'repeated'),
])),
(4, dict([
('type', 'uint32'),
('name', 'counts'),
('scope', 'repeated'),
])),
])
class encoding(object):
message_foo = dict([
('body', dict([
('type', 'string'),
('field', 1),
('scope', 'optional'),
])),
('messages', dict([
('type', 'string'),
('field', 2),
('scope', 'repeated'),
])),
])
message_bar = dict([
('type', dict([
('type', 'enum'),
('field', 1),
('scope', 'optional'),
])),
('message_foo', dict([
('type', 'embedded'),
('field', 4),
('message', message_foo),
('scope', 'optional'),
])),
])
message_baz = dict([
('message_bar', dict([
('type', 'embedded'),
('field', 1),
('message', message_bar),
('scope', 'optional'),
])),
('baz_id', dict([
('type', 'uint64'),
('field', 3),
('scope', 'optional'),
])),
])
foo = dict([
('foo_id', dict([
('type', 'uint64'),
('field', 1),
('scope', 'optional'),
])),
('is_foo', dict([
('type', 'bool'),
('field', 2),
('scope', 'optional'),
])),
('foo_count', dict([
('type', 'uint32'),
('field', 3),
('scope', 'optional'),
])),
('foo_value', dict([
('type', 'int32'),
('field', 305),
('scope', 'optional'),
])),
])
bar = dict([
('bar_id', dict([
('type', 'uint64'),
('field', 1),
('scope', 'optional'),
])),
('bar_value', dict([
('type', 'float'),
('field', 2),
('scope', 'optional'),
])),
('bar_result', dict([
('type', 'double'),
('field', 3),
('scope', 'optional'),
])),
('foos', dict([
('type', 'embedded'),
('field', 5),
('message', foo),
('scope', 'repeated'),
])),
])
message_sna = dict([
('type', dict([
('type', 'enum'),
('field', 1),
('scope', 'optional'),
])),
('message_baz', dict([
('type', 'embedded'),
('field', 8),
('message', message_baz),
('scope', 'optional'),
])),
])
sna = dict([
('sna_ids', dict([
('type', 'uint64'),
('field', 1),
('scope', 'repeated'),
])),
('snas', dict([
('type', 'double'),
('field', 2),
('scope', 'repeated'),
])),
('foos', dict([
('type', 'float'),
('field', 3),
('scope', 'repeated'),
])),
('counts', dict([
('type', 'uint32'),
('field', 4),
('scope', 'repeated'),
])),
])
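# Wire-format refresher for the hand-written byte strings below: a protobuf
# key is (field_number << 3) | wire_type, itself encoded as a varint. For
# field 305 with wire type 0 (varint), (305 << 3) | 0 = 2440, whose 7-bit
# little-endian varint encoding is the two bytes '\x88\x13' used next.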
def test_decode_key_as_varint():
data = '\x88\x13\x08'
msg = encoder.decode(decoding.foo, data)
want = dict([
('foo_value', 8),
])
assert want == msg
def test_encode_key_as_varint():
# Don't check against data string since protolite doesn't use OrderedDict
msg = dict([
('foo_value', 8),
])
data = encoder.encode(encoding.foo, msg)
res = encoder.decode(decoding.foo, data)
assert msg == res
def test_decode_int32():
data = '\x18\x7f'
msg = encoder.decode(decoding.foo, data)
want = dict([('foo_count', 127)])
assert want == msg
def test_encode_int32():
# Don't check against data string since protolite doesn't use OrderedDict
msg = dict([('foo_count', 127)])
data = encoder.encode(encoding.foo, msg)
res = encoder.decode(decoding.foo, data)
assert msg == res
def test_decode_uint64():
data = '\x08\x80\xa0\x88\x84\x80\x8a\xa5\xfe\r'
msg = encoder.decode(decoding.bar, data)
want = dict([
('bar_id', 1007843487950966784L),
])
assert want == msg
def test_encode_uint64():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('bar_id', 1007843487950966784L),
])
data = encoder.encode(encoding.bar, msg)
res = encoder.decode(decoding.bar, data)
assert msg == res
def test_encode_uint64_negative():
with pytest.raises(ValueError) as einfo:
msg = dict([
('bar_id', -155496620801056360),
])
encoder.encode(encoding.bar, msg)
want = 'ValueError: uint64 value cannot be negative: -155496620801056360'
assert einfo.exconly() == want
def test_decode_bool():
data = '\x10\x00'
msg = encoder.decode(decoding.foo, data)
want = dict([('is_foo', False)])
assert want == msg
def test_encode_bool():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([('is_foo', False)])
data = encoder.encode(encoding.foo, msg)
res = encoder.decode(decoding.foo, data)
assert msg == res
def test_decode_enum():
data = '\x08\x07'
msg = encoder.decode(decoding.message_bar, data)
want = dict([('type', 7)])
assert want == msg
def test_encode_enum():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([('type', 7)])
data = encoder.encode(encoding.message_bar, msg)
res = encoder.decode(decoding.message_bar, data)
assert msg == res
def test_decode_repeated_varint():
data = '\x08\n\x08\x14'
msg = encoder.decode(decoding.sna, data)
want = dict([
('sna_ids', [10, 20]),
])
assert want == msg
def test_encode_repeated_varint():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('sna_ids', [10, 20]),
])
data = encoder.encode(encoding.sna, msg)
res = encoder.decode(decoding.sna, data)
assert msg == res
def test_encode_repeated_uint_negative():
with pytest.raises(ValueError) as einfo:
msg = dict([
('counts', [1, -2, 3]),
])
encoder.encode(encoding.sna, msg)
want = 'ValueError: uint32 value cannot be negative: -2'
assert einfo.exconly() == want
def test_decode_64bit():
data = '\x19\x00\x00\x00\xe0%\x99^\xc0'
msg = encoder.decode(decoding.bar, data)
want = dict([
('bar_result', -122.39293670654297),
])
assert want == msg
def test_encode_64bit():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('bar_result', -122.39293670654297),
])
data = encoder.encode(encoding.bar, msg)
res = encoder.decode(decoding.bar, data)
assert msg == res
def test_decode_64bit_repeated():
data = '\x11\x00\x00\x00\xe0%\x99^\xc0\x11\x8fB\x9a\xf4\xdcZm@'
msg = encoder.decode(decoding.sna, data)
want = dict([
('snas', [-122.39293670654297, 234.839472104348218943324]),
])
assert want == msg
def test_encode_64bit_repeated():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('snas', [-122.39293670654297, 234.839472104348218943324]),
])
data = encoder.encode(encoding.sna, msg)
res = encoder.decode(decoding.sna, data)
assert msg == res
def test_decode_delimited_length_as_varint():
dec_message = dict([
(1, dict([
('type', 'string'),
('name', 'first_name'),
('scope', 'optional'),
])),
])
dec_proto = dict([
(305, dict([
('type', 'embedded'),
('name', 'dec_message'),
('message', dec_message),
('scope', 'optional'),
])),
])
data = '\x8a\x13\xcf\t'
msg = encoder.decode(dec_proto, data)
# we don't care about the items, only the value of the length
want = dict([
('dec_message', dict()),
])
assert want == msg
def test_encode_delimited_length_as_varint():
# Don't check against data string since encoder doesn't use OrderedDict
# We need lots of items to create a large length value
def _index():
for i in range(0, 22):
for j in range(32, 127):
yield j+(127*i), chr(j)*(i+1)
enc_message = dict()
for i, c in _index():
enc_message[c] = dict([
('type', 'string'),
('field', i),
('scope', 'optional'),
])
enc_proto = dict([
('message_foo', dict([
('type', 'embedded'),
('field', 305),
('message', enc_message),
('scope', 'optional'),
])),
])
dec_message = dict()
for i, c in _index():
dec_message[i] = dict([
('type', 'string'),
('name', c),
('scope', 'optional'),
])
dec_proto = dict([
(305, dict([
('type', 'embedded'),
('name', 'message_foo'),
('message', dec_message),
('scope', 'optional'),
])),
])
msg = dict()
for i, c in _index():
msg[c] = str(i)
msg = dict([
('message_foo', msg),
])
data = encoder.encode(enc_proto, msg)
res = encoder.decode(dec_proto, data)
assert msg == res
def test_decode_embedded():
data = '\x08\x08B\x12\n\r\x08\x04"\t\n\x07foobody\x18\xb9`'
msg = encoder.decode(decoding.message_sna, data)
want = dict([
('message_baz', dict([
('baz_id', 12345),
('message_bar', dict([
('message_foo', dict([
('body', 'foobody'),
])),
('type', 4),
])),
])),
('type', 8),
])
assert want == msg
def test_encode_embedded():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('message_baz', dict([
('baz_id', 12345),
('message_bar', dict([
('message_foo', dict([
('body', 'foobody'),
])),
('type', 4),
])),
])),
('type', 8),
])
data = encoder.encode(encoding.message_sna, msg)
res = encoder.decode(decoding.message_sna, data)
assert msg == res
def test_decode_string():
    data = '\n\x0bhello world'  # 0x0a = key (field 1, wire type 2), 0x0b = length 11
msg = encoder.decode(decoding.message_foo, data)
want = dict([
('body', 'hello world'),
])
assert want == msg
def test_encode_string():
# Don't check against data string since protolite doesn't use OrderedDict
msg = dict([
('body', 'hello world'),
])
data = encoder.encode(encoding.message_foo, msg)
res = encoder.decode(decoding.message_foo, data)
assert msg == res
msg = dict([
('body', u'\u03b3\u03b5\u03b9\u03b1'),
])
data = encoder.encode(encoding.message_foo, msg)
res = encoder.decode(decoding.message_foo, data)
assert msg == res
def test_decode_embedded_repeated():
data = '\x08\x1e*\x02\x08\n*\x02\x08\x14'
msg = encoder.decode(decoding.bar, data)
want = dict([
('bar_id', 30),
('foos', [
dict([('foo_id', 10)]),
dict([('foo_id', 20)]),
]),
])
assert want == msg
def test_encode_embedded_repeated():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('bar_id', 30),
('foos', [
dict([('foo_id', 10)]),
dict([('foo_id', 20)]),
]),
])
data = encoder.encode(encoding.bar, msg)
res = encoder.decode(decoding.bar, data)
assert msg == res
def test_decode_string_repeated():
data = '\x12\x03bar\x12\x03baz'
msg = encoder.decode(decoding.message_foo, data)
want = dict([
('messages', ['bar', 'baz']),
])
assert want == msg
def test_encode_string_repeated():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('messages', ['bar', 'baz']),
])
data = encoder.encode(encoding.message_foo, msg)
res = encoder.decode(decoding.message_foo, data)
assert msg == res
def test_decode_32bit():
data = '\x15/\xc9\xf4\xc2'
msg = encoder.decode(decoding.bar, data)
want = dict([
('bar_value', -122.39293670654297),
])
assert want == msg
def test_encode_32bit():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('bar_value', -122.39293670654297),
])
data = encoder.encode(encoding.bar, msg)
res = encoder.decode(decoding.bar, data)
assert msg == res
def test_decode_32bit_repeated():
data = '\x1d/\xc9\xf4\xc2\x1d\xeb\xe2V?'
msg = encoder.decode(decoding.sna, data)
want = dict([
('foos', [-122.39293670654297, 0.8393999934196472]),
])
assert want == msg
def test_encode_32bit_repeated():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('foos', [-122.39293670654297, 0.8393999934196472]),
])
data = encoder.encode(encoding.sna, msg)
res = encoder.decode(decoding.sna, data)
assert msg == res
|
import time
from lib.query import Query
class TopicModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "topic"
super(TopicModel, self).__init__()
def get_all_topics(self, num = 16, current_page = 1):
join = "LEFT JOIN user AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN user AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "last_touched DESC, created DESC, last_replied_time DESC, id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_all_topics_by_node_slug(self, num = 16, current_page = 1, node_slug = None):
where = "node.slug = '%s'" % node_slug
join = "LEFT JOIN user AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN user AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "last_touched DESC, created DESC, last_replied_time DESC, id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_all_topics_count(self):
return self.count()
def get_user_all_topics(self, uid, num = 16, current_page = 1):
where = "topic.author_id = %s" % uid
join = "LEFT JOIN user AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN user AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
order = "id DESC"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def get_user_all_topics_count(self, uid):
where = "author_id = %s" % uid
return self.where(where).count()
def get_user_all_replied_topics(self, uid, num = 16, current_page = 1):
where = "reply.uid = %s" % uid
join = "LEFT JOIN reply ON topic.id = reply.tid LEFT JOIN user ON topic.uid = user.uid"
order = "topic.id DESC"
field = "*, topic.created as created"
group = "tid"
return self.where(where).order(order).join(join).field(field).group(group).pages(current_page = current_page, list_rows = num)
def get_topic_by_topic_id(self, topic_id):
where = "topic.id = %s" % topic_id
join = "LEFT JOIN user AS author_user ON topic.author_id = author_user.uid \
LEFT JOIN node ON topic.node_id = node.id \
LEFT JOIN user AS last_replied_user ON topic.last_replied_by = last_replied_user.uid"
field = "topic.*, \
author_user.username as author_username, \
author_user.nickname as author_nickname, \
author_user.avatar as author_avatar, \
author_user.uid as author_uid, \
author_user.reputation as author_reputation, \
node.name as node_name, \
node.slug as node_slug, \
last_replied_user.username as last_replied_username, \
last_replied_user.nickname as last_replied_nickname"
return self.where(where).join(join).field(field).find()
def add_new_topic(self, topic_info):
return self.data(topic_info).add()
def update_topic_by_topic_id(self, topic_id, topic_info):
where = "topic.id = %s" % topic_id
return self.where(where).data(topic_info).save()
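# Note: the where-clauses above splice values in with %-formatting, which is
# unsafe for untrusted input. A hedged sketch, assuming the underlying Query
# layer supported placeholder binding (hypothetical signature, not the real
# lib.query API):
#
#     self.where("topic.id = %s", (topic_id,))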
|
"""JSKOS mapping writer."""
from __future__ import unicode_literals, print_function
import json
import six
from .base import LinkWriter
name = 'jskos'
extension = '.ndjson'
class Writer(LinkWriter):
def expand_link(self, link, token, field):
if field in self.meta and self.meta[field]:
return {'uri': link.expand(token, self.meta[field])}
else:
return {'notation': link[token]}
def write_link(self, link):
fromSet = [self.expand_link(link, 'source', 'prefix')]
toSet = [self.expand_link(link, 'target', 'target')]
jskos = {
'type': [self.mapping_type()],
'from': {'memberSet': fromSet},
'to': {'memberSet': toSet},
}
if self.meta['sourceset']:
jskos['fromScheme'] = {'uri':self.meta['sourceset']}
if self.meta['targetset']:
jskos['toScheme'] = {'uri':self.meta['targetset']}
        self.print(six.u(json.dumps(jskos, sort_keys=True)))  # dumps takes no stream argument; emit one NDJSON line
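# One emitted NDJSON line might look like this (illustrative values only;
# the actual 'type' URI comes from self.mapping_type()):
#     {"from": {"memberSet": [{"notation": "612.3"}]},
#      "to": {"memberSet": [{"notation": "QP145"}]}, "type": ["..."]}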
|
import usb.core
from time import sleep
DELAY = 0.01
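# Each command below is a USB HID SET_REPORT control transfer:
# bmRequestType 0x21 (host-to-device, class request, to interface),
# bRequest 0x09 (SET_REPORT). The second payload byte selects the action:
# 0x01 down, 0x02 up, 0x04 left, 0x08 right, 0x10 fire, 0x20 stop.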
class rocket:
def __init__(self):
self.connect()
def connect(self):
self.r = usb.core.find(idVendor=0x2123, idProduct=0x1010)
self.r.set_configuration()
def up(self, seconds=DELAY):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x02,0x00,0x00,0x00,0x00,0x00,0x00])
if seconds > 0:
sleep(seconds)
self.stop()
def down(self, seconds=DELAY):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x01,0x00,0x00,0x00,0x00,0x00,0x00])
if seconds > 0:
sleep(seconds)
self.stop()
def left(self, seconds=DELAY):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x04,0x00,0x00,0x00,0x00,0x00,0x00])
if seconds > 0:
sleep(seconds)
self.stop()
def right(self, seconds=DELAY):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x08,0x00,0x00,0x00,0x00,0x00,0x00])
if seconds > 0:
sleep(seconds)
self.stop()
def stop(self):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x20,0x00,0x00,0x00,0x00,0x00,0x00])
def fire(self):
self.r.ctrl_transfer(0x21,0x09,0,0,[0x02,0x10,0x00,0x00,0x00,0x00,0x00,0x00])
sleep(4)
@staticmethod
def demo():
        r = rocket()  # __init__ already calls connect()
r.up(2)
r.fire()
r.left(2)
r.fire()
r.right(2)
r.fire()
r.down(2)
r.fire()
r.up(2)
if __name__ == '__main__':
    rocket.demo()
|
import math
from core import dthandler
from core.common import BaseHandler, authorized, clear_cache_by_pathlist, getAttr
from model.categories import Categories
from model.showtypes import ShowTypes
try:
import json
except:
import simplejson as json
class CategoryController(BaseHandler):
@authorized()
def get(self):
act = self.get_argument("act", '').encode('utf-8')
category_id = self.get_argument("id", '').encode('utf-8')
obj = None
if act == 'del':
if category_id:
Categories.delete(category_id)
clear_cache_by_pathlist(['/'])
self.set_header("Content-Type", "application/json")
self.write(json.dumps("OK"))
return
elif act == 'edit':
if category_id:
obj = Categories.get(category_id)
        # category list
        page = int(self.get_argument("page", 1))  # arguments arrive as strings
category = Categories.get_paged(page, getAttr('ADMIN_CATEGORY_NUM'))
total = int(math.ceil(Categories.count_all() / float(getAttr('ADMIN_CATEGORY_NUM'))))
if page == 1:
self.echo('admin_category.html', {
'title': "分类列表",
'objs': category,
'obj': obj,
'category_kv': Categories.get_all_kv(0),
'show_types': ShowTypes.get_all(),
'total': total,
}, layout='_layout_admin.html')
else:
result = {
'list': category,
'total': total,
}
self.set_header("Content-Type", "application/json")
self.write(json.dumps(result, default=dthandler))
return
@authorized()
def post(self):
try:
tf = {'true': 1, 'false': 0}
act = self.get_argument("act", '').encode('utf-8')
category_id = self.get_argument("id", '').encode('utf-8')
            father_category_id = self.get_argument("father_id", '0').encode('utf-8')  # string default; an int has no .encode()
category_name = self.get_argument("name", '').encode('utf-8')
show_type = self.get_argument("show_type", '').encode('utf-8')
display_order = self.get_argument("sort", '0').encode('utf-8')
allow_comment = tf[self.get_argument("allow_comment", "false").encode('utf-8')]
allow_publish = tf[self.get_argument("allow_publish", "false").encode('utf-8')]
description = self.get_argument("description", '').encode('utf-8')
except:
self.write(json.dumps("用户名、密码、验证码均为必填项!"))
return
if category_name:
params = {'category_id': category_id, 'father_category_id': father_category_id,
'category_name': category_name, 'show_type': show_type, 'display_order': display_order,
'allow_comment': allow_comment, 'allow_publish': allow_publish, 'description': description}
if act == 'add':
Categories.create(params)
if act == 'edit':
Categories.update(params)
clear_cache_by_pathlist(['/'])
self.set_header("Content-Type", "application/json")
self.write(json.dumps("OK"))
else:
self.set_header("Content-Type", "application/json")
self.write(json.dumps("参数异常"))
urls = [
    # category management
(r"/admin/category", CategoryController),
]
|
from future import standard_library
standard_library.install_aliases()
from io import BytesIO
import pytest
from django.core.files import File
from django.test import TestCase
from google.cloud.storage import Client
from google.cloud.storage.blob import Blob
from mixer.main import mixer
from mock import create_autospec
from mock import patch
from contentcuration.utils.gcs_storage import GoogleCloudStorage as gcs
class GoogleCloudStorageSaveTestCase(TestCase):
"""
Tests for GoogleCloudStorage.save().
"""
def setUp(self):
self.blob_class = create_autospec(Blob)
self.blob_obj = self.blob_class("blob", "blob")
self.mock_client = create_autospec(Client)
self.storage = gcs(client=self.mock_client())
self.content = BytesIO(b"content")
def test_calls_upload_from_file(self):
"""
Check if upload_from_file is called when we call GoogleCloudStorage.save().
"""
self.storage.save("myfile.jpg", self.content, blob_object=self.blob_obj)
# Check that blob.upload_from_file() has been called inside storage.save()
self.blob_obj.upload_from_file.assert_called()
def test_calls_upload_from_file_with_a_file_object_and_content_type(self):
"""
Check that we call upload_from_file with a file object and content type when
we call GoogleCloudStorage.save().
"""
self.storage.save("myfile.jpg", self.content, blob_object=self.blob_obj)
# Check that we pass self.content file_object to upload_from_file
self.blob_obj.upload_from_file.assert_called_once_with(self.content, content_type="image/jpeg")
def test_checks_does_not_upload_file_if_empty(self):
"""
Check that it doesn't call upload_from_file if the file is empty.
"""
content = BytesIO()
self.storage.save("myfile.jpg", content, blob_object=self.blob_obj)
# check that upload_from_file is never called
self.blob_obj.upload_from_file.assert_not_called()
def test_uploads_max_age_of_5_if_content_database(self):
"""
Check that we set a max-age of 5 if we're uploading a content database
"""
filename = "content/databases/myfile.sqlite3"
self.storage.save(filename, self.content, blob_object=self.blob_obj)
assert "max-age=5" in self.blob_obj.cache_control
def test_uploads_cache_control_private_if_content_database(self):
"""
        Check that we set a cache-control of private if we're uploading a content database.
This ensures that no proxy will cache this file.
"""
filename = "content/databases/myfile.sqlite3"
self.storage.save(filename, self.content, blob_object=self.blob_obj)
assert "private" in self.blob_obj.cache_control
@patch("contentcuration.utils.gcs_storage.BytesIO")
@patch("contentcuration.utils.gcs_storage.GoogleCloudStorage._is_file_empty", return_value=False)
    def test_gzip_if_content_database(self, file_empty_mock, bytesio_mock):
        """
        Check that a content database is gzip-encoded on upload and that
        a BytesIO buffer is created for the compressed payload.
        """
        # Note: stacked @patch decorators inject mocks bottom-up, hence
        # file_empty_mock (bottom patch) precedes bytesio_mock.
filename = "content/databases/myfile.sqlite3"
self.storage.save(filename, self.content, blob_object=self.blob_obj)
assert self.blob_obj.content_encoding == "gzip"
assert bytesio_mock.called
class GoogleCloudStorageOpenTestCase(TestCase):
"""
Tests for GoogleCloudStorage.open().
"""
class RandomFileSchema:
"""
A schema for a file we're about to upload.
"""
contents = str
filename = str
def setUp(self):
self.blob_class = create_autospec(Blob)
self.blob_obj = self.blob_class("blob", "blob")
self.mock_client = create_autospec(Client)
self.storage = gcs(client=self.mock_client())
self.local_file = mixer.blend(self.RandomFileSchema)
def test_raises_error_if_mode_is_not_rb(self):
"""
open() should raise an assertion error if passed in a mode flag that's not "rb".
"""
with pytest.raises(AssertionError):
self.storage.open("randfile", mode="wb")
def test_calls_blob_download_to_file(self):
"""
Check that open() eventually calls blob.download_to_file().
"""
self.storage.open(self.local_file.filename, blob_object=self.blob_obj)
# assert that we called download_from_file
self.blob_obj.download_to_file.assert_called()
def test_returns_django_file(self):
"""
Test that we return a Django File instance.
"""
f = self.storage.open(self.local_file.filename, blob_object=self.blob_obj)
assert isinstance(f, File)
        # This checks that an actual temp file was written on disk for the file.
assert f.name
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='freqopttest',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0',
description='Linear-time interpretable nonparametric two-sample test',
long_description=long_description,
# The project's main homepage.
url='https://github.com/wittawatj/interpretable-test',
# Author details
author='Wittawat Jitkrittum',
author_email='wittawatj@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
],
# What does your project relate to?
keywords='hypothesis-test kernel-methods machine-learning AI',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['data', '*.ex']),
# See https://www.python.org/dev/peps/pep-0440/#version-specifiers
    # compatible with 2.7+ including 3.x
python_requires='>= 2.7',
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
#py_modules=["gofte"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'theano', 'scipy', 'autograd', 'future'],
)
|
"""A chart parser and some grammars. (Chapter 22)"""
from collections import defaultdict
import urllib.request
import re
def Rules(**rules):
"""Create a dictionary mapping symbols to alternative sequences.
>>> Rules(A = "B C | D E")
{'A': [['B', 'C'], ['D', 'E']]}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
return rules
def Lexicon(**rules):
"""Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Art = "the | a | an")
{'Art': ['the', 'a', 'an']}
"""
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split('|')]
return rules
class Grammar:
def __init__(self, name, rules, lexicon):
"""A grammar has a set of rules and a lexicon."""
self.name = name
self.rules = rules
self.lexicon = lexicon
self.categories = defaultdict(list)
for lhs in lexicon:
for word in lexicon[lhs]:
self.categories[word].append(lhs)
def rewrites_for(self, cat):
"""Return a sequence of possible rhs's that cat can be rewritten as."""
return self.rules.get(cat, ())
def isa(self, word, cat):
"""Return True iff word is of category cat"""
return cat in self.categories[word]
def __repr__(self):
return '<Grammar {}>'.format(self.name)
E0 = Grammar('E0',
Rules( # Grammar for E_0 [Figure 22.4]
S='NP VP | S Conjunction S',
NP='Pronoun | Name | Noun | Article Noun | Digit Digit | NP PP | NP RelClause', # noqa
VP='Verb | VP NP | VP Adjective | VP PP | VP Adverb',
PP='Preposition NP',
RelClause='That VP'),
Lexicon( # Lexicon for E_0 [Figure 22.3]
Noun="stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east", # noqa
Verb="is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel", # noqa
Adjective="right | left | east | south | back | smelly",
Adverb="here | there | nearby | ahead | right | left | east | south | back", # noqa
Pronoun="me | you | I | it",
Name="John | Mary | Boston | Aristotle",
Article="the | a | an",
Preposition="to | in | on | near",
Conjunction="and | or | but",
Digit="0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
That="that"
))
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
Rules(
S='NP VP',
NP='Art N | Pronoun',
VP='V NP'),
Lexicon(
Art='the | a',
N='man | woman | table | shoelace | saw',
Pronoun='I | you | it',
V='saw | liked | feel'
))
E_NP_ = Grammar('E_NP_', # another trivial grammar for testing
Rules(NP='Adj NP | N'),
Lexicon(Adj='happy | handsome | hairy',
N='man'))
def generate_random(grammar=E_, s='S'):
"""Replace each token in s by a random entry in grammar (recursively).
This is useful for testing a grammar, e.g. generate_random(E_)"""
import random
def rewrite(tokens, into):
for token in tokens:
if token in grammar.rules:
rewrite(random.choice(grammar.rules[token]), into)
elif token in grammar.lexicon:
into.append(random.choice(grammar.lexicon[token]))
else:
into.append(token)
return into
return ' '.join(rewrite(s.split(), []))
class Chart:
"""Class for parsing sentences using a chart data structure. [Figure 22.7]
    >>> chart = Chart(E0)
>>> len(chart.parses('the stench is in 2 2'))
1
"""
def __init__(self, grammar, trace=False):
"""A datastructure for parsing a string; and methods to do the parse.
self.chart[i] holds the edges that end just before the i'th word.
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
self.grammar = grammar
self.trace = trace
def parses(self, words, S='S'):
"""Return a list of parses; words can be a list or string."""
if isinstance(words, str):
words = words.split()
self.parse(words, S)
# Return all the parses that span the whole input
# 'span the whole input' => begin at 0, end at len(words)
return [[i, j, S, found, []]
for (i, j, lhs, found, expects) in self.chart[len(words)]
# assert j == len(words)
if i == 0 and lhs == S and expects == []]
def parse(self, words, S='S'):
"""Parse a list of words; according to the grammar.
Leave results in the chart."""
self.chart = [[] for i in range(len(words)+1)]
self.add_edge([0, 0, 'S_', [], [S]])
for i in range(len(words)):
self.scanner(i, words[i])
return self.chart
def add_edge(self, edge):
"Add edge to chart, and see if it extends or predicts another edge."
start, end, lhs, found, expects = edge
if edge not in self.chart[end]:
self.chart[end].append(edge)
if self.trace:
print('Chart: added {}'.format(edge))
if not expects:
self.extender(edge)
else:
self.predictor(edge)
def scanner(self, j, word):
"For each edge expecting a word of this category here, extend the edge." # noqa
for (i, j, A, alpha, Bb) in self.chart[j]:
if Bb and self.grammar.isa(word, Bb[0]):
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
def predictor(self, edge):
"Add to chart any rules for B that could help extend this edge."
(i, j, A, alpha, Bb) = edge
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs])
def extender(self, edge):
"See what edges can be extended by this edge."
(j, k, B, _, _) = edge
for (i, j, A, alpha, B1b) in self.chart[j]:
if B1b and B == B1b[0]:
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
def CYK_parse(words, grammar):
"[Figure 23.5]"
# We use 0-based indexing instead of the book's 1-based.
N = len(words)
P = defaultdict(float)
# Insert lexical rules for each word.
for (i, word) in enumerate(words):
for (X, p) in grammar.categories[word]: # XXX grammar.categories needs changing, above
P[X, i, 1] = p
# Combine first and second parts of right-hand sides of rules,
# from short to long.
for length in range(2, N+1):
for start in range(N-length+1):
for len1 in range(1, length): # N.B. the book incorrectly has N instead of length
len2 = length - len1
for (X, Y, Z, p) in grammar.cnf_rules(): # XXX grammar needs this method
P[X, start, length] = max(P[X, start, length],
P[Y, start, len1] * P[Z, start+len1, len2] * p)
return P
examplePagesSet = ["https://en.wikipedia.org/wiki/", "Aesthetics", "Analytic_philosophy",
"Ancient_Greek", "Aristotle", "Astrology", "Atheism", "Baruch_Spinoza",
"Belief", "Betrand Russell", "Confucius", "Consciousness",
"Continental Philosophy", "Dialectic", "Eastern_Philosophy",
"Epistemology", "Ethics", "Existentialism", "Friedrich_Nietzsche",
"Idealism", "Immanuel_Kant", "List_of_political_philosophers", "Logic",
"Metaphysics", "Philosophers", "Philosophy", "Philosophy_of_mind", "Physics",
"Plato", "Political_philosophy", "Pythagoras", "Rationalism",
"Social_philosophy", "Socrates", "Subjectivity", "Theology",
"Truth", "Western_philosophy"]
def loadPageHTML(addressList):
"""Download HTML page content for every URL address passed as argument"""
contentDict = {}
for addr in addressList:
with urllib.request.urlopen(addr) as response:
raw_html = response.read().decode('utf-8')
        # Strip raw html of unnecessary content. Basically everything that isn't link or text
html = stripRawHTML(raw_html)
contentDict[addr] = html
return contentDict
def initPages(addressList):
"""Create a dictionary of pages from a list of URL addresses"""
pages = {}
for addr in addressList:
pages[addr] = Page(addr)
return pages
def stripRawHTML(raw_html):
"""Remove the <head> section of the HTML which contains links to stylesheets etc.,
and remove all other unnessecary HTML"""
# TODO: Strip more out of the raw html
return re.sub("<head>.*?</head>", "", raw_html, flags=re.DOTALL) # remove <head> section
def determineInlinks(page):
"""Given a set of pages that have their outlinks determined, we can fill
out a page's inlinks by looking through all other page's outlinks"""
inlinks = []
for addr, indexPage in pagesIndex.items():
if page.address == indexPage.address:
continue
elif page.address in indexPage.outlinks:
inlinks.append(addr)
return inlinks
def findOutlinks(page, handleURLs=None):
"""Search a page's HTML content for URL links to other pages"""
urls = re.findall(r'href=[\'"]?([^\'" >]+)', pagesContent[page.address])
if handleURLs:
urls = handleURLs(urls)
return urls
def onlyWikipediaURLS(urls):
"""Some example HTML page data is from wikipedia. This function converts
relative wikipedia links to full wikipedia URLs"""
wikiURLs = [url for url in urls if url.startswith('/wiki/')]
return ["https://en.wikipedia.org"+url for url in wikiURLs]
def expand_pages(pages):
"""From Textbook: adds in every page that links to or is linked from one of
the relevant pages."""
expanded = {}
for addr, page in pages.items():
if addr not in expanded:
expanded[addr] = page
for inlink in page.inlinks:
if inlink not in expanded:
expanded[inlink] = pagesIndex[inlink]
for outlink in page.outlinks:
if outlink not in expanded:
expanded[outlink] = pagesIndex[outlink]
return expanded
def relevant_pages(query):
"""Relevant pages are pages that contain the query in its entireity.
If a page's content contains the query it is returned by the function."""
relevant = {}
print("pagesContent in function: ", pagesContent)
for addr, page in pagesIndex.items():
if query.lower() in pagesContent[addr].lower():
relevant[addr] = page
return relevant
def normalize(pages):
"""From the pseudocode: Normalize divides each page's score by the sum of
the squares of all pages' scores (separately for both the authority and hubs scores).
"""
summed_hub = sum(page.hub**2 for _, page in pages.items())
summed_auth = sum(page.authority**2 for _, page in pages.items())
for _, page in pages.items():
page.hub /= summed_hub
page.authority /= summed_auth
class ConvergenceDetector(object):
"""If the hub and authority values of the pages are no longer changing, we have
reached a convergence and further iterations will have no effect. This detects convergence
so that we can stop the HITS algorithm as early as possible."""
def __init__(self):
self.hub_history = None
self.auth_history = None
def __call__(self):
return self.detect()
def detect(self):
curr_hubs = [page.hub for addr, page in pagesIndex.items()]
curr_auths = [page.authority for addr, page in pagesIndex.items()]
if self.hub_history is None:
self.hub_history, self.auth_history = [], []
else:
diffsHub = [abs(x-y) for x, y in zip(curr_hubs, self.hub_history[-1])]
diffsAuth = [abs(x-y) for x, y in zip(curr_auths, self.auth_history[-1])]
aveDeltaHub = sum(diffsHub)/float(len(pagesIndex))
aveDeltaAuth = sum(diffsAuth)/float(len(pagesIndex))
if aveDeltaHub < 0.01 and aveDeltaAuth < 0.01: # may need tweaking
return True
if len(self.hub_history) > 2: # prevent list from getting long
del self.hub_history[0]
del self.auth_history[0]
self.hub_history.append([x for x in curr_hubs])
self.auth_history.append([x for x in curr_auths])
return False
def getInlinks(page):
if not page.inlinks:
page.inlinks = determineInlinks(page)
return [p for addr, p in pagesIndex.items() if addr in page.inlinks]
def getOutlinks(page):
if not page.outlinks:
page.outlinks = findOutlinks(page)
return [p for addr, p in pagesIndex.items() if addr in page.outlinks]
class Page(object):
def __init__(self, address, hub=0, authority=0, inlinks=None, outlinks=None):
self.address = address
self.hub = hub
self.authority = authority
self.inlinks = inlinks
self.outlinks = outlinks
pagesContent = {} # maps Page relative or absolute URL/location to page's HTML content
pagesIndex = {}
convergence = ConvergenceDetector() # assign function to variable to mimic pseudocode's syntax
def HITS(query):
"""The HITS algorithm for computing hubs and authorities with respect to a query."""
    pages = expand_pages(relevant_pages(query))  # in order to 'map' faithfully to pseudocode we
    for p in pages.values():                     # won't pass the list of pages as an argument
        p.authority = 1
        p.hub = 1
    while True:  # repeat until... convergence
        for p in pages.values():
            p.authority = sum(x.hub for x in getInlinks(p))  # p.authority ← ∑i Inlinki(p).Hub
            p.hub = sum(x.authority for x in getOutlinks(p))  # p.hub ← ∑i Outlinki(p).Authority
normalize(pages)
if convergence():
break
return pages
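# Minimal end-to-end sketch (illustrative only: it needs network access and
# must populate the module-level pagesContent/pagesIndex globals in place):
#
#     pagesContent.update(loadPageHTML([examplePagesSet[0] + p
#                                       for p in examplePagesSet[1:]]))
#     pagesIndex.update(initPages(list(pagesContent)))
#     ranked = HITS('philosophy')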
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.color'
db.add_column('blogs_category', 'color',
self.gf('django.db.models.fields.TextField')(default='#000000', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Category.color'
db.delete_column('blogs_category', 'color')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'block_css': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_footer': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_header': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_middle': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_navbar': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_other_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_bottom': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_middle_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_middle_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_right_top': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_single_left': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_subscribe_button': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_subscribe_text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'block_title': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_color': ('django.db.models.fields.TextField', [], {'default': "'#ff7f00'", 'max_length': '10'}),
'main_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'color': ('django.db.models.fields.TextField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
|
import eyed3
import os
def updateID3(metadata):
audiofile = eyed3.load(metadata["fileName"])
# audiofile.initTag()
audiofile.tag.title = metadata["title"]
print("Title: " + metadata["title"])
audiofile.tag.artist = metadata["artist"]
print("Artist: " + metadata["artist"])
audiofile.tag.album = metadata["album"]
print("Album: " + metadata["album"])
audiofile.tag.genre = metadata["genre"]
print("Genre: " + metadata["genre"])
# audiofile.tag.title = u"Hollow"
# audiofile.tag.track_num = 2
audiofile.tag.save()
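if __name__ == '__main__':
    # Minimal usage sketch -- the file path and tag values below are
    # illustrative assumptions, not part of the original module.
    updateID3({
        "fileName": "example.mp3",
        "title": "Example Title",
        "artist": "Example Artist",
        "album": "Example Album",
        "genre": "Rock",
    })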
|
__all__ = ['components', 'core', 'serial', 'simplification']
|
try:
from pyproj import Proj, transform
### utility methods for projection - pyproj support ########################################
pLatlon = Proj(init='epsg:4326')
p32633 = Proj(init='epsg:32633')
pGoogle = Proj(init='epsg:3857')
def project_to_unit(proj, x, y):
'''Projects any coordinate system to a bent mercator unit square.'''
lon, lat = transform(proj, pLatlon, x, y)
return latlon_to_unit(lat, lon)
def unit_to_project(proj, x, y):
'''Unprojects unit square to any coordinate system.'''
lat, lon = unit_to_latlon(x,y)
return transform(pLatlon, proj, lon, lat)
#############################################################################################
except ImportError:
pass
from math import pi, sin, cos, atan2, sqrt, radians, log, atan, exp, tan
def latlon_to_unit(lat, lon):
'''Projects the given lat/lon to bent mercator image
coordinates [-1,1] x [-1,1]. (as defined by Google)
'''
return (lon / 180.0, log(tan(pi / 4.0 + (lat * pi / 180.0) / 2.0)) / pi) #exact calculation
def unit_to_latlon(x, y):
'''Unprojects the given bent mercator image coordinates [-1,1] x [-1,1] to
the lat/lon space.
'''
return ((2 * atan(exp(y * pi)) - pi / 2) * 180.0 / pi, x * 180)
def p4326_to_unit(lon, lat):
return lon / 180.0, lat / 90.0
def unit_to_p4326(x, y):
return x * 180.0, y * 90.0
GCONST = 20037508.342789244
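# GCONST is the Web Mercator half-extent in meters (pi * 6378137, the
# projected distance from the origin to +/-180 degrees longitude), so scaling
# unit-square coordinates by GCONST yields EPSG:3857 meters.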
def latlon_to_google(lat, lon):
x,y = latlon_to_unit(lat, lon)
return x*GCONST, y*GCONST
def google_to_latlon(x, y):
return unit_to_latlon(x / GCONST, y / GCONST)
def unit_to_custom(x, y, bounds):
ulx, uly, orx, ory = bounds
dx, dy = orx-ulx, ory-uly
return ulx + (x + 1.0) / 2.0 * dx , uly + (y + 1.0) / 2.0 * dy
def custom_to_unit(x, y, bounds):
ulx, uly, orx, ory = bounds
dx, dy = orx-ulx, ory-uly
return (x - ulx) * 2.0 / dx - 1.0, (y-uly) * 2.0 / dy - 1.0
def latlon_to_custom(lat, lon, bounds):
ux, uy = latlon_to_unit(lat, lon)
x, y = unit_to_custom(ux, uy, bounds)
return x,y
def custom_to_latlon(x, y, bounds):
u, v = custom_to_unit(x, y, bounds)
l, m = unit_to_latlon(u, v)
return l, m
def fix180(x):
'''wrap all coordinates into [-180;180]'''
return ((x + 180) % 360) - 180
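if __name__ == '__main__':
    # Quick sanity check (illustrative only): latlon_to_unit/unit_to_latlon
    # should round-trip within floating-point tolerance, and fix180 wraps
    # out-of-range longitudes back into [-180, 180).
    lat, lon = 48.2082, 16.3738  # assumed sample coordinates
    lat2, lon2 = unit_to_latlon(*latlon_to_unit(lat, lon))
    assert abs(lat - lat2) < 1e-9 and abs(lon - lon2) < 1e-9
    assert fix180(190) == -170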
|
def number(bus_stops):
return sum([a[0] - a[1] for a in bus_stops])
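# Example (illustrative): at each stop a[0] people get on and a[1] get off,
# so number([[10, 0], [3, 5], [5, 8]]) == 5 people still on the bus.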
|
import psycopg2
import peewee
from aiohttp import web
from aiohttp_session import get_session
from aiowing import settings
from aiowing.apps.admin.models import User
class Handler(web.View):
async def get_current_user(self):
"""Current user"""
session = await get_session(self.request)
email = session.get('email', None)
if email is None:
return None
try:
user = await settings.manager.get(
User
.select()
.where(User.email == email))
except (User.DoesNotExist, psycopg2.OperationalError,
peewee.IntegrityError, peewee.ProgrammingError):
return None
if not (user.active and user.superuser):
return None
return email
    async def paging(self, count, per_page, page):
        """Return (page_count, prev_page, page, next_page); all None when out of range."""
        page_count = int(count / per_page) + int(bool(count % per_page))
        if page > page_count or page < 1:
            return None, None, None, None
        prev_page = page - 1 if page > 1 else None
        next_page = page + 1 if page < page_count else None
        return page_count, prev_page, page, next_page
async def ajax_empty(self, status):
return web.json_response(dict(status=status))
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="layout.updatemenu.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
**kwargs
)
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy
import preprocess
import hashfeatures
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ------------', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
author_frame = pd.read_csv(filename)
class_labels = list(author_frame['author'].values)
del author_frame['id']
del author_frame['author']
text_list = list(author_frame['text'].values)
cleaned_text_list = preprocess.text_clean_pipeline_list(text_list)
feat_hash = hashfeatures.FeatureHash(max_feature_num=1000)
text_features = feat_hash.get_feature_set(cleaned_text_list)
X_train,X_test,y_train,y_test = train_test_split(text_features,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
|
from nodetraq.tests import *
class TestAuthController(TestController):
def test_index(self):
response = self.app.get(url(controller='auth', action='index'))
# Test response...
|
from django.contrib import admin
from .models import *
admin.site.register(StorageGraph)
admin.site.register(Layout)
admin.site.register(Taxonomy)
|
import pytest
from thefuck.rules.git_pull import match, get_new_command
from tests.utils import Command
@pytest.fixture
def stderr():
return '''There is no tracking information for the current branch.
Please specify which branch you want to merge with.
See git-pull(1) for details
git pull <remote> <branch>
If you wish to set tracking information for this branch you can do so with:
git branch --set-upstream-to=<remote>/<branch> master
'''
def test_match(stderr):
assert match(Command('git pull', stderr=stderr))
assert not match(Command('git pull'))
assert not match(Command('ls', stderr=stderr))
def test_get_new_command(stderr):
assert (get_new_command(Command('git pull', stderr=stderr))
== "git branch --set-upstream-to=origin/master master && git pull")
|
import pytest
import json
import tempfile
import pyethereum.trie as trie
from tests.utils import new_db
from pyethereum.slogging import get_logger, configure_logging
logger = get_logger()
configure_logging(':trace')
def check_testdata(data_keys, expected_keys):
assert set(data_keys) == set(expected_keys), \
"test data changed, please adjust tests"
def load_tests():
try:
fixture = json.load(open('fixtures/TrieTests/trietestnextprev.json', 'r'))
except IOError:
raise IOError("Could not read trietests.json from fixtures",
"Make sure you did 'git submodule init'")
return fixture
def run_test(name):
logger.debug('testing %s' % name)
t = trie.Trie(new_db())
data = load_tests()[name]
for k in data['in']:
logger.debug('updating with (%s, %s)' %(k, k))
t.update(k, k)
for point, prev, nxt in data['tests']:
assert nxt == (t.next(point) or '')
assert prev == (t.prev(point) or '')
def test_basic():
run_test('basic')
|
def createMail(sender, recipient, subject, html, text):
'''
A slightly modified version of Recipe #67083, included here
for completeness
'''
import MimeWriter, mimetools, cStringIO
out = cStringIO.StringIO()
htmlin = cStringIO.StringIO(html)
txtin = cStringIO.StringIO(text)
writer = MimeWriter.MimeWriter(out)
writer.addheader("From", sender)
writer.addheader("To", recipient)
writer.addheader("Subject", subject)
writer.addheader("MIME-Version", "1.0")
writer.startmultipartbody("alternative")
writer.flushheaders()
subpart = writer.nextpart()
subpart.addheader("Content-Transfer-Encoding", "quoted-printable")
pout = subpart.startbody("text/plain", [("charset", 'us-ascii')])
mimetools.encode(txtin, pout, 'quoted-printable')
txtin.close()
subpart = writer.nextpart()
subpart.addheader("Content-Transfer-Encoding", "quoted-printable")
pout = subpart.startbody("text/html", [("charset", 'us-ascii')])
mimetools.encode(htmlin, pout, 'quoted-printable')
htmlin.close()
writer.lastpart()
msg = out.getvalue()
out.close()
return msg
def sendMail(sender, recipient, subject, html, text):
import smtplib
message = createMail(sender, recipient, subject, html, text)
server = smtplib.SMTP("localhost")
server.sendmail(sender, recipient, message)
server.quit()
def main():
'''
the main body of your program
'''
print x # will raise an exception
if __name__ == '__main__':
try:
main()
except:
import sys, cgitb
sendMail('bugs@yourdomain.com',
'webmaster@yourdomain.com',
'Error on yourdomain.com',
cgitb.html(sys.exc_info()),
cgitb.text(sys.exc_info()))
# handle the error gracefully, perhaps doing a
# http redirect if this is a cgi application or
# otherwise letting the user know something happened
# but that, hey, you are all over it
|
from horse import build_app
application = build_app(name=__name__, debug=True).web_app
if __name__ == "__main__":
application.run()
|
import psutil
import socket
import platform
from uuid import getnode
from Server import Server
def get_drive_mountpoints():
return [drive.mountpoint for drive in psutil.disk_partitions()]
def mock_server():
mocked_server = Server()
return mocked_server
def discover():
discovery_data = {}
discovery_data["FQDN"] = socket.getfqdn()
discovery_data["IP"] = socket.gethostbyname(socket.getfqdn())
discovery_data["MAC"] = getnode()
discovery_data["MOUNT_POINTS"] = get_drive_mountpoints()
discovery_data["CPU_COUNT"] = psutil.cpu_count()
discovery_data["RAM_COUNT"] = psutil.virtual_memory().total
discovery_data["PLATFORM"] = platform.system()
return discovery_data
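if __name__ == '__main__':
    # Illustrative only: dump the collected discovery payload for inspection.
    import pprint
    pprint.pprint(discover())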
|
"""models.py: Blog database tables and columns."""
import os
from datetime import datetime
from django.conf import settings
from django.db import models
from django.utils.text import slugify
from markdown import markdown
class Category(models.Model):
title = models.CharField(max_length=70)
slug = models.SlugField(max_length=70)
description = models.TextField(blank=True)
class Meta:
ordering = 'title',
verbose_name_plural = 'Categories'
def __unicode__(self):
return self.title
class Tag(models.Model):
name = models.SlugField(max_length=70)
description = models.TextField(blank=True)
class Meta:
ordering = 'name',
def __unicode__(self):
return self.name
class Author(models.Model):
nickname = models.SlugField()
class Meta:
ordering = 'nickname',
def __unicode__(self):
return self.nickname
class Post(models.Model):
# Foreign Keys:
UNCATEGORIZED = 1
TAGGED = 1
ADMINISTRATOR = 1
# Post States:
DRAFT = 0
PUBLISHED = 1
CHOICES = (
(DRAFT, 'Draft'),
(PUBLISHED, 'Published'),
)
# Image Paths:
IMAGE_UPLOAD_PATH = lambda instance, image: '{path}/{image}'.format(
path=instance.category.slug,
image='{name}.{extension}'.format(
name=slugify(os.path.splitext(image)[0]).replace('_', ''),
extension=image.split('.')[-1].lower()
)
)
# Fields:
title = models.CharField(max_length=70)
slug = models.SlugField(max_length=70)
image = models.ImageField(upload_to=IMAGE_UPLOAD_PATH, blank=True)
summary = models.TextField()
content = models.TextField()
content_html = models.TextField(editable=False)
category = models.ForeignKey(Category, default=UNCATEGORIZED)
tags = models.ManyToManyField(Tag, default=[TAGGED])
author = models.ForeignKey(Author, default=ADMINISTRATOR)
state = models.IntegerField(choices=CHOICES, default=DRAFT)
published = models.DateField(default=datetime.now, blank=True)
modified = models.DateField(blank=True, null=True)
class Meta:
ordering = 'published',
    def save(self, *args, **kwargs):
        self.content_html = markdown(self.content, ['codehilite'])
        self.modified = datetime.now()
        if not self.id:
            self.published = self.modified
        super(Post, self).save(*args, **kwargs)
def __unicode__(self):
return self.title
|
from Tkinter import *
import os
import tkFileDialog
import tkMessageBox
def clear(win):
win.spctab.sttsobj.clear()
win.spctab.polobj.clear()
def reload(win):
win.spctab.sttsobj.reload()
win.spctab.polobj.reload()
def save():
ruleflname=tkFileDialog.asksaveasfilename(filetypes=[("Firewall Rules",".rules")],defaultextension=".rules")
if(ruleflname!=""):
os.popen('iptables-save > '+ruleflname,"r")
tkMessageBox.showinfo("Saved","Firewall Rules have been saved as: "+ruleflname)
def restore():
ruleflname=tkFileDialog.askopenfilename(filetypes=[("Firewall Rules",".rules")])
if(ruleflname!=""):
os.popen('iptables-restore < '+ruleflname,"r")
tkMessageBox.showinfo("Restored","Firewall Rules have been restored from: "+ruleflname)
|
from django.test import TestCase, Client
from django.conf import settings
from datetime import datetime
from pytz import UTC
from dj_twilio_sms import utils
from dj_twilio_sms.models import OutgoingSMS
class SmsSendingTest(TestCase):
def test_send_sms(self):
result = utils.send_sms(
request=None,
to_number=settings.TWILIO_VERIFIED_NUMBER,
body='Test Message from tox'
)
self.assertTrue(isinstance(result, OutgoingSMS))
self.assertEqual(result.status, 'queued')
self.assertTrue(isinstance(result.sent_at, datetime))
self.assertEqual(result.sent_at.tzinfo, UTC)
self.assertEqual(result.created_at.tzinfo, UTC)
self.assertIsNone(result.delivered_at)
# make fake response
client = Client(
HTTP_USER_AGENT='Mozilla/5.0',
HTTP_X_TWILIO_SIGNATURE='emin'
)
response = client.post('/messaging/callback/sent/{pk}/'.format(pk=result.pk), {
'MessageStatus': 'sent',
'ApiVersion': '2010-04-01',
'SmsSid': 'SMS9i8d7spw6o5r4k3sspt2e1s0t1i2n34',
'SmsStatus': 'sent',
'To': settings.TWILIO_VERIFIED_NUMBER,
'From': settings.TWILIO_PHONE_NUMBER,
'MessageSid': 'SMS9i8d7spw6o5r4k3sspt2e1s0t1i2n34',
'AccountSid': settings.TWILIO_ACCOUNT_SID
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response._headers['content-type'][1], 'application/xml')
# check if sms details updated
sms = OutgoingSMS.objects.get(pk=result.pk)
self.assertTrue(isinstance(sms.delivered_at, datetime))
self.assertEqual(sms.status, 'sent')
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
def plot_cm(target_names, cm, cm_norm):
plt.figure(figsize=(10, 5))
    plt.title(u'Confusion Matrix')
    # a = plt.subplot(121)
    # a.set_title(u"Regular Confusion Matrix", fontsize=16)
    # plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    # plt.colorbar(fraction=0.046, pad=0.10)
    tick_marks = np.arange(len(target_names))
    # plt.xticks(tick_marks, target_names, rotation=45)
    # plt.yticks(tick_marks, target_names)
    # plt.ylabel(u'True Class', fontsize=16)
    # plt.xlabel(u'Predicted Class', fontsize=16)
    b = plt.subplot(122)
    b.set_title(u"Normalized Confusion Matrix", fontsize=16)
    plt.imshow(cm_norm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar(fraction=0.046, pad=0.10)
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.ylabel(u'True Class', fontsize=16)
    plt.xlabel(u'Predicted Class', fontsize=16)
plt.tight_layout()
def evaluate(X_train, Y_train, X_test, Y_test, model, table_names):
    # Define the data of interest for the problem
    # Train on the training partition
    model.fit(X_train, Y_train)
    # Evaluate on the test partition
    Y_pred = model.predict(X_test)
    score = model.score(X_test, Y_test)
    print("Mean score: {0:.2f}".format(score))
    # Build the regular and normalized confusion matrices
    cm = confusion_matrix(Y_test, Y_pred)
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Print the confusion matrices
    np.set_printoptions(precision=2)
    plot_cm(table_names, cm, cm_norm)
    # Show all figures
    plt.show()
def partition(X, Y, model, table_names):
    # Define the data of interest for the problem:
    # X is the feature vector ("features" in your example)
    # Y is the class vector ("labels" in your example)
    # Create 10 stratified partitions from the available data
    # (this module imports model_selection, so use its StratifiedKFold API)
    kf = model_selection.StratifiedKFold(n_splits=10)
    # Train the model on the training data OF EACH PARTITION
    # and compute the scores
    round = 1
    scores = []
    for train_index, test_index in kf.split(X, Y):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        # Instantiate the desired algorithm (here, a Decision Tree)
        #model = DecisionTreeClassifier()
        # Train on the training partition
        model.fit(X_train, Y_train)
        # Evaluate on the test partition
        Y_pred = model.predict(X_test)
        score = model.score(X_test, Y_test)
        scores.append(score)
        # Build the regular and normalized confusion matrices
        cm = confusion_matrix(Y_test, Y_pred)
        cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # Print the confusion matrices
        np.set_printoptions(precision=2)
        #print(u"Round #{0} (score: {1:.2f})".format(round, score))
        round = round + 1
        #print(u"Training partition: from index #{} to index #{}".format(train_index[0], train_index[-1]))
        #print(u"Test partition: from index #{} to index #{}".format(test_index[0], test_index[-1]))
        #print(u"----------------------------")
        #print(u'Regular Confusion Matrix')
        #print(cm)
        #print(u'Normalized Confusion Matrix')
        #print(cm_norm)
        plot_cm(table_names, cm, cm_norm)
    # Print the minimum, maximum and mean scores
    scores = np.array(scores)
    print(u"Min score: {0:.2f} Max score: {1:.2f} Mean score: {2:.2f}".format(scores.min(), scores.max(), scores.mean()))
    # Show all figures
    plt.show()
|
import socket
import urllib.request
import urllib.parse
from urllib.error import URLError
import json
import time
import datetime
__author__ = 'RFS4ever'
__homepage__ = 'https://github.com/RFS4ever/pyDdnsPod'
__version__ = '0.5'
timeout = 10
socket.setdefaulttimeout(timeout)
current_ip = None
config = {
'ddns_api_url': 'https://dnsapi.cn/Record.Ddns',
'headers': {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/json",
"User-Agent": u"{0:s}'s pyDdnsPod/{1:s} (beyondrookie#gmail.com)".format(__author__, __version__)
},
'params': {
'login_email': 'email', # Change to yours
'login_password': 'password', # Change to yours
'format': 'json',
'domain_id': '8888', # Change to yours
'record_id': '8888', # Change to yours
'sub_domain': 'ddns', # Change to yours
        'record_line': '默认',  # '默认' means "default"; the DNSPod API expects the Chinese value
}
}
def get_current_time():
now = datetime.datetime.now()
return now.strftime('%Y/%m/%d %H:%M:%S %a')
def get_public_ip(original_ip):
"""Create socket connection to get the current public ip address"""
addr = ('ns1.dnspod.net', 6666)
try:
sock = socket.create_connection(addr)
public_ip = sock.recv(16).decode('utf-8')
sock.close()
except Exception as e:
print(e)
public_ip = original_ip
now = get_current_time()
print(u'[{0:s}] Public ip is "{1:s}"'.format(now, public_ip))
return public_ip
def ddns(ip):
"""Connect to the API server, change the ddns's ip record to current public ip"""
url = config['ddns_api_url']
params = config['params']
params.update({'value': ip})
data = urllib.parse.urlencode(params)
data = data.encode('utf-8')
headers = config['headers']
req = urllib.request.Request(url, data, headers)
try:
res = urllib.request.urlopen(req)
except URLError as e:
if hasattr(e, 'reason'):
print('We failed to reach a server.')
print('Reason: ', e.reason)
elif hasattr(e, 'code'):
print('The server couldn\'t fulfill the request.')
print('Error code: ', e.code)
else:
# everything is fine
return_info = res.read().decode('utf-8')
return_info = json.loads(return_info)
if return_info.get('status', {}).get("code") == '1':
now = get_current_time()
print('=' * 80)
print(u'[{0:s}] New ip changed to "{1:s}"!!!'.format(now, ip))
print(json.dumps(return_info, indent=4))
print('=' * 80)
return True
else:
raise Exception(return_info)
def main():
global current_ip
# Starting msg
now = get_current_time()
print('*' * 80)
print(u'[{0:s}] Bootstrapping {1:s}... '.format(now, config['headers']['User-Agent']))
print('*' * 80)
# Main body
while True:
try:
ip = get_public_ip(current_ip)
if current_ip != ip:
if ddns(ip):
current_ip = ip
except Exception as e:
print(e)
# print(json.dumps(e.decode('utf-8'), indent=4))
time.sleep(60 * 60) # 1 hour (sleep in seconds)
if __name__ == '__main__':
main()
|
"""
Quickly threw together an NN controller for a project.
Carries over from parametric CL controller implementations.
Needs a lot of cleanup.
"""
from __future__ import division
import numpy as np
import numpy.linalg as npl
class NN_controller:
def __init__(self, dt, q0, target, path_type,
kp, kd, n, kv, kw,
umax, vmax, amax):
"""
document
"""
self.nstates = len(q0)
self.ncontrols = len(umax)
self.nsigs = n
self.sig = lambda x: np.concatenate(([1], np.tanh(x)))
self.sigp = lambda x: np.tile(1/(np.cosh(x)**2), (self.nsigs+1, 1))
self.set_gains(kp, kd, kv, kw)
self.set_limits(umax, vmax, amax)
self.V = np.zeros((self.nstates+1, self.nsigs))
self.W = np.zeros((self.nsigs+1, self.ncontrols))
self.y = np.zeros(self.ncontrols)
self.time = 0
self.set_path(q0, target, path_type, dt)
self.kill = False
def set_gains(self, kp, kd, kv, kw):
"""
document
"""
self.kp = np.array(kp, dtype=np.float32)
self.kd = np.array(kd, dtype=np.float32)
self.kr = self.kp / self.kd
self.kv = np.array(kv, dtype=np.float32)
self.kw = np.array(kw, dtype=np.float32)
def set_limits(self, umax, vmax, amax):
"""
Sets model limits.
Uses the limits to compute a model reference for tracking,
and uses repmax for limiting repetitive learning.
"""
self.umax = np.array(umax, dtype=np.float32)
self.vmax = np.array(vmax, dtype=np.float32)
self.amax = np.array(amax, dtype=np.float32)
self.saturated = False
if np.inf in self.umax or 0 in self.umax:
self.umaxref = np.array([250, 30], dtype=np.float32)
else:
self.umaxref = self.umax
self.dref = self.umaxref / self.vmax
if np.inf in self.amax:
self.mref = np.array([0.01, 0.01], dtype=np.float32)
else:
self.mref = self.umaxref / self.amax
def set_path(self, q0, target, path_type, dt):
"""
Resets controller time and reference acceleration.
Sets the path initial state, the target position, and the
type of path. Updates reference q to its initial t=0 value.
        The 'train' path cycle period is hardcoded in update_ref.
"""
self.path_time = 0
self.qref = np.array(q0)
self.aref = np.zeros(self.ncontrols)
self.path_type = path_type
if path_type == 'train':
self.target = 2*np.pi*(np.random.rand(2) - 0.5)
else:
self.target = np.array(target)
self.update_ref(0)
def get_effort(self, q, dt):
"""
Returns the vector of torques as a PD controller plus
a feedforward term that uses an estimate of the system's
dynamics. The output is saturated at umax as
specified by the user previously. Before returning the
torques, the latest dynamics estimate is also updated.
"""
# Tracking errors
E = self.qref[:self.ncontrols] - q[:self.ncontrols]
Edot = self.qref[self.ncontrols:] - q[self.ncontrols:]
r = self.kr*E + Edot
# Control law
u = self.kp*E + self.kd*Edot + self.y
# Adapt NN
if not self.saturated:
x = np.concatenate(([1], q))
VTx = self.V.T.dot(x)
Wdot = self.kw.dot(np.outer(self.sig(VTx), r))
Vdot = self.kv.dot(np.outer(x, r).dot(self.W.T).dot(self.sigp(VTx)))
self.W = self.W + Wdot*dt
self.V = self.V + Vdot*dt
self.y = self.W.T.dot(self.sig(self.V.T.dot(x)))
# Update reference trajectory and controller life time
self.update_ref(dt)
self.time = self.time + dt
# Safety saturation of output
self.saturated = False
for i, mag in enumerate(abs(u)):
if mag > self.umax[i]:
u[i] = self.umax[i] * np.sign(u[i])
self.saturated = True
# Return effort torques
return u
def update_ref(self, dt):
"""
Updates the reference state qref depending on the
settings created in set_path. In every case, a
spring-damper tuned to vmax and amax is used to
generate the profile between each discontinuous target.
'train': sequence of random joint-space configurations
"""
self.path_time = self.path_time + dt
if self.path_type == 'train':
Eref = self.target[:2] - self.qref[:2]
Erefdot = -self.qref[2:]
uref = self.kp*Eref + self.kd*Erefdot
self.qref = self.qref + self.reference_dynamics(self.qref, uref)*dt
if self.path_time > 5:
self.set_path(self.qref, 2*np.pi*(np.random.rand(2) - 0.5), 'train', dt)
else:
raise ValueError("Invalid path_type.")
def reference_dynamics(self, qref, uref):
"""
Computes reference state derivative (qrefdot).
Takes reference state (qref) and reference control input (uref).
Spring-damper model tuned to vmax (terminal velocity) and amax (saturation).
"""
# Imposed actuator saturation
for i, mag in enumerate(abs(uref)):
if mag > self.umaxref[i]:
uref[i] = self.umaxref[i] * np.sign(uref[i])
# Simple linear evolution
return np.concatenate((qref[2:] , (uref - self.dref*qref[2:]) / self.mref))
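if __name__ == '__main__':
    # Minimal smoke test with assumed, untuned gains and limits for a
    # two-joint system; every number below is an illustrative guess.
    controller = NN_controller(dt=0.01, q0=[0.0, 0.0, 0.0, 0.0],
                               target=[1.0, 1.0], path_type='train',
                               kp=[10.0, 10.0], kd=[2.0, 2.0], n=20,
                               kv=0.1, kw=0.1,
                               umax=[250.0, 30.0], vmax=[1.0, 1.0],
                               amax=[5.0, 5.0])
    print(controller.get_effort(np.zeros(4), 0.01))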
|
__version__ = "0.0.1"
default_app_config = "bettertexts.apps.ImprovetextAppConfig"
|
import subprocess
word="In de hal van kasteel Elseneur."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Stil nu! De schone Ophelia! Nimf, gedenk in uw gebeden al mijn zonden."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Edele heer, hoe gaat het u de laatste tijd?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik dank u heel goed."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik heb nog souvenirs van u, die ik al lang terug had willen geven. Hier... neemt u ze."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Nee, nee, ik niet ik heb u nimmer iets gegeven."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="U weet heel goed, heer, dat u 't wel gedaan hebt, en met zó zoete woorden dat hun waarde nog groter werd. Hun geur is nu vervlogen, neem ze dus terug; want voor een edele geest verbleekt de rijkste gift wanneer de gever zich arm aan liefde toont. Hier zijn ze, heer."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Aha! ben je kuis?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Heer"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ben je mooi?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Wat bedoelt uwe hoogheid?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Dat als je kuis en mooi bent, je kuisheid geen omgang met je schoonheid zou mogen toestaan."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Maar, heer, kan schoonheid ooit beter omgang hebben dan met kuisheid?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Jazeker, want de macht van de schoonheid zal de kuisheid eer der in een koppelaarster veranderen, dan dat kuisheid de schoonheid dwingen kan haar te gelijken. Dit was vroeger een paradox, maar nu wordt het door de tijd bewezen. Ik heb je eens liefgehad."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ja, heer, dat hebt u me doen geloven."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Je had me niet moeten geloven, want de deugd kan niet zó geënt worden op onze oude stam, dat er geen zweem van overblijft. Ik heb je niet liefgehad."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Dan ben ik des te meer bedrogen."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ga in een klooster! Waarom zou je zondaars fokken? Ik mag wel zeggen dat ik vrij deugdzaam ben, maar toch zou ik me kunnen beschuldigen van dingen waarom mijn moeder me beter niet had kunnen baren. Ik ben erg hoogmoedig, wraak zuchtig en eergierig, en ik heb meer wandaden voor 't grijpen dan gedachten om ze uit te drukken, verbeelding om ze vorm te geven of tijd om ze te begaan. Wat moeten kerels als ik ook rond kruipen tussen hemel en aarde? Wij zijn aartsschavuiten geloof niemand van ons. Maak dat je in een klooster komt! Waar is je vader?"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Thuis, heer."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Laat dan de deuren achter hem dichtdoen, opdat hij nergens anders voor gek kan spelen dan in zijn eigen huis. Vaarwel."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="0 hemelse goedheid, help hem! "
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Mocht je trouwen, dan geef ik je deze vloek als bruidsschat mee, je kunt zo kuis als ijs, zo zuiver als sneeuw zijn, tóch ontkom je niet aan de laster. Ga in een klooster! Vaarwel. Of als je met alle geweld trouwen wilt, trouw dan een idioot, want mannen met hersens weten te goed wat voor monsters je van hen maakt. Naar een klooster en gauw! Vaarwel."
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Ik weet maar al te goed hoe jullie je beschildert. God heeft je een gezicht gegeven, maar jullie maakt je een ander. Je huppelt en trippelt, je geeft Gods schepselen bijnamen en laat je wulpsheid doorgaan voor argeloosheid. Ga weg, ik wil er niets meer van weten het heeft me gek gemaakt. Ik zeg je, dat er geen huwelijken meer moeten komen. De getrouwden mogen blijven leven op één na - en de ongetrouwden moeten blijven zoals ze zijn. Naar een klooster! Ga! "
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
word="Wat een edele geest is hier verscheurd! Oog, tong en zwaard van hoveling, geleerde en krijgsman, hoop en bloem van onze staat, spiegel der zeden, toonbeeld van beschaving, door eerbetoon omringd... voorgoed verloren. En ik, rampzaligste van alle vrouwen, die honing zoog uit zijn welluidend woord, hoor nu de tonen van dat helder brein verward en schril als een ontstemde beiaard, en zie het ongeëvenaarde beeld van bloesemende jeugd, verdord door waanzin. 0, wee mij, die gezien heeft wat ik zag, zie wat ik zie!"
stdout, stderr = subprocess.Popen(
['cowsay', word]).communicate()
def main():
    """Open the script file and print the conversation."""
    # Read the conversation script and print its contents.
    with open('conversation.py') as script:
        print script.read()
|
"""
DjangoCodeMirror template tags and filters for assets
"""
import copy, json
from django import template
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from djangocodemirror import settings_local
register = template.Library()
class HtmlAssetsRender(template.Node):
"""
Generate HTML of the node *HtmlMediaRender*
"""
def __init__(self, default_template, *args):
"""
:type default_template: string
:param default_template: Default template name to use if not given in the tag args
"""
self.args = args
self.default_template = default_template
def resolve_items(self, args, context):
# Try to resolve all given arguments
given_fields = []
for item in args:
fieldname = template.Variable(item)
try:
field = fieldname.resolve(context)
except template.VariableDoesNotExist:
                # Silently skip unresolvable variables; arguably this should raise an explicit error
pass
else:
given_fields.append(field)
return given_fields
def render(self, context):
"""
Render the HTML
:type context: object ``django.template.Context``
        :param context: Tag context object.
:rtype: string
:return: Generated HTML to load needed assets (CSS, JS)
        TODO: The 'mode' setting is not accumulated, so when multiple fields use
              different modes, only one of them gets its correct mode.
              A better solution would merge the asset lists.
"""
template_path = self.default_template
given_fields = self.resolve_items(self.args, context)
# If the first item is a string, assume that it is a template path, pop it and
# use it
if len(given_fields)>0 and isinstance(given_fields[0], basestring):
template_path = given_fields.pop(0)
first_field = given_fields.pop(0)
# We need to trigger Widget's media attribute to have the 'editor_config_manager' attribute
first_field.field.widget.media
app_settings = first_field.field.widget.editor_config_manager.editor_config
css, js = first_field.field.widget.editor_config_manager.find_assets()
        # Accumulate settings across the remaining fields, keeping every
        # activated option, so all assets needed by every given field are loaded
        for field in given_fields:
            field_widget = field.field.widget
            # Trigger the widget's media so 'editor_config_manager' is available
            field_widget.media
            app_settings.update(field_widget.editor_config_manager.editor_config)
            css_sup, js_sup = field_widget.editor_config_manager.find_assets()
            css = css + css_sup
            js = js + js_sup
context.update({
'djangocodemirror_css': css,
'djangocodemirror_js': js,
'css_bundle_name': first_field.field.widget.editor_config_manager.settings['css_bundle_name'],
'js_bundle_name': first_field.field.widget.editor_config_manager.settings['js_bundle_name'],
})
html = template.loader.get_template(template_path).render(template.Context(context))
return mark_safe(html)
@register.tag(name="djangocodemirror_get_assets")
def do_djangocodemirror_get_assets(parser, token):
"""
    Return the HTML to load all assets needed by the given djangocodemirror fields.
    This can only be used on fields that have already been rendered.
Usage : ::
{% load djangocodemirror_assets %}
<html>
<head>
...
{% djangocodemirror_get_assets form.myfield1 form.myfield2 %}
</head>
...
</html>
Warning, the tag does not throw explicit template errors for invalid fields.
    :type parser: object ``django.template.Parser``
    :param parser: Template parser object.
    :type token: object ``django.template.Token``
    :param token: The tag's tokenized string as captured in the template.
    :rtype: object ``HtmlAssetsRender``
    :return: The tag's render node object.
"""
args = token.split_contents()
if len(args) < 2:
        raise template.TemplateSyntaxError, "You need to specify at least one form field"
else:
return HtmlAssetsRender("djangocodemirror/include_field_assets.html", *args[1:])
do_djangocodemirror_get_assets.is_safe = True
@register.tag(name="djangocodemirror_get_bundles")
def do_djangocodemirror_get_bundles(parser, token):
"""
    It works exactly like "djangocodemirror_get_assets" except that it uses
    django-assets bundles in place of direct assets. Do not use this unless
    'django-assets' is installed.
"""
args = token.split_contents()
if len(args) < 2:
        raise template.TemplateSyntaxError, "You need to specify at least one form field"
else:
return HtmlAssetsRender("djangocodemirror/include_field_bundles.html", *args[1:])
do_djangocodemirror_get_bundles.is_safe = True
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import puzzle.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('puzzle', '0002_blank_display_order'),
]
operations = [
migrations.RemoveField(
model_name='puzzle',
name='author',
),
migrations.AddField(
model_name='puzzle',
name='user',
field=models.ForeignKey(default=puzzle.models.default_user, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Author',
),
]
|
'''
Created on 25.03.2013
@author: lehmann
'''
from pyads import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
class MainForm(QDialog):
def _initADS(self):
self.adsPort = adsPortOpen()
self.adsAdr = adsGetLocalAddress()
self.adsAdr.setPort(PORT_SPS1)
def __init__(self, parent=None):
super(MainForm, self).__init__(parent)
        # initialize ADS
self._initADS()
        # UI elements
bit1CheckBox = QCheckBox("Bit1")
(errCode, bit1) = adsSyncReadReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+0, PLCTYPE_BOOL)
if errCode == 0: bit1CheckBox.setChecked(bit1)
bit2CheckBox = QCheckBox("Bit2")
(errCode, bit2) = adsSyncReadReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+1, PLCTYPE_BOOL)
if errCode == 0: bit2CheckBox.setChecked(bit2)
bit3CheckBox = QCheckBox("Bit3")
(errCode, bit3) = adsSyncReadReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+2, PLCTYPE_BOOL)
if errCode == 0: bit3CheckBox.setChecked(bit3)
bit4CheckBox = QCheckBox("Bit4")
(errCode, bit4) = adsSyncReadReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+3, PLCTYPE_BOOL)
if errCode == 0: bit4CheckBox.setChecked(bit4)
#Layout
layout = QVBoxLayout()
layout.addWidget(bit1CheckBox)
layout.addWidget(bit2CheckBox)
layout.addWidget(bit3CheckBox)
layout.addWidget(bit4CheckBox)
self.setLayout(layout)
        # signals
self.connect(bit1CheckBox, SIGNAL("stateChanged(int)"), self.bit1CheckBox_stateChanged)
self.connect(bit2CheckBox, SIGNAL("stateChanged(int)"), self.bit2CheckBox_stateChanged)
self.connect(bit3CheckBox, SIGNAL("stateChanged(int)"), self.bit3CheckBox_stateChanged)
self.connect(bit4CheckBox, SIGNAL("stateChanged(int)"), self.bit4CheckBox_stateChanged)
def __del__(self):
adsPortClose()
def bit1CheckBox_stateChanged(self, state):
adsSyncWriteReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+0, state, PLCTYPE_BOOL)
def bit2CheckBox_stateChanged(self, state):
adsSyncWriteReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+1, state, PLCTYPE_BOOL)
def bit3CheckBox_stateChanged(self, state):
adsSyncWriteReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+2, state, PLCTYPE_BOOL)
def bit4CheckBox_stateChanged(self, state):
adsSyncWriteReq(self.adsAdr, INDEXGROUP_MEMORYBIT, 100*8+3, state, PLCTYPE_BOOL)
if __name__ == "__main__":
app = QApplication(sys.argv)
frm = MainForm()
frm.show()
app.exec_()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from home import views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'pisite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^logs/', include('logs.urls', namespace="logs")),
url(r'^motionSound/', include('motionSound.urls', namespace="motionSound")),
url(r'^admin/', include(admin.site.urls)),
# Pages within the home app
#
# Home page
url(r'^$', views.index, name="home"),
# Sound upload page
#url(r'^uploadSound$', views.uploadSound, name="uploadSound")
)
|
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, \
UpdateModelMixin
from .models import User
from .serializers import UserSerializer
from core.api_utils import IsOwnerOrReadOnlyUser
class UserAPIView(GenericViewSet,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin):
model = User
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnlyUser]
filter_fields = ('id', 'username',)
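# Hypothetical wiring sketch (not part of this module): the viewset would
# typically be exposed through a router in urls.py, e.g.
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'users', UserAPIView)
#   urlpatterns = router.urls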
|
from ..layers.core import Layer
from ..utils.theano_utils import shared_zeros
from .. import initializations
import theano.tensor as T
class BatchNormalization(Layer):
'''
Reference:
Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
http://arxiv.org/pdf/1502.03167v3.pdf
mode: 0 -> featurewise normalization
1 -> samplewise normalization (may sometimes outperform featurewise mode)
'''
def __init__(self, input_shape, epsilon=1e-6, mode=0, weights=None):
super(BatchNormalization,self).__init__()
self.init = initializations.get("uniform")
self.input_shape = input_shape
self.epsilon = epsilon
self.mode = mode
self.gamma = self.init((self.input_shape))
self.beta = shared_zeros(self.input_shape)
self.params = [self.gamma, self.beta]
if weights is not None:
self.set_weights(weights)
def get_output(self, train):
X = self.get_input(train)
if self.mode == 0:
m = X.mean(axis=0)
# manual computation of std to prevent NaNs
std = T.mean((X-m)**2 + self.epsilon, axis=0) ** 0.5
X_normed = (X - m) / (std + self.epsilon)
elif self.mode == 1:
m = X.mean(axis=-1, keepdims=True)
std = X.std(axis=-1, keepdims=True)
X_normed = (X - m) / (std + self.epsilon)
out = self.gamma * X_normed + self.beta
return out
def get_config(self):
return {"name":self.__class__.__name__,
"input_shape":self.input_shape,
"epsilon":self.epsilon,
"mode":self.mode}
|
import json
import time
from decimal import Decimal
from dashticker import redis_key
from dashticker.ws import Client, Handler
def on_message(data, redis):
market = 'USD_BTC'
if data['type'] != 'ticker':
return
ticker = {
'source': 'coinapult',
'market': market,
'timestamp': time.time(),
'ask': int(Decimal(data['small']['ask']) * 100),
'bid': int(Decimal(data['small']['bid']) * 100)
}
redis.publish(redis_key, json.dumps(ticker))
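# Note: prices are published as integer cents, e.g. an ask of "62000.15"
# becomes int(Decimal("62000.15") * 100) == 6200015, which avoids float
# rounding for downstream consumers.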
def main():
handler = Handler('coinapult', on_message)
cli = Client("wss://stream.coinapult.com:8123/websocket", handler)
if __name__ == "__main__":
main()
|
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
HOUR_IS_PAST_MESSAGE = "This office hour is in the past"
HOUR_HAS_BEEN_CANCELED_MESSAGE = "This office hour has been canceled"
HOUR_NOT_SPECIFIED_MESSAGE = "Office hour has not been specified"
HOUR_OWNED_BY_ANOTHER_MESSAGE = "This office hour is owned by another user"
class BaseMentorProgramOfficeHour(AcceleratorModel):
program = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Program"),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='mentor_officehours',
on_delete=models.CASCADE)
finalist = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name="Finalist",
blank=True,
null=True,
related_name='finalist_officehours',
on_delete=models.CASCADE)
startup = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Startup"),
blank=True,
null=True,
related_name='startup_officehours',
on_delete=models.SET_NULL)
start_date_time = models.DateTimeField(db_index=True)
end_date_time = models.DateTimeField(db_index=True)
description = models.TextField(blank=True)
location = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Location"),
null=True,
blank=True,
on_delete=models.CASCADE)
notify_reservation = models.BooleanField(default=True)
topics = models.CharField(max_length=2000, blank=True, default="")
meeting_info = models.CharField(max_length=256, blank=True, default="")
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_mentorprogramofficehour'
abstract = True
verbose_name = "Office Hour"
unique_together = ('program', 'mentor', 'start_date_time')
ordering = ['start_date_time']
def __str__(self):
hour_type = "Reserved"
if self.is_open():
hour_type = "Open"
return "%s office hour with %s" % (hour_type, self.mentor)
def is_open(self):
return not bool(self.finalist)
|
from net_layers import *
from utils import get_idx_from_sent,make_idx_data_cv
import cPickle,time,sys
from collections import OrderedDict
theano_rng = RandomStreams(rng.randint(2 ** 30))
def dropout(input,dropout_rate):
corrupted_matrix = theano_rng.binomial(
size = input.shape,
n = 1,
p = 1-dropout_rate,
dtype = theano.config.floatX
)
return corrupted_matrix*input
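# Note: this is plain (non-inverted) dropout -- units are zeroed with
# probability dropout_rate but survivors are not rescaled by
# 1/(1 - dropout_rate), so train- and test-time activation scales differ.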
def as_floatX(variable):
if isinstance(variable, float):
return numpy.cast[theano.config.floatX](variable)
if isinstance(variable, numpy.ndarray):
return numpy.cast[theano.config.floatX](variable)
return theano.tensor.cast(variable, theano.config.floatX)
def sgd_updates_adadelta(params,cost,rho=0.95,epsilon=1e-6,norm_lim=9):
"""
adadelta update rule, mostly from
https://groups.google.com/forum/#!topic/pylearn-dev/3QbKtCumAW4 (for Adadelta)
"""
updates = OrderedDict({})
exp_sqr_grads = OrderedDict({})
exp_sqr_ups = OrderedDict({})
gparams = []
for param in params:
empty = numpy.zeros_like(param.get_value())
exp_sqr_grads[param] = theano.shared(value = as_floatX(empty),name ="exp_grad_%s" % param.name)
gp = T.grad(cost, param)
        exp_sqr_ups[param] = theano.shared(value = as_floatX(empty),name ="exp_ups_%s" % param.name)
gparams.append(gp)
for param, gp in zip(params, gparams):
exp_sg = exp_sqr_grads[param]
exp_su = exp_sqr_ups[param]
up_exp_sg = rho * exp_sg + (1 - rho) * T.sqr(gp)
updates[exp_sg] = up_exp_sg
step = -(T.sqrt(exp_su + epsilon) / T.sqrt(up_exp_sg + epsilon)) * gp
updates[exp_su] = rho * exp_su + (1 - rho) * T.sqr(step)
stepped_param = param + step
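        # Max-norm constraint: rescale each column of 2-D weight matrices
        # (except the 'Words' embedding) so its L2 norm stays within sqrt(norm_lim).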
if (param.get_value(borrow=True).ndim == 2) and (param.name!='Words'):
col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0))
desired_norms = T.clip(col_norms, 0, T.sqrt(norm_lim))
scale = desired_norms / (1e-7 + col_norms)
updates[param] = stepped_param * scale
else:
updates[param] = stepped_param
return updates
def build_model(layer0_input,input_h,input_w,batch_size,filter_hs=[3,4,5]):
"""
construct the model
return params and top_layer
"""
    layer0_input -= T.mean(layer0_input, axis = 0) # zero-centering reduces model jitter (gave 81.5%)
corrupted_input = dropout(layer0_input,0.6)
input_maps = 1
filter_maps = 100
filter_w = input_w
filter_shapes = []
pool_sizes = []
for filter_h in filter_hs:
shape = (
filter_maps,
input_maps,
filter_h,
filter_w
)
filter_shapes.append(shape)
pool_size = (
input_h - filter_h + 1,
1
)
pool_sizes.append(pool_size)
conv_layers = []
layer0_outputs = []
for i in xrange(len(filter_hs)):
conv_layer = Conv_Pool_layer(
input = layer0_input,
input_shape = (batch_size,input_maps,input_h,input_w),
filter_shape = filter_shapes[i],
pool_shape = pool_sizes[i],
activation = T.nnet.relu
            # the activation function matters a lot: relu far outperforms sigmoid, softplus and tanh
)
layer0_output = conv_layer.output.flatten(2)
conv_layers.append(conv_layer)
layer0_outputs.append(layer0_output)
layer1_input = T.concatenate(layer0_outputs,1)
d_conv_layers = []
d_layer0_outputs = []
for i in xrange(len(filter_hs)):
d_conv_layer = Conv_Pool_layer(
input = corrupted_input,
input_shape = (batch_size,input_maps,input_h,input_w),
filter_shape = filter_shapes[i],
pool_shape = pool_sizes[i],
activation = T.nnet.relu,
W = conv_layers[i].W,
b = conv_layers[i].b
)
d_layer0_output = d_conv_layer.output.flatten(2)
d_conv_layers.append(d_conv_layer)
d_layer0_outputs.append(d_layer0_output)
d_layer1_input = T.concatenate(d_layer0_outputs,1)
norm_d = T.nnet.softmax(d_layer1_input)
norm_o = T.nnet.softmax(layer1_input)
recon_cost = -T.nnet.categorical_crossentropy(norm_d, norm_o).mean()
top_layer_input = layer1_input
    top_layer_input -= T.mean(top_layer_input, axis = 0) # zero-centering reduces model jitter
d_layer1_input -= T.mean(d_layer1_input,axis = 0)
#top_layer_input /= T.std(top_layer_input, axis = 0) # normalize
'''
It only makes sense to apply this pre-processing if you have a reason to believe that different
input features have different scales (or units), but they should be of approximately equal importance to the learning algorithm.
In case of images, the relative scales of pixels are already approximately equal (and in range from 0 to 255),
so it is not strictly necessary to perform this additional pre-processing step.
'''
    # what about applying Gibbs sampling to the features?
top_layer = Top_Layer(
input = top_layer_input,#T.concatenate([d_layer1_input,top_layer_input],1),
n_in = 300,
n_out = 2
)
#############################################################################
#L = -T.sum(layer1_input * T.log(T.nnet.sigmoid(d_layer1_input)) + ( 1 - layer1_input) * T.log( 1 - T.nnet.sigmoid(d_layer1_input)) , axis = 1)
    # # cross-entropy
# cost = T.mean(L)
# recon_cost = T.nnet.categorical_crossentropy(d_layer1_input, layer1_input).mean()
# L2 = (shiddenLayer.W **2).sum() + (topLayer.W **2).sum()
L2 = T.sum(T.sqr(top_layer.W))
params =[]
for conv_layer in conv_layers:
params += conv_layer.params
L2 += (conv_layer.W **2).sum()
fine_tune_params = top_layer.params + params
return recon_cost, params,top_layer,fine_tune_params,L2
def load_data(batch_size =50):
print "loading data ..."
sentences, vectors,rand_vectors, word_idx_map, _ = cPickle.load(open('dataset.pkl','rb'))
if len(sys.argv) <= 1:
print "usage: please select the mode between '-rand' and '-word2vec' "
mode = '-word2vec'
else:
mode= sys.argv[1]
if mode == '-rand':
print "using the rand vectors"
U = rand_vectors
else:
print "using word2vec vectors"
U = vectors
dataset,testset = make_idx_data_cv(sentences, word_idx_map, cv = 0)
if dataset.shape[0] % batch_size > 0:
add_data_num = batch_size - dataset.shape[0] % batch_size
extra_data = rng.permutation(dataset)[:add_data_num]
dataset = numpy.append(dataset,extra_data,axis = 0)
n_batches = dataset.shape[0] / batch_size
n_train_batches = int(numpy.round(n_batches*0.9))
n_valid_batches = n_batches - n_train_batches
dataset = rng.permutation(dataset)
train_set = dataset[:n_train_batches * batch_size,:]
valid_set = dataset[n_train_batches * batch_size:,:]
input_h = train_set.shape[1] -1
input_w = U.shape[1]
train_x = T.cast(theano.shared(train_set[:,:-1],borrow = True),dtype="int32")
train_y = T.cast(theano.shared(train_set[:,-1] ,borrow = True),dtype="int32")
valid_x = T.cast(theano.shared(valid_set[:,:-1],borrow = True),dtype="int32")
valid_y = T.cast(theano.shared(valid_set[:,-1] ,borrow = True),dtype="int32")
return train_x,train_y,valid_x,valid_y,U,n_train_batches,n_valid_batches,input_h,input_w
def train(batch_size =100,learning_rate = 0.1,epochs = 100):
(
train_x,
train_y,
valid_x,
valid_y,
U,
n_train_batches,
n_valid_batches,
input_h,
input_w
) = load_data(batch_size)
index = T.lscalar('index_real_name')
x = T.imatrix('data')
y = T.ivector('label')
# x,y type is int32
Words = theano.shared(value = U, name = "Words")
layer0_input = Words[x.flatten()].reshape((batch_size,1,input_h,input_w))
cost,params,classifier,c_params,L2 = build_model(
layer0_input = layer0_input,
input_h = input_h,
input_w = input_w,
batch_size = batch_size,
)
#############
# pre_train #
#############
grads = T.grad(cost = cost,wrt = params )
grad_updates = [
(param,param - learning_rate * grad) for param,grad in zip(params,grads)
]
pre_train_model = theano.function(
[index],
cost,
updates = grad_updates,
givens={
x: train_x[index * batch_size : (index + 1) * batch_size]
},
allow_input_downcast = True,
on_unused_input='warn'
)
# val_model = theano.function(
# [index],
# cost,
# givens={
# x: valid_x[index * batch_size: (index + 1) * batch_size]
# },
# allow_input_downcast=True,
# on_unused_input='warn'
# )
#############
# fine_tune #
#############
# self.L2 = (self.hiddenLayer.W **2).sum() + (self.topLayer.W **2).sum()
    # regularization has no significant effect here
c_cost = classifier.negative_log_likelihood(y) + L2 *0.0
#c_params += [Words]
c_grads = T.grad(cost = c_cost,wrt = c_params )
c_grad_updates = [
(param,param - learning_rate * grad) for param,grad in zip(c_params,c_grads)
]
fine_tune_model = theano.function(
[index],
[c_cost,classifier.errors(y)],
updates = c_grad_updates,
givens={
x: train_x[index * batch_size : (index + 1) * batch_size],
y: train_y[index * batch_size : (index + 1) * batch_size]
},
allow_input_downcast = True,
on_unused_input='warn'
)
val_model = theano.function(
[index],
classifier.errors(y),
givens={
x: valid_x[index * batch_size: (index + 1) * batch_size],
y: valid_y[index * batch_size: (index + 1) * batch_size]
},
allow_input_downcast=True,
on_unused_input='warn'
)
############
#Training #
############
# print 'pre_training...'
# for i in range(5):
# for minibatch_index in rng.permutation(range(n_train_batches)):
# cost = pre_train_model(minibatch_index)
# print 'cost:%2f %%'%(cost)
print 'fine_tune...'
f = open('log/autocode.txt','wb')
epoch = 0
while epoch < epochs :
start_time = time.time()
epoch = epoch + 1
train_losses = []
train_cost = []
        # randomly shuffle the minibatch order each epoch (SGD)
for minibatch_index in rng.permutation(range(n_train_batches)):
cost_index,train_errors = fine_tune_model( minibatch_index)
train_losses.append(train_errors)
train_cost.append(cost_index)
val_losses = [val_model(i) for i in xrange(n_valid_batches)]
val_perf = 1- numpy.mean(val_losses)
train_perf = 1- numpy.mean(train_losses)
cost_index = numpy.mean(train_cost)
if train_perf > 0.98 and epoch >25:
break
f.write('%3i,%.2f,%.2f,%.2f\n' % (epoch,val_perf*100, train_perf*100, cost_index*100))
print 'epoch: %3i, training time: %.2f, val perf: %.2f%%, train perf: %.2f%%, cost: %.2f%%' % (epoch, time.time()-start_time, val_perf*100, train_perf*100, cost_index*100)
f.close()
if __name__ == "__main__":
train()
|
import csv
import datetime
import json
import logging
import urlparse
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from io import BytesIO
from django.conf import settings
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.urlresolvers import reverse
from django.core.validators import validate_ipv4_address, validate_ipv46_address
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from crits.campaigns.forms import CampaignForm
from crits.campaigns.campaign import Campaign
from crits.config.config import CRITsConfig
from crits.core import form_consts
from crits.core.class_mapper import class_from_id
from crits.core.crits_mongoengine import EmbeddedSource, EmbeddedCampaign
from crits.core.crits_mongoengine import json_handler, Action
from crits.core.forms import SourceForm, DownloadFileForm
from crits.core.handlers import build_jtable, csv_export, action_add
from crits.core.handlers import jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import datetime_parser
from crits.core.user_tools import is_admin, user_sources
from crits.core.user_tools import is_user_subscribed, is_user_favorite
from crits.domains.domain import Domain
from crits.domains.handlers import upsert_domain, get_valid_root_domain
from crits.events.event import Event
from crits.indicators.forms import IndicatorActivityForm
from crits.indicators.indicator import Indicator
from crits.indicators.indicator import EmbeddedConfidence, EmbeddedImpact
from crits.ips.handlers import ip_add_update, validate_and_normalize_ip
from crits.ips.ip import IP
from crits.notifications.handlers import remove_user_from_notification
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.indicators import (
IndicatorTypes,
IndicatorThreatTypes,
IndicatorAttackTypes
)
from crits.vocabulary.ips import IPTypes
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.status import Status
logger = logging.getLogger(__name__)
def generate_indicator_csv(request):
"""
Generate a CSV file of the Indicator information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Indicator)
return response
def generate_indicator_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Indicator
type_ = "indicator"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Indicators",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': list(mapper['jtopts_fields']),
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
config = CRITsConfig.objects().first()
if not config.splunk_search_url:
del jtopts['fields'][1]
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'All Indicators'",
'text': "'All'",
'click': "function () {$('#indicator_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Indicators'",
'text': "'New'",
'click': "function () {$('#indicator_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Indicators'",
'text': "'In Progress'",
'click': "function () {$('#indicator_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Indicators'",
'text': "'Analyzed'",
'click': "function () {$('#indicator_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Indicators'",
'text': "'Deprecated'",
'click': "function () {$('#indicator_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Indicator'",
'text': "'Add Indicator'",
'click': "function () {$('#new-indicator').click()}",
},
]
if config.splunk_search_url:
for field in jtable['fields']:
if field['fieldname'].startswith("'splunk"):
field['display'] = """ function (data) {
return '<a href="%s' + data.record.value + '"><img src="/new_images/splunk.png" /></a>';
}
""" % config.splunk_search_url
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button': '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def get_indicator_details(indicator_id, analyst):
"""
Generate the data to render the Indicator details template.
:param indicator_id: The ObjectId of the Indicator to get details for.
:type indicator_id: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
users_sources = user_sources(analyst)
indicator = Indicator.objects(id=indicator_id,
source__name__in=users_sources).first()
if not indicator:
error = ("Either this indicator does not exist or you do "
"not have permission to view it.")
template = "error.html"
args = {'error': error}
return template, args
forms = {}
forms['new_activity'] = IndicatorActivityForm(initial={'analyst': analyst,
'date': datetime.datetime.now()})
    forms['new_campaign'] = CampaignForm()
forms['new_source'] = SourceForm(analyst, initial={'date': datetime.datetime.now()})
forms['download_form'] = DownloadFileForm(initial={"obj_type": 'Indicator',
"obj_id": indicator_id})
indicator.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, indicator_id, 'Indicator')
# subscription
subscription = {
'type': 'Indicator',
'id': indicator_id,
'subscribed': is_user_subscribed("%s" % analyst,
'Indicator',
indicator_id),
}
# relationship
relationship = {
'type': 'Indicator',
'value': indicator_id,
}
#objects
objects = indicator.sort_objects()
#relationships
relationships = indicator.sort_relationships("%s" % analyst, meta=True)
#comments
comments = {'comments': indicator.get_comments(),
'url_key': indicator_id}
#screenshots
screenshots = indicator.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Indicator', indicator.id)
# services
service_list = get_supported_services('Indicator')
# analysis results
service_results = indicator.get_analysis_results()
#crowdstrike
crowdstrike = {'data' : cs_api(str(indicator['lower']))}
#threatcrowd
    thetype = None
    try:
        if indicator.ind_type:
            thetype = indicator.ind_type
        if indicator.type:
            thetype = indicator.type
    except AttributeError:
        pass
threatcrowd = {'data' : threatcrowd_api(str(indicator['lower']), thetype)}
#virustotal
virustotal = {'data' : virustotal_api(str(indicator['lower']), thetype)}
#censysIO
censys = {'data' : censys_api(str(indicator['lower']), thetype)}
args = {'objects': objects,
'relationships': relationships,
'comments': comments,
'relationship': relationship,
'subscription': subscription,
"indicator": indicator,
"forms": forms,
"indicator_id": indicator_id,
'screenshots': screenshots,
'service_list': service_list,
'service_results': service_results,
'favorite': favorite,
'rt_url': settings.RT_URL,
'cs' : crowdstrike,
'tc' : threatcrowd,
'vt' : virustotal,
'censys' : censys}
return template, args
def cs_api(indy):
enable = "False"
url = "https://intelapi.crowdstrike.com/indicator/v1/search/indicator?match=" + indy + "&perPage=5&page=1"
params = {'X-CSIX-CUSTID': 'ENTER KEY HERE','X-CSIX-CUSTKEY': 'ENTER KEY HERE'}
cs_results = {}
if enable == "True":
try:
r = requests.get(url, headers=params, verify=False)
thejson = r.json()
for j in thejson:
if j['indicator']: cs_results['Indicator'] = j['indicator']
if j['reports']: cs_results['Reports'] = j['reports'][0]
if j['actors'] : cs_results['Actors'] = j['actors'][0]
if j['malware_families'] : cs_results['Malware'] = j['malware_families'][0]
return cs_results
except Exception, e:
cs_results['error'] = e
return cs_results
else:
cs_results['status'] = 'Disabled'
return cs_results
def threatcrowd_api(indy, indytype):
enable = "False"
indicator = indy
indy_type = indytype
email_url = "https://www.threatcrowd.org/searchApi/v2/email/report/"
domain_url = "https://www.threatcrowd.org/searchApi/v2/domain/report/"
ip_url = "https://www.threatcrowd.org/searchApi/v2/ip/report/"
tc_results = {}
if enable == "True":
try:
if indy_type == "Domain":
r = requests.get(domain_url, params = {"domain" : indicator}, verify=False)
thejson = r.json()
if thejson['response_code'] == "1":
if 'references' in thejson : tc_results['ref'] = thejson['references']
if 'hashes' in thejson : tc_results['hashes'] = thejson['hashes']
if 'emails' in thejson : tc_results['emails'] = thejson['emails']
if 'subdomains' in thejson : tc_results['subdomains'] = thejson['subdomains']
if indy_type == "IPv4 Address":
r = requests.get(ip_url, params = {"ip" : indicator}, verify=False)
thejson = r.json()
if thejson['response_code'] == "1":
if 'references' in thejson : tc_results['ref'] = thejson['references']
if 'hashes' in thejson : tc_results['hashes'] = thejson['hashes']
if 'emails' in thejson : tc_results['emails'] = thejson['emails']
if 'subdomains' in thejson : tc_results['subdomains'] = thejson['subdomains']
if indy_type == "Email Address":
r = requests.get(email_url, params = {"email" : indicator}, verify=False)
thejson = r.json()
if thejson['response_code'] == "1":
if 'references' in thejson : tc_results['ref'] = thejson['references']
if 'hashes' in thejson : tc_results['hashes'] = thejson['hashes']
if 'emails' in thejson : tc_results['emails'] = thejson['emails']
if 'subdomains' in thejson : tc_results['subdomains'] = thejson['subdomains']
            if 0 < len(tc_results) < 2:
tc_results['count'] = 'low'
return tc_results
except Exception, e:
tc_results['error'] = e
return tc_results
else:
tc_results['status'] = 'Disabled'
return tc_results
def virustotal_api(indy, indytype):
enable = "False"
indicator = indy
indy_type = indytype
vt_results = {}
if enable == "True":
try:
if indy_type == "Domain":
dom_url = "https://www.virustotal.com/vtapi/v2/domain/report"
params = {'domain' : indicator, 'apikey' : 'ENTER KEY HERE'}
r = requests.get(dom_url, params = params)
thejson = r.json()
if thejson['response_code'] == 1:
if 'detected_urls' in thejson : vt_results['detected_urls'] = thejson['detected_urls']
if 'detected_downloaded_samples' in thejson : vt_results['detected_downloaded_samples'] = thejson['detected_downloaded_samples']
if 'undetected_downloaded_samples' in thejson : vt_results['undetected_downloaded_samples'] = thejson['undetected_downloaded_samples']
if 'undetected_referrer_samples' in thejson : vt_results['undetected_referrer_samples'] = thejson['undetected_referrer_samples']
if 'detected_communicating_samples' in thejson : vt_results['detected_communicating_samples'] = thejson['detected_communicating_samples']
if indy_type == "IPv4 Address":
ip_url = "https://www.virustotal.com/vtapi/v2/ip-address/report";
params = {'ip' :indicator, 'apikey' : 'ENTER KEY HERE'}
r = requests.get(ip_url, params = params, verify=False)
thejson = r.json()
if thejson['response_code'] == 1:
if 'detected_urls' in thejson : vt_results['detected_urls'] = thejson['detected_urls']
if 'detected_downloaded_samples' in thejson : vt_results['detected_downloaded_samples'] = thejson['detected_downloaded_samples']
if 'undetected_downloaded_samples' in thejson : vt_results['undetected_downloaded_samples'] = thejson['undetected_downloaded_samples']
if 'undetected_referrer_samples' in thejson : vt_results['undetected_referrer_samples'] = thejson['undetected_referrer_samples']
if 'detected_communicating_samples' in thejson : vt_results['detected_communicating_samples'] = thejson['detected_communicating_samples']
if len(vt_results) < 2:
vt_results['count'] = 'low'
return vt_results
except Exception, e:
vt_results['error'] = e
return vt_results
else:
vt_results['status'] = 'Disabled'
return vt_results
def censys_api(indy, indytype):
enable = "False"
indicator = indy
indy_type = indytype
censys_results = {}
API_URL = "https://www.censys.io/api/v1"
UID = "ENTER KEY HERE"
SECRET = "ENTER KEY HERE"
if enable == "True":
if(indy_type == 'Certificate Fingerprint'):
query = {'query': '{s}'.format(s=indicator), 'fields': [
'ip', 'updated_at', '443.https.tls.certificate.parsed.fingerprint_sha1',
'443.https.tls.certificate.parsed.issuer_dn', '443.https.tls.certificate.parsed.subject_dn',
'443.https.tls.certificate.parsed.validity.start', '443.https.tls.certificate.parsed.validity.end',
'443.https.ssl_2.certificate.parsed.validity.end', '443.https.ssl_2.certificate.parsed.validity.start',
'443.https.ssl_2.certificate.parsed.subject_dn', '443.https.ssl_2.certificate.parsed.issuer_dn'],
'flatten': True}
r = requests.post(API_URL + "/search/ipv4", data=json.dumps(query) , auth=(UID, SECRET))
if r.status_code != 200:
censys_results['error'] = str(r.status_code)
else:
thejson = r.json()
if (thejson['status'] == 'ok'):
print thejson['results']
theips = []
for ips in thejson['results']:
theips.append(ips['ip'])
theips.sort()
censys_results['ips'] = theips
censys_results['status'] = 'Enabled'
return censys_results
elif(indy_type == 'IPv4 Address'):
query = {'query': 'ip: {ip}'.format(ip=indicator), 'fields': ['443.https.tls.certificate.parsed.fingerprint_sha1',
'443.https.tls.certificate.parsed.issuer_dn',
'443.https.tls.certificate.parsed.subject_dn',
'updated_at',
'443.https.ssl_2.certificate.parsed.fingerprint_sha1',
'443.https.ssl_2.certificate.parsed.issuer_dn',
'443.https.ssl_2.certificate.parsed.subject_dn'],
'flatten': True}
r = requests.post(API_URL + "/search/ipv4", data=json.dumps(query) , auth=(UID, SECRET))
if r.status_code != 200:
censys_results['error'] = str(r.status_code)
else:
thejson = r.json()
if (thejson['status'] == 'ok'):
print thejson['results']
for certs in thejson['results']:
if '443.https.tls.certificate.parsed.fingerprint_sha1' in certs :
censys_results['certhash'] = "Sha1 : " + certs['443.https.tls.certificate.parsed.fingerprint_sha1'][0]
censys_results['status'] = 'Enabled'
if '443.https.tls.certificate.parsed.issuer_dn' in certs :
censys_results['certinfo'] = "Issuer : " + certs['443.https.tls.certificate.parsed.issuer_dn'][0]
censys_results['status'] = 'Enabled'
return censys_results
else:
return censys_results
def get_indicator_type_value_pair(field):
"""
Extracts the type/value pair from a generic field. This is generally used on
fields that can become indicators such as objects or email fields.
The type/value pairs are used in indicator relationships
since indicators are uniquely identified via their type/value pair.
This function can be used in conjunction with:
crits.indicators.handlers.does_indicator_relationship_exist
Args:
field: The input field containing a type/value pair. This field is
generally from custom dictionaries such as from Django templates.
Returns:
        A (type, value) tuple with the value lowercased and stripped, or
        (None, None) if no type/value pair can be extracted from the field.
"""
# this is an object
if field.get("type") != None and field.get("value") != None:
return (field.get("type"), field.get("value").lower().strip())
# this is an email field
if field.get("field_type") != None and field.get("field_value") != None:
return (field.get("field_type"), field.get("field_value").lower().strip())
# otherwise the logic to extract the type/value pair from this
# specific field type is not supported
return (None, None)
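# A minimal sketch of the two supported field shapes (the dict contents below
# are illustrative, mirroring the branches above):
#   get_indicator_type_value_pair({'type': 'Domain', 'value': 'Example.COM '})
#       -> ('Domain', 'example.com')
#   get_indicator_type_value_pair({'field_type': 'Email From', 'field_value': 'A@B.io'})
#       -> ('Email From', 'a@b.io')
#   get_indicator_type_value_pair({'name': 'unsupported'})
#       -> (None, None)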
def get_verified_field(data, valid_values, field=None, default=None):
"""
Validate and correct string value(s) in a dictionary key or list,
or a string by itself.
:param data: The data to be verified and corrected.
:type data: dict, list of strings, or str
:param valid_values: Key with simplified string, value with actual string
:type valid_values: dict
:param field: The dictionary key containing the data.
:type field: str
:param default: A value to use if an invalid item cannot be corrected
:type default: str
:returns: the validated/corrected value(str), list of values(list) or ''
"""
if isinstance(data, dict):
data = data.get(field, '')
if isinstance(data, list):
value_list = data
else:
value_list = [data]
for i, item in enumerate(value_list):
if isinstance(item, basestring):
item = item.lower().strip().replace(' - ', '-')
if item in valid_values:
value_list[i] = valid_values[item]
continue
if default is not None:
            value_list[i] = default
continue
return ''
if isinstance(data, list):
return value_list
else:
return value_list[0]
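# Sketch of expected behavior (the valid_values mapping is illustrative):
#   valid = {'high': 'high', 'in-progress': 'In Progress'}
#   get_verified_field({'Confidence': 'HIGH '}, valid, field='Confidence')  -> 'high'
#   get_verified_field(['In - Progress'], valid)                            -> ['In Progress']
#   get_verified_field('bogus', valid, default='high')                      -> 'high'
#   get_verified_field('bogus', valid)                                      -> ''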
def handle_indicator_csv(csv_data, source, method, reference, ctype, username,
add_domain=False, related_id=None, related_type=None, relationship_type=None):
"""
Handle adding Indicators in CSV format (file or blob).
:param csv_data: The CSV data.
:type csv_data: str or file handle
:param source: The name of the source for these indicators.
:type source: str
:param method: The method of acquisition of this indicator.
:type method: str
:param reference: The reference to this data.
:type reference: str
:param ctype: The CSV type.
:type ctype: str ("file" or "blob")
:param username: The user adding these indicators.
:type username: str
:param add_domain: If the indicators being added are also other top-level
objects, add those too.
:type add_domain: boolean
:param related_id: ID for object to create relationship with
:type related_id: str
:param related_type: Type of object to create relationship with
:type related_type: str
:param relationship_type: Type of relationship to create
:type relationship_type: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if ctype == "file":
cdata = csv_data.read()
else:
cdata = csv_data.encode('ascii')
data = csv.DictReader(BytesIO(cdata), skipinitialspace=True)
result = {'success': True}
result_message = ""
# Compute permitted values in CSV
valid_ratings = {
'unknown': 'unknown',
'benign': 'benign',
'low': 'low',
'medium': 'medium',
'high': 'high'}
valid_campaign_confidence = {
'low': 'low',
'medium': 'medium',
'high': 'high'}
valid_campaigns = {}
for c in Campaign.objects(active='on'):
valid_campaigns[c['name'].lower().replace(' - ', '-')] = c['name']
valid_actions = {}
for a in Action.objects(active='on'):
valid_actions[a['name'].lower().replace(' - ', '-')] = a['name']
valid_ind_types = {}
for obj in IndicatorTypes.values(sort=True):
valid_ind_types[obj.lower().replace(' - ', '-')] = obj
# Start line-by-line import
msg = "Cannot process row %s: %s<br />"
added = 0
    processed = 0
    for processed, d in enumerate(data, 1):
ind = {}
ind['value'] = (d.get('Indicator') or '').strip()
ind['lower'] = (d.get('Indicator') or '').lower().strip()
ind['description'] = (d.get('Description') or '').strip()
ind['type'] = get_verified_field(d, valid_ind_types, 'Type')
ind['threat_types'] = d.get('Threat Type',
IndicatorThreatTypes.UNKNOWN).split(',')
ind['attack_types'] = d.get('Attack Type',
IndicatorAttackTypes.UNKNOWN).split(',')
        if not ind['threat_types']:
            ind['threat_types'] = [IndicatorThreatTypes.UNKNOWN]
        if not ind['attack_types']:
            ind['attack_types'] = [IndicatorAttackTypes.UNKNOWN]
        # validate types up front: a "continue" inside the inner loops would
        # only advance to the next type, not skip this CSV row
        row_ok = True
        for t in ind['threat_types']:
            if t not in IndicatorThreatTypes.values():
                result['success'] = False
                result_message += msg % (processed + 1, "Invalid Threat Type: %s" % t)
                row_ok = False
        for a in ind['attack_types']:
            if a not in IndicatorAttackTypes.values():
                result['success'] = False
                result_message += msg % (processed + 1, "Invalid Attack Type: %s" % a)
                row_ok = False
        if not row_ok:
            continue
ind['status'] = d.get('Status', Status.NEW)
if not ind['value'] or not ind['type']:
# Mandatory value missing or malformed, cannot process csv row
i = ""
result['success'] = False
if not ind['value']:
i += "No valid Indicator value "
if not ind['type']:
i += "No valid Indicator type "
result_message += msg % (processed + 1, i)
continue
campaign = get_verified_field(d, valid_campaigns, 'Campaign')
if campaign:
ind['campaign'] = campaign
ind['campaign_confidence'] = get_verified_field(d, valid_campaign_confidence,
'Campaign Confidence',
default='low')
actions = d.get('Action', '')
if actions:
actions = get_verified_field(actions.split(','), valid_actions)
if not actions:
result['success'] = False
result_message += msg % (processed + 1, "Invalid Action")
continue
ind['confidence'] = get_verified_field(d, valid_ratings, 'Confidence',
default='unknown')
ind['impact'] = get_verified_field(d, valid_ratings, 'Impact',
default='unknown')
ind[form_consts.Common.BUCKET_LIST_VARIABLE_NAME] = d.get(form_consts.Common.BUCKET_LIST, '')
ind[form_consts.Common.TICKET_VARIABLE_NAME] = d.get(form_consts.Common.TICKET, '')
try:
response = handle_indicator_insert(ind, source, reference,
analyst=username, method=method,
add_domain=add_domain, related_id=related_id,
related_type=related_type, relationship_type=relationship_type)
except Exception, e:
result['success'] = False
result_message += msg % (processed + 1, e)
continue
if response['success']:
if actions:
action = {'active': 'on',
'analyst': username,
'begin_date': '',
'end_date': '',
'performed_date': '',
'reason': '',
'date': datetime.datetime.now()}
for action_type in actions:
action['action_type'] = action_type
action_add('Indicator', response.get('objectid'), action,
user=username)
else:
result['success'] = False
result_message += msg % (processed + 1, response['message'])
continue
added += 1
if processed < 1:
result['success'] = False
result_message = "Could not find any valid CSV rows to parse!"
result['message'] = "Successfully added %s Indicator(s).<br />%s" % (added, result_message)
return result
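# Illustrative CSV input for the row parser above (header names match the
# d.get(...) lookups; only Indicator and Type are mandatory):
#   Indicator,Type,Description,Confidence,Impact,Campaign,Status
#   evil.example.com,Domain,C2 domain,high,medium,,New
#   10.1.2.3,IPv4 Address,Beacon destination,medium,low,,New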
def handle_indicator_ind(value, source, ctype, threat_type, attack_type,
analyst, method='', reference='',
add_domain=False, add_relationship=False, campaign=None,
campaign_confidence=None, confidence=None,
description=None, impact=None,
bucket_list=None, ticket=None, cache={},
related_id=None, related_type=None, relationship_type=None):
"""
Handle adding an individual indicator.
:param value: The indicator value.
:type value: str
:param source: The name of the source for this indicator.
:type source: str
:param ctype: The indicator type.
:type ctype: str
:param threat_type: The indicator threat type.
:type threat_type: str
:param attack_type: The indicator attack type.
:type attack_type: str
:param analyst: The user adding this indicator.
:type analyst: str
:param method: The method of acquisition of this indicator.
:type method: str
:param reference: The reference to this data.
:type reference: str
:param add_domain: If the indicators being added are also other top-level
objects, add those too.
:type add_domain: boolean
:param add_relationship: If a relationship can be made, create it.
:type add_relationship: boolean
:param campaign: Campaign to attribute to this indicator.
:type campaign: str
:param campaign_confidence: Confidence of this campaign.
:type campaign_confidence: str
:param confidence: Indicator confidence.
:type confidence: str
:param description: The description of this data.
:type description: str
:param impact: Indicator impact.
:type impact: str
:param bucket_list: The bucket(s) to assign to this indicator.
:type bucket_list: str
:param ticket: Ticket to associate with this indicator.
:type ticket: str
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param related_id: ID for object to create relationship with
    :type related_id: str
    :param related_type: Type of object to create relationship with
    :type related_type: str
    :param relationship_type: Type of relationship to create
    :type relationship_type: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
result = None
if not source:
return {"success" : False, "message" : "Missing source information."}
if threat_type is None:
threat_type = IndicatorThreatTypes.UNKNOWN
if attack_type is None:
attack_type = IndicatorAttackTypes.UNKNOWN
if description is None:
description = ''
if value == None or value.strip() == "":
result = {'success': False,
'message': "Can't create indicator with an empty value field"}
elif ctype == None or ctype.strip() == "":
result = {'success': False,
'message': "Can't create indicator with an empty type field"}
else:
ind = {}
ind['type'] = ctype.strip()
ind['threat_types'] = [threat_type.strip()]
ind['attack_types'] = [attack_type.strip()]
ind['value'] = value.strip()
ind['lower'] = value.lower().strip()
ind['description'] = description.strip()
if campaign:
ind['campaign'] = campaign
if campaign_confidence and campaign_confidence in ('low', 'medium', 'high'):
ind['campaign_confidence'] = campaign_confidence
if confidence and confidence in ('unknown', 'benign', 'low', 'medium',
'high'):
ind['confidence'] = confidence
if impact and impact in ('unknown', 'benign', 'low', 'medium', 'high'):
ind['impact'] = impact
if bucket_list:
ind[form_consts.Common.BUCKET_LIST_VARIABLE_NAME] = bucket_list
if ticket:
ind[form_consts.Common.TICKET_VARIABLE_NAME] = ticket
try:
return handle_indicator_insert(ind, source, reference, analyst,
method, add_domain, add_relationship, cache=cache,
related_id=related_id, related_type=related_type,
relationship_type=relationship_type)
except Exception, e:
return {'success': False, 'message': repr(e)}
return result
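# Illustrative call (the source and analyst names are made up):
#   handle_indicator_ind('evil.example.com', 'MySource', IndicatorTypes.DOMAIN,
#                        IndicatorThreatTypes.UNKNOWN,
#                        IndicatorAttackTypes.UNKNOWN, 'analyst1',
#                        confidence='high', add_domain=True)
#   -> {'success': True, 'objectid': '...', ...} on success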
def handle_indicator_insert(ind, source, reference='', analyst='', method='',
add_domain=False, add_relationship=False, cache={},
related_id=None, related_type=None, relationship_type=None):
"""
Insert an individual indicator into the database.
NOTE: Setting add_domain to True will always create a relationship as well.
However, to create a relationship with an object that already exists before
this function was called, set add_relationship to True. This will assume
that the domain or IP object to create the relationship with already exists
and will avoid infinite mutual calls between, for example, add_update_ip
    and this function.
:param ind: Information about the indicator.
:type ind: dict
:param source: The source for this indicator.
:type source: list, str, :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param reference: The reference to the data.
:type reference: str
:param analyst: The user adding this indicator.
:type analyst: str
:param method: Method of acquiring this indicator.
:type method: str
:param add_domain: If this indicator is also a top-level object, try to add
it.
:type add_domain: boolean
:param add_relationship: Attempt to add relationships if applicable.
:type add_relationship: boolean
    :param cache: Cached data, typically for performance enhancements
                  during bulk operations.
    :type cache: dict
    :param related_id: ID for object to create relationship with
    :type related_id: str
    :param related_type: Type of object to create relationship with
    :type related_type: str
    :param relationship_type: Type of relationship to create
    :type relationship_type: str
:returns: dict with keys:
"success" (boolean),
"message" (str) if failed,
"objectid" (str) if successful,
"is_new_indicator" (boolean) if successful.
"""
if ind['type'] not in IndicatorTypes.values():
return {'success': False,
'message': "Not a valid Indicator Type: %s" % ind['type']}
for t in ind['threat_types']:
if t not in IndicatorThreatTypes.values():
return {'success': False,
'message': "Not a valid Indicator Threat Type: %s" % t}
for a in ind['attack_types']:
if a not in IndicatorAttackTypes.values():
return {'success': False,
'message': "Not a valid Indicator Attack Type: " % a}
(ind['value'], error) = validate_indicator_value(ind['value'], ind['type'])
if error:
return {"success": False, "message": error}
is_new_indicator = False
dmain = None
ip = None
rank = {
'unknown': 0,
'benign': 1,
'low': 2,
'medium': 3,
'high': 4,
}
if ind.get('status', None) is None or len(ind.get('status', '')) < 1:
ind['status'] = Status.NEW
indicator = Indicator.objects(ind_type=ind['type'],
lower=ind['lower']).first()
if not indicator:
indicator = Indicator()
indicator.ind_type = ind.get('type')
indicator.threat_types = ind.get('threat_types',
IndicatorThreatTypes.UNKNOWN)
indicator.attack_types = ind.get('attack_types',
IndicatorAttackTypes.UNKNOWN)
indicator.value = ind.get('value')
indicator.lower = ind.get('lower')
indicator.description = ind.get('description', '')
indicator.created = datetime.datetime.now()
indicator.confidence = EmbeddedConfidence(analyst=analyst)
indicator.impact = EmbeddedImpact(analyst=analyst)
indicator.status = ind.get('status')
is_new_indicator = True
else:
if ind['status'] != Status.NEW:
indicator.status = ind['status']
add_desc = "\nSeen on %s as: %s" % (str(datetime.datetime.now()),
ind['value'])
if not indicator.description:
indicator.description = ind.get('description', '') + add_desc
elif indicator.description != ind['description']:
indicator.description += "\n" + ind.get('description', '') + add_desc
else:
indicator.description += add_desc
indicator.add_threat_type_list(ind.get('threat_types',
IndicatorThreatTypes.UNKNOWN),
analyst,
append=True)
indicator.add_attack_type_list(ind.get('attack_types',
IndicatorAttackTypes.UNKNOWN),
analyst,
append=True)
if 'campaign' in ind:
if isinstance(ind['campaign'], basestring) and len(ind['campaign']) > 0:
confidence = ind.get('campaign_confidence', 'low')
ind['campaign'] = EmbeddedCampaign(name=ind['campaign'],
confidence=confidence,
description="",
analyst=analyst,
date=datetime.datetime.now())
if isinstance(ind['campaign'], EmbeddedCampaign):
indicator.add_campaign(ind['campaign'])
elif isinstance(ind['campaign'], list):
for campaign in ind['campaign']:
if isinstance(campaign, EmbeddedCampaign):
indicator.add_campaign(campaign)
if 'confidence' in ind and rank.get(ind['confidence'], 0) > rank.get(indicator.confidence.rating, 0):
indicator.confidence.rating = ind['confidence']
indicator.confidence.analyst = analyst
if 'impact' in ind and rank.get(ind['impact'], 0) > rank.get(indicator.impact.rating, 0):
indicator.impact.rating = ind['impact']
indicator.impact.analyst = analyst
bucket_list = None
if form_consts.Common.BUCKET_LIST_VARIABLE_NAME in ind:
bucket_list = ind[form_consts.Common.BUCKET_LIST_VARIABLE_NAME]
if bucket_list:
indicator.add_bucket_list(bucket_list, analyst)
ticket = None
if form_consts.Common.TICKET_VARIABLE_NAME in ind:
ticket = ind[form_consts.Common.TICKET_VARIABLE_NAME]
if ticket:
indicator.add_ticket(ticket, analyst)
# generate new source information and add to indicator
if isinstance(source, basestring) and source:
indicator.add_source(source=source, method=method,
reference=reference, analyst=analyst)
elif isinstance(source, EmbeddedSource):
indicator.add_source(source_item=source, method=method,
reference=reference)
elif isinstance(source, list):
for s in source:
if isinstance(s, EmbeddedSource):
indicator.add_source(source_item=s, method=method,
reference=reference)
if add_domain or add_relationship:
ind_type = indicator.ind_type
ind_value = indicator.lower
url_contains_ip = False
if ind_type in (IndicatorTypes.DOMAIN,
IndicatorTypes.URI):
if ind_type == IndicatorTypes.URI:
domain_or_ip = urlparse.urlparse(ind_value).hostname
try:
validate_ipv46_address(domain_or_ip)
url_contains_ip = True
except (DjangoValidationError, TypeError):
pass
else:
domain_or_ip = ind_value
if not url_contains_ip and domain_or_ip:
success = None
if add_domain:
success = upsert_domain(domain_or_ip,
indicator.source,
username='%s' % analyst,
campaign=indicator.campaign,
bucket_list=bucket_list,
cache=cache)
if not success['success']:
return {'success': False, 'message': success['message']}
            if not success or 'object' not in success:
dmain = Domain.objects(domain=domain_or_ip).first()
else:
dmain = success['object']
if ind_type in IPTypes.values() or url_contains_ip:
if url_contains_ip:
ind_value = domain_or_ip
try:
validate_ipv4_address(domain_or_ip)
ind_type = IndicatorTypes.IPV4_ADDRESS
except DjangoValidationError:
ind_type = IndicatorTypes.IPV6_ADDRESS
success = None
if add_domain:
success = ip_add_update(ind_value,
ind_type,
source=indicator.source,
campaign=indicator.campaign,
analyst=analyst,
bucket_list=bucket_list,
ticket=ticket,
indicator_reference=reference,
cache=cache)
if not success['success']:
return {'success': False, 'message': success['message']}
            if not success or 'object' not in success:
ip = IP.objects(ip=indicator.value).first()
else:
ip = success['object']
indicator.save(username=analyst)
if dmain:
dmain.add_relationship(indicator,
RelationshipTypes.RELATED_TO,
analyst="%s" % analyst,
get_rels=False)
dmain.save(username=analyst)
if ip:
ip.add_relationship(indicator,
RelationshipTypes.RELATED_TO,
analyst="%s" % analyst,
get_rels=False)
ip.save(username=analyst)
# Code for the "Add Related " Dropdown
related_obj = None
if related_id:
related_obj = class_from_id(related_type, related_id)
if not related_obj:
return {'success': False,
'message': 'Related Object not found.'}
indicator.save(username=analyst)
if related_obj and indicator and relationship_type:
relationship_type=RelationshipTypes.inverse(relationship=relationship_type)
indicator.add_relationship(related_obj,
relationship_type,
analyst=analyst,
get_rels=False)
indicator.save(username=analyst)
# run indicator triage
if is_new_indicator:
indicator.reload()
run_triage(indicator, analyst)
return {'success': True, 'objectid': str(indicator.id),
'is_new_indicator': is_new_indicator, 'object': indicator}
def does_indicator_relationship_exist(field, indicator_relationships):
"""
Checks if the input field's values already have an indicator
by cross checking against the list of indicator relationships. The input
field already has an associated indicator created if the input field's
"type" and "value" pairs exist -- since indicators are uniquely identified
by their type/value pair.
Args:
field: The generic input field containing a type/value pair. This is
checked against a list of indicators relationships to see if a
corresponding indicator already exists. This field is generally
from custom dictionaries such as from Django templates.
indicator_relationships: The list of indicator relationships
to cross reference the input field against.
Returns:
Returns true if the input field already has an indicator associated
with its values. Returns false otherwise.
"""
type, value = get_indicator_type_value_pair(field)
if indicator_relationships != None:
if type != None and value != None:
for indicator_relationship in indicator_relationships:
if indicator_relationship == None:
logger.error('Indicator relationship is not valid: ' +
str(indicator_relationship))
continue
if type == indicator_relationship.get('ind_type') and value == indicator_relationship.get('ind_value'):
return True
else:
        logger.error('Could not extract type/value pair of input field; ' +
                     'type: ' + str(type) +
                     ', value: ' + (value.encode("utf-8") if value else str(value)) +
                     ', indicator_relationships: ' + str(indicator_relationships))
return False
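# Illustrative use (the relationship dicts mirror the ind_type/ind_value keys
# checked above):
#   rels = [{'ind_type': 'Domain', 'ind_value': 'example.com'}]
#   does_indicator_relationship_exist({'type': 'Domain', 'value': 'EXAMPLE.com '},
#                                     rels)
#   -> True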
def ci_search(itype, confidence, impact, actions):
"""
Find indicators based on type, confidence, impact, and/or actions.
:param itype: The indicator type to search for.
:type itype: str
:param confidence: The confidence level(s) to search for.
:type confidence: str
:param impact: The impact level(s) to search for.
:type impact: str
:param actions: The action(s) to search for.
:type actions: str
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
query = {}
if confidence:
item_list = confidence.replace(' ', '').split(',')
query["confidence.rating"] = {"$in": item_list}
if impact:
item_list = impact.replace(' ', '').split(',')
query["impact.rating"] = {"$in": item_list}
if actions:
item_list = actions.split(',')
query["actions.action_type"] = {"$in": item_list}
query["type"] = "%s" % itype.strip()
result_filter = ('type', 'value', 'confidence', 'impact', 'actions')
results = Indicator.objects(__raw__=query).only(*result_filter)
return results
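# Example of the raw Mongo query built above for
#   ci_search('Domain', 'medium, high', None, 'Blocked')
# ('Blocked' is an illustrative action name):
#   {'type': 'Domain',
#    'confidence.rating': {'$in': ['medium', 'high']},
#    'actions.action_type': {'$in': ['Blocked']}}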
def set_indicator_type(indicator_id, itype, username):
"""
Set the Indicator type.
:param indicator_id: The ObjectId of the indicator to update.
:type indicator_id: str
:param itype: The new indicator type.
:type itype: str
:param username: The user updating the indicator.
:type username: str
:returns: dict with key "success" (boolean)
"""
# check to ensure we're not duping an existing indicator
indicator = Indicator.objects(id=indicator_id).first()
value = indicator.value
ind_check = Indicator.objects(ind_type=itype, value=value).first()
if ind_check:
# we found a dupe
return {'success': False}
else:
try:
indicator.ind_type = itype
indicator.save(username=username)
return {'success': True}
except ValidationError:
return {'success': False}
def modify_threat_types(id_, threat_types, user, **kwargs):
"""
Set the Indicator threat types.
    :param id_: The ObjectId of the indicator to update.
    :type id_: str
:param threat_types: The new indicator threat types.
:type threat_types: list,str
:param user: The user updating the indicator.
:type user: str
:returns: dict with key "success" (boolean)
"""
indicator = Indicator.objects(id=id_).first()
if isinstance(threat_types, basestring):
threat_types = threat_types.split(',')
for t in threat_types:
if t not in IndicatorThreatTypes.values():
return {'success': False,
'message': "Not a valid Threat Type: %s" % t}
try:
indicator.add_threat_type_list(threat_types, user, append=False)
indicator.save(username=user)
return {'success': True}
except ValidationError:
return {'success': False}
def modify_attack_types(id_, attack_types, user, **kwargs):
"""
Set the Indicator attack type.
    :param id_: The ObjectId of the indicator to update.
    :type id_: str
    :param attack_types: The new indicator attack types.
    :type attack_types: list,str
:param user: The user updating the indicator.
:type user: str
:returns: dict with key "success" (boolean)
"""
indicator = Indicator.objects(id=id_).first()
if isinstance(attack_types, basestring):
attack_types = attack_types.split(',')
for a in attack_types:
if a not in IndicatorAttackTypes.values():
return {'success': False,
'message': "Not a valid Attack Type: %s" % a}
try:
indicator.add_attack_type_list(attack_types, user, append=False)
indicator.save(username=user)
return {'success': True}
except ValidationError:
return {'success': False}
def indicator_remove(_id, username):
"""
Remove an Indicator from CRITs.
:param _id: The ObjectId of the indicator to remove.
:type _id: str
:param username: The user removing the indicator.
:type username: str
:returns: dict with keys "success" (boolean) and "message" (list) if failed.
"""
if is_admin(username):
indicator = Indicator.objects(id=_id).first()
if indicator:
indicator.delete(username=username)
return {'success': True}
else:
return {'success': False, 'message': ['Cannot find Indicator']}
else:
return {'success': False, 'message': ['Must be an admin to delete']}
def activity_add(id_, activity, user, **kwargs):
"""
Add activity to an Indicator.
:param id_: The ObjectId of the indicator to update.
:type id_: str
:param activity: The activity information.
:type activity: dict
    :param user: The user adding the activity.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str) if failed,
"object" (dict) if successful.
"""
sources = user_sources(user)
indicator = Indicator.objects(id=id_,
source__name__in=sources).first()
if not indicator:
return {'success': False,
'message': 'Could not find Indicator'}
try:
activity['analyst'] = user
indicator.add_activity(activity['analyst'],
activity['start_date'],
activity['end_date'],
activity['description'],
activity['date'])
indicator.save(username=user)
return {'success': True, 'object': activity,
'id': str(indicator.id)}
except ValidationError, e:
return {'success': False, 'message': e,
'id': str(indicator.id)}
def activity_update(id_, activity, user=None, **kwargs):
"""
Update activity for an Indicator.
:param id_: The ObjectId of the indicator to update.
:type id_: str
:param activity: The activity information.
:type activity: dict
:param user: The user updating the activity.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str) if failed,
"object" (dict) if successful.
"""
sources = user_sources(user)
indicator = Indicator.objects(id=id_,
source__name__in=sources).first()
if not indicator:
return {'success': False,
'message': 'Could not find Indicator'}
try:
activity = datetime_parser(activity)
activity['analyst'] = user
indicator.edit_activity(activity['analyst'],
activity['start_date'],
activity['end_date'],
activity['description'],
activity['date'])
indicator.save(username=user)
return {'success': True, 'object': activity}
except ValidationError, e:
return {'success': False, 'message': e}
def activity_remove(id_, date, user, **kwargs):
"""
Remove activity from an Indicator.
:param id_: The ObjectId of the indicator to update.
:type id_: str
:param date: The date of the activity to remove.
:type date: datetime.datetime
:param user: The user removing this activity.
:type user: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
indicator = Indicator.objects(id=id_).first()
if not indicator:
return {'success': False,
'message': 'Could not find Indicator'}
try:
date = datetime_parser(date)
indicator.delete_activity(date)
indicator.save(username=user)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': e}
def ci_update(id_, ci_type, value, user, **kwargs):
"""
Update confidence or impact for an indicator.
:param id_: The ObjectId of the indicator to update.
:type id_: str
:param ci_type: What we are updating.
:type ci_type: str ("confidence" or "impact")
:param value: The value to set.
:type value: str ("unknown", "benign", "low", "medium", "high")
:param user: The user updating this indicator.
    :type user: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
indicator = Indicator.objects(id=id_).first()
if not indicator:
return {'success': False,
'message': 'Could not find Indicator'}
if ci_type == "confidence" or ci_type == "impact":
try:
if ci_type == "confidence":
indicator.set_confidence(user, value)
else:
indicator.set_impact(user, value)
indicator.save(username=user)
return {'success': True}
except ValidationError, e:
return {'success': False, "message": e}
else:
return {'success': False, 'message': 'Invalid CI type'}
def create_indicator_and_ip(type_, id_, ip, analyst):
"""
Add indicators for an IP address.
:param type_: The CRITs top-level object we are getting this IP from.
:type type_: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param id_: The ObjectId of the top-level object to search for.
:type id_: str
:param ip: The IP address to generate an indicator out of.
:type ip: str
:param analyst: The user adding this indicator.
:type analyst: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"value" (str)
"""
obj_class = class_from_id(type_, id_)
if obj_class:
ip_class = IP.objects(ip=ip).first()
ind_type = IPTypes.IPV4_ADDRESS
ind_class = Indicator.objects(ind_type=ind_type, value=ip).first()
# setup IP
if ip_class:
ip_class.add_relationship(obj_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
else:
ip_class = IP()
ip_class.ip = ip
ip_class.source = obj_class.source
ip_class.save(username=analyst)
ip_class.add_relationship(obj_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
# setup Indicator
message = ""
if ind_class:
message = ind_class.add_relationship(obj_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
ind_class.add_relationship(ip_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
else:
ind_class = Indicator()
ind_class.source = obj_class.source
ind_class.ind_type = ind_type
ind_class.value = ip
ind_class.save(username=analyst)
message = ind_class.add_relationship(obj_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
ind_class.add_relationship(ip_class,
RelationshipTypes.RELATED_TO,
analyst=analyst)
# save
try:
obj_class.save(username=analyst)
ip_class.save(username=analyst)
ind_class.save(username=analyst)
if message['success']:
rels = obj_class.sort_relationships("%s" % analyst, meta=True)
return {'success': True, 'message': rels, 'value': obj_class.id}
else:
return {'success': False, 'message': message['message']}
except Exception, e:
return {'success': False, 'message': e}
else:
return {'success': False,
'message': "Could not find %s to add relationships" % type_}
def create_indicator_from_tlo(tlo_type, tlo, analyst, source_name=None,
tlo_id=None, ind_type=None, value=None,
update_existing=True, add_domain=True):
"""
Create an indicator from a Top-Level Object (TLO).
:param tlo_type: The CRITs type of the parent TLO.
:type tlo_type: str
:param tlo: A CRITs parent TLO class object
:type tlo: class - some CRITs TLO
:param analyst: The user creating this indicator.
:type analyst: str
:param source_name: The source name for the new source instance that
records this indicator being added.
:type source_name: str
:param tlo_id: The ObjectId of the parent TLO.
:type tlo_id: str
:param ind_type: The indicator type, if TLO is not Domain or IP.
:type ind_type: str
:param value: The value of the indicator, if TLO is not Domain or IP.
:type value: str
:param update_existing: If Indicator already exists, update it
:type update_existing: boolean
:param add_domain: If new indicator contains a domain/ip, add a
matching Domain or IP TLO
:type add_domain: boolean
:returns: dict with keys:
"success" (boolean),
"message" (str),
"value" (str),
"indicator" :class:`crits.indicators.indicator.Indicator`
"""
if not tlo:
tlo = class_from_id(tlo_type, tlo_id)
if not tlo:
return {'success': False,
'message': "Could not find %s" % tlo_type}
source = tlo.source
campaign = tlo.campaign
bucket_list = tlo.bucket_list
tickets = tlo.tickets
# If value and ind_type provided, use them instead of defaults
if tlo_type == "Domain":
value = value or tlo.domain
ind_type = ind_type or IndicatorTypes.DOMAIN
elif tlo_type == "IP":
value = value or tlo.ip
ind_type = ind_type or tlo.ip_type
elif tlo_type == "Indicator":
value = value or tlo.value
ind_type = ind_type or tlo.ind_type
if not value or not ind_type: # if not provided & no default
return {'success': False,
'message': "Indicator value & type must be provided"
"for TLO of type %s" % tlo_type}
#check if indicator already exists
if Indicator.objects(ind_type=ind_type,
value=value).first() and not update_existing:
return {'success': False, 'message': "Indicator already exists"}
result = handle_indicator_ind(value, source,
ctype=ind_type,
threat_type=IndicatorThreatTypes.UNKNOWN,
attack_type=IndicatorAttackTypes.UNKNOWN,
analyst=analyst,
add_domain=add_domain,
add_relationship=True,
campaign=campaign,
bucket_list=bucket_list,
ticket=tickets)
if result['success']:
ind = Indicator.objects(id=result['objectid']).first()
if ind:
if source_name:
# add source to show when indicator was created/updated
ind.add_source(source=source_name,
method= 'Indicator created/updated ' \
'from %s with ID %s' % (tlo_type, tlo.id),
date=datetime.datetime.now(),
analyst = analyst)
tlo.add_relationship(ind,
RelationshipTypes.RELATED_TO,
analyst=analyst)
tlo.save(username=analyst)
for rel in tlo.relationships:
if rel.rel_type == "Event":
# Get event object to pass in.
rel_item = Event.objects(id=rel.object_id).first()
if rel_item:
ind.add_relationship(rel_item,
RelationshipTypes.RELATED_TO,
analyst=analyst)
ind.save(username=analyst)
tlo.reload()
rels = tlo.sort_relationships("%s" % analyst, meta=True)
return {'success': True, 'message': rels,
'value': tlo.id, 'indicator': ind}
else:
return {'success': False, 'message': "Failed to create Indicator"}
else:
return result
def validate_indicator_value(value, ind_type):
"""
Check that a given value is valid for a particular Indicator type.
:param value: The value to be validated
:type value: str
:param ind_type: The indicator type to validate against
:type ind_type: str
:returns: tuple: (Valid value, Error message)
"""
value = value.strip()
domain = ""
# URL
if ind_type == IndicatorTypes.URI and "://" in value.split('.')[0]:
domain_or_ip = urlparse.urlparse(value).hostname
try:
validate_ipv46_address(domain_or_ip)
return (value, "")
except DjangoValidationError:
domain = domain_or_ip
# Email address
if ind_type in (IndicatorTypes.EMAIL_ADDRESS,
IndicatorTypes.EMAIL_FROM,
IndicatorTypes.EMAIL_REPLY_TO,
IndicatorTypes.EMAIL_SENDER):
if '@' not in value:
return ("", "Email address must contain an '@'")
domain_or_ip = value.split('@')[-1]
if domain_or_ip[0] == '[' and domain_or_ip[-1] == ']':
try:
validate_ipv46_address(domain_or_ip[1:-1])
return (value, "")
except DjangoValidationError:
return ("", "Email address does not contain a valid IP")
else:
domain = domain_or_ip
# IPs
if ind_type in IPTypes.values():
(ip_address, error) = validate_and_normalize_ip(value, ind_type)
if error:
return ("", error)
else:
return (ip_address, "")
# Domains
if ind_type == IndicatorTypes.DOMAIN or domain:
(root, domain, error) = get_valid_root_domain(domain or value)
if error:
return ("", error)
else:
return (value, "")
return (value, "")
|
from setuptools import setup
import opterator
setup(
name="opterator",
version=opterator.__version__,
py_modules=['opterator', 'test_opterator'],
author="Dusty Phillips",
author_email="dusty@buchuki.com",
license="MIT",
keywords="opterator option parse parser options",
url="http://github.com/buchuki/opterator/",
description="Easy option parsing introspected from function signature.",
download_url="https://github.com/buchuki/opterator/archive/%s.tar.gz" % opterator.__version__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
)
|
import config
import re
import unittest
class ConfigTest(unittest.TestCase):
@classmethod
def add_validate_categories_test(cls, cfg):
def test(self):
# Categories must have underscores instead of spaces.
self.assertNotIn(' ', cfg.hidden_category)
self.assertNotIn(' ', cfg.citation_needed_category)
name = 'test_' + cfg.lang_code + '_category_names_underscores'
setattr(cls, name, test)
@classmethod
def add_validate_templates_test(cls, cfg):
def test(self):
# Templates should contain spaces, not underscores.
for tpl in cfg.citation_needed_templates:
self.assertNotIn('_', tpl)
setattr(cls, 'test_' + cfg.lang_code + '_template_names_spaces', test)
@classmethod
def add_validate_wikipedia_domain_test(cls, cfg):
def test(self):
            self.assertTrue(re.match(r'^[a-z]+\.wikipedia\.org$',
                                     cfg.wikipedia_domain))
setattr(cls, 'test_' + cfg.lang_code + '_wikipedia_domain', test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
ConfigTest.add_validate_categories_test(cfg)
ConfigTest.add_validate_templates_test(cfg)
ConfigTest.add_validate_wikipedia_domain_test(cfg)
unittest.main()
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maji_sys.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
import random
import weakref
from redis.client import Redis
from redis.connection import ConnectionPool, Connection
from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
TimeoutError)
from redis._compat import iteritems, nativestr, xrange
class MasterNotFoundError(ConnectionError):
pass
class SlaveNotFoundError(ConnectionError):
pass
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
self.connection_pool = kwargs.pop('connection_pool')
super(SentinelManagedConnection, self).__init__(**kwargs)
    def __repr__(self):
        pool = self.connection_pool
        s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
        host_info = ''
        if self.host:
            host_info = ',host=%s,port=%s' % (self.host, self.port)
        return s % host_info
def connect_to(self, address):
self.host, self.port = address
super(SentinelManagedConnection, self).connect()
if self.connection_pool.check_connection:
self.send_command('PING')
if nativestr(self.read_response()) != 'PONG':
raise ConnectionError('PING failed')
def connect(self):
if self._sock:
return # already connected
if self.connection_pool.is_master:
self.connect_to(self.connection_pool.get_master_address())
else:
for slave in self.connection_pool.rotate_slaves():
try:
return self.connect_to(slave)
except ConnectionError:
continue
            raise SlaveNotFoundError  # unreachable: rotate_slaves() raises when exhausted
def read_response(self):
try:
return super(SentinelManagedConnection, self).read_response()
except ReadOnlyError:
if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError likely
                # indicates that the previous master that we're still connected
                # to has been demoted to a slave and there's a new master.
                # Calling disconnect will force the connection to re-query
                # sentinel during the next connect() attempt.
self.disconnect()
raise ConnectionError('The previous master is now a slave')
raise
class SentinelConnectionPool(ConnectionPool):
"""
Sentinel backed connection pool.
If ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs['connection_class'] = kwargs.get(
'connection_class', SentinelManagedConnection)
self.is_master = kwargs.pop('is_master', True)
self.check_connection = kwargs.pop('check_connection', False)
super(SentinelConnectionPool, self).__init__(**kwargs)
self.connection_kwargs['connection_pool'] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
def __repr__(self):
return "%s<service=%s(%s)" % (
type(self).__name__,
self.service_name,
self.is_master and 'master' or 'slave',
)
def reset(self):
super(SentinelConnectionPool, self).reset()
self.master_address = None
self.slave_rr_counter = None
def owns_connection(self, connection):
check = not self.is_master or \
(self.is_master and
self.master_address == (connection.host, connection.port))
parent = super(SentinelConnectionPool, self)
return check and parent.owns_connection(connection)
def get_master_address(self):
master_address = self.sentinel_manager.discover_master(
self.service_name)
if self.is_master:
if self.master_address != master_address:
self.master_address = master_address
# disconnect any idle connections so that they reconnect
# to the new master the next time that they are used.
self.disconnect(inuse_connections=False)
return master_address
def rotate_slaves(self):
"Round-robin slave balancer"
slaves = self.sentinel_manager.discover_slaves(self.service_name)
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in xrange(len(slaves)):
self.slave_rr_counter = (
self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
try:
yield self.get_master_address()
except MasterNotFoundError:
pass
raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
class Sentinel(object):
"""
Redis Sentinel cluster client
>>> from redis.sentinel import Sentinel
>>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
>>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
>>> master.set('foo', 'bar')
>>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
>>> slave.get('foo')
b'bar'
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
    ``min_other_sentinels`` defines the minimum number of peers for a sentinel.
When querying a sentinel, if it doesn't meet this threshold, responses
from that sentinel won't be considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any socket_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = {
k: v
for k, v in iteritems(connection_kwargs)
if k.startswith('socket_')
}
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append('%s:%s' % (
sentinel.connection_pool.connection_kwargs['host'],
sentinel.connection_pool.connection_kwargs['port'],
))
return '%s<sentinels=[%s]>' % (
type(self).__name__,
','.join(sentinel_addresses))
def check_master_state(self, state, service_name):
if not state['is_master'] or state['is_sdown'] or state['is_odown']:
return False
# Check if our sentinel doesn't see other nodes
if state['num-other-sentinels'] < self.min_other_sentinels:
return False
return True
def discover_master(self, service_name):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = sentinel.sentinel_masters()
except (ConnectionError, TimeoutError):
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel, self.sentinels[0])
return state['ip'], state['port']
raise MasterNotFoundError("No master found for %r" % (service_name,))
def filter_slaves(self, slaves):
"Remove slaves that are in an ODOWN or SDOWN state"
slaves_alive = []
for slave in slaves:
if slave['is_odown'] or slave['is_sdown']:
continue
slaves_alive.append((slave['ip'], slave['port']))
return slaves_alive
def discover_slaves(self, service_name):
"Returns a list of alive slaves for service ``service_name``"
for sentinel in self.sentinels:
try:
slaves = sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(self, service_name, redis_class=Redis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns a redis client instance for the ``service_name`` master.
A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
        used to retrieve the master's address before establishing a new
connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to
use. The :py:class:`~redis.sentinel.SentinelConnectionPool`
will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
def slave_for(self, service_name, redis_class=Redis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns redis client instance for the ``service_name`` slave(s).
        A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
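# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of routing commands through sentinel-backed
# pools. The host/port pair and the service name 'mymaster' are assumptions;
# adapt them to the actual sentinel deployment.
if __name__ == '__main__':
    sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
    # master_for()/slave_for() return ordinary Redis clients whose
    # SentinelConnectionPool re-discovers addresses after failover.
    master = sentinel.master_for('mymaster', socket_timeout=0.1)
    master.set('foo', 'bar')
    slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
    print(slave.get('foo'))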
|
"""barcode.base
"""
from __future__ import unicode_literals
from barcode.writer import SVGWriter
class Barcode(object):
name = ''
raw = None
digits = 0
default_writer = SVGWriter
default_writer_options = {
'module_width': 0.2,
'module_height': 15.0,
'quiet_zone': 6.5,
'font_size': 10,
'text_distance': 5.0,
'background': 'white',
'foreground': 'black',
'write_text': True,
'text': '',
}
def to_ascii(self):
code = self.build()
for i, line in enumerate(code):
code[i] = line.replace('1', 'X').replace('0', ' ')
return '\n'.join(code)
def __repr__(self):
return '<{0}({1!r})>'.format(self.__class__.__name__,
self.get_fullcode())
def build(self):
raise NotImplementedError
def get_fullcode(self):
"""Returns the full code, encoded in the barcode.
:returns: Full human readable code.
:rtype: String
"""
raise NotImplementedError
def save(self, filename, options=None):
"""Renders the barcode and saves it in `filename`.
:parameters:
filename : String
Filename to save the barcode in (without filename
extension).
options : Dict
The same as in `self.render`.
:returns: The full filename with extension.
:rtype: String
"""
output = self.render(options)
_filename = self.writer.save(filename, output)
return _filename
def write(self, fp, options=None):
"""Renders the barcode and writes it to the file like object
`fp`.
:parameters:
fp : File like object
Object to write the raw data in.
options : Dict
The same as in `self.render`.
"""
output = self.render(options)
if hasattr(output, 'tostring'):
output.save(fp, format=self.writer.format)
else:
fp.write(output)
def render(self, writer_options=None):
"""Renders the barcode using `self.writer`.
:parameters:
writer_options : Dict
Options for `self.writer`, see writer docs for details.
:returns: Output of the writers render method.
"""
options = Barcode.default_writer_options.copy()
options.update(writer_options or {})
if options['write_text']:
options['text'] = self.get_fullcode()
self.writer.set_options(options)
code = self.build()
raw = Barcode.raw = self.writer.render(code)
return raw
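# --- Usage sketch (not part of the original module) ---
# Barcode is abstract: subclasses must implement build() and get_fullcode().
# DemoBarcode below is hypothetical and its encoding is a toy pattern, not a
# real symbology; it only illustrates the render()/save() contract.
class DemoBarcode(Barcode):
    name = 'Demo'

    def __init__(self, code, writer=None):
        self.code = code
        self.writer = writer or Barcode.default_writer()

    def build(self):
        # One line of '1'/'0' modules, as expected by the writer.
        return [''.join('101' if c in '13579' else '010' for c in self.code)]

    def get_fullcode(self):
        return self.code

# DemoBarcode('12345').save('demo')  # would write demo.svg via SVGWriter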
|
if __name__ == "__main__":
from edaboweb.edaboweb import app, engine
from edaboweb.mb_database import init_db
from edaboweb.db_models import db
init_db(engine)
with app.app_context():
db.init_app(app)
db.create_all()
app.run(debug=True)
|
from io import BytesIO
from unittest.mock import Mock, call
from uuid import UUID
import pytest
from botocore.exceptions import ClientError as BotoClientError
from flask import url_for
from notifications_python_client.errors import HTTPError
from app.s3_client.s3_logo_client import (
LETTER_TEMP_LOGO_LOCATION,
permanent_letter_logo_name,
)
from tests.conftest import normalize_spaces
def test_letter_branding_page_shows_full_branding_list(
client_request,
platform_admin_user,
mock_get_all_letter_branding
):
client_request.login(platform_admin_user)
page = client_request.get('.letter_branding')
links = page.select('.message-name a')
brand_names = [normalize_spaces(link.text) for link in links]
hrefs = [link['href'] for link in links]
assert normalize_spaces(
page.select_one('h1').text
) == "Letter branding"
assert page.select('.govuk-grid-column-three-quarters a')[-1]['href'] == url_for('main.create_letter_branding')
assert brand_names == [
'HM Government',
'Land Registry',
'Animal and Plant Health Agency',
]
assert hrefs == [
url_for('.update_letter_branding', branding_id=str(UUID(int=0))),
url_for('.update_letter_branding', branding_id=str(UUID(int=1))),
url_for('.update_letter_branding', branding_id=str(UUID(int=2))),
]
def test_update_letter_branding_shows_the_current_letter_brand(
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid,
):
client_request.login(platform_admin_user)
page = client_request.get(
'.update_letter_branding',
branding_id=fake_uuid,
)
assert page.find('h1').text == 'Update letter branding'
assert page.select_one('#logo-img > img')['src'].endswith('/hm-government.svg')
assert page.select_one('#name').attrs.get('value') == 'HM Government'
assert page.select_one('#file').attrs.get('accept') == '.svg'
def test_update_letter_branding_with_new_valid_file(
mocker,
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
filename = 'new_file.svg'
expected_temp_filename = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename=filename)
mock_s3_upload = mocker.patch('app.s3_client.s3_logo_client.utils_s3upload')
mocker.patch('app.s3_client.s3_logo_client.uuid.uuid4', return_value=fake_uuid)
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_file')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
_data={'file': (BytesIO(''.encode('utf-8')), filename)},
_follow_redirects=True,
)
assert page.select_one('#logo-img > img')['src'].endswith(expected_temp_filename)
assert page.select_one('#name').attrs.get('value') == 'HM Government'
assert mock_s3_upload.called
assert mock_delete_temp_files.called is False
def test_update_letter_branding_when_uploading_invalid_file(
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid,
):
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
_data={'file': (BytesIO(''.encode('utf-8')), 'test.png')},
_follow_redirects=True
)
assert page.find('h1').text == 'Update letter branding'
assert page.select_one('.error-message').text.strip() == 'SVG Images only!'
def test_update_letter_branding_deletes_any_temp_files_when_uploading_a_file(
mocker,
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid,
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='temp.svg')
mock_s3_upload = mocker.patch('app.s3_client.s3_logo_client.utils_s3upload')
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_file')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
logo=temp_logo,
_data={'file': (BytesIO(''.encode('utf-8')), 'new_uploaded_file.svg')},
_follow_redirects=True,
)
assert mock_s3_upload.called
assert mock_delete_temp_files.called
assert page.find('h1').text == 'Update letter branding'
def test_update_letter_branding_with_original_file_and_new_details(
mocker,
client_request,
platform_admin_user,
mock_get_all_letter_branding,
mock_get_letter_branding_by_id,
fake_uuid
):
mock_client_update = mocker.patch('app.main.views.letter_branding.letter_branding_client.update_letter_branding')
mock_upload_logos = mocker.patch('app.main.views.letter_branding.upload_letter_svg_logo')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
_data={
'name': 'Updated name',
'operation': 'branding-details'
},
_follow_redirects=True,
)
assert page.find('h1').text == 'Letter branding'
assert mock_upload_logos.called is False
mock_client_update.assert_called_once_with(
branding_id=fake_uuid,
filename='hm-government',
name='Updated name'
)
def test_update_letter_branding_shows_form_errors_on_name_fields(
mocker,
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid
):
mocker.patch('app.main.views.letter_branding.letter_branding_client.update_letter_branding')
logo = permanent_letter_logo_name('hm-government', 'svg')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
logo=logo,
_data={
'name': '',
'operation': 'branding-details'
},
_follow_redirects=True
)
error_messages = page.find_all('span', class_='govuk-error-message')
assert page.find('h1').text == 'Update letter branding'
assert len(error_messages) == 1
assert 'This field is required.' in error_messages[0].text.strip()
def test_update_letter_branding_shows_database_errors_on_name_field(
mocker,
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid,
):
mocker.patch('app.main.views.letter_branding.letter_branding_client.update_letter_branding', side_effect=HTTPError(
response=Mock(
status_code=400,
json={
'result': 'error',
'message': {
'name': {
'name already in use'
}
}
}
),
message={'name': ['name already in use']}
))
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
_data={
'name': 'my brand',
'operation': 'branding-details'
},
_expected_status=200,
)
error_message = page.find('span', class_='govuk-error-message').text.strip()
assert page.find('h1').text == 'Update letter branding'
assert 'name already in use' in error_message
def test_update_letter_branding_with_new_file_and_new_details(
mocker,
client_request,
platform_admin_user,
mock_get_all_letter_branding,
mock_get_letter_branding_by_id,
fake_uuid
):
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=fake_uuid, unique_id=fake_uuid, filename='new_file.svg')
mock_client_update = mocker.patch('app.main.views.letter_branding.letter_branding_client.update_letter_branding')
mock_persist_logo = mocker.patch('app.main.views.letter_branding.persist_logo')
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_files_created_by')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
logo=temp_logo,
_data={
'name': 'Updated name',
'operation': 'branding-details'
},
_follow_redirects=True
)
assert page.find('h1').text == 'Letter branding'
mock_client_update.assert_called_once_with(
branding_id=fake_uuid,
filename='{}-new_file'.format(fake_uuid),
name='Updated name'
)
mock_persist_logo.assert_called_once_with(
temp_logo,
'letters/static/images/letter-template/{}-new_file.svg'.format(fake_uuid)
)
mock_delete_temp_files.assert_called_once_with(fake_uuid)
def test_update_letter_branding_rolls_back_db_changes_and_shows_error_if_saving_to_s3_fails(
mocker,
client_request,
platform_admin_user,
mock_get_letter_branding_by_id,
fake_uuid
):
mock_client_update = mocker.patch('app.main.views.letter_branding.letter_branding_client.update_letter_branding')
mocker.patch('app.main.views.letter_branding.upload_letter_svg_logo', side_effect=BotoClientError({}, 'error'))
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=fake_uuid, unique_id=fake_uuid, filename='new_file.svg')
client_request.login(platform_admin_user)
page = client_request.post(
'.update_letter_branding',
branding_id=fake_uuid,
logo=temp_logo,
_data={
'name': 'Updated name',
'operation': 'branding-details'
},
_follow_redirects=True,
)
assert page.find('h1').text == 'Update letter branding'
assert page.select_one('.error-message').text.strip() == 'Error saving uploaded file - try uploading again'
assert mock_client_update.call_count == 2
assert mock_client_update.call_args_list == [
call(branding_id=fake_uuid, filename='{}-new_file'.format(fake_uuid), name='Updated name'),
call(branding_id=fake_uuid, filename='hm-government', name='HM Government')
]
def test_create_letter_branding_does_not_show_branding_info(
client_request,
platform_admin_user,
):
client_request.login(platform_admin_user)
page = client_request.get('.create_letter_branding')
assert page.select_one('#logo-img > img') is None
assert page.select_one('#name').attrs.get('value') is None
assert page.select_one('#file').attrs.get('accept') == '.svg'
def test_create_letter_branding_when_uploading_valid_file(
mocker,
client_request,
platform_admin_user,
fake_uuid
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
filename = 'test.svg'
expected_temp_filename = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename=filename)
mock_s3_upload = mocker.patch('app.s3_client.s3_logo_client.utils_s3upload')
mocker.patch('app.s3_client.s3_logo_client.uuid.uuid4', return_value=fake_uuid)
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_file')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
_data={'file': (BytesIO("""
<svg height="100" width="100">
<circle cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" /></svg>
""".encode('utf-8')), filename)},
_follow_redirects=True,
)
assert page.select_one('#logo-img > img').attrs['src'].endswith(expected_temp_filename)
assert mock_s3_upload.called
assert mock_delete_temp_files.called is False
@pytest.mark.parametrize('svg_contents, expected_error', (
(
'''
<svg height="100" width="100">
<image href="someurlgoeshere" x="0" y="0" height="100" width="100"></image></svg>
''',
'This SVG has an embedded raster image in it and will not render well',
),
(
'''
<svg height="100" width="100">
<text>Will render differently depending on fonts installed</text>
</svg>
''',
'This SVG has text which has not been converted to paths and may not render well',
),
))
def test_create_letter_branding_fails_validation_when_uploading_SVG_with_bad_element(
mocker,
client_request,
platform_admin_user,
fake_uuid,
svg_contents,
expected_error,
):
filename = 'test.svg'
mock_s3_upload = mocker.patch('app.s3_client.s3_logo_client.utils_s3upload')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
_data={'file': (BytesIO(svg_contents.encode('utf-8')), filename)},
_follow_redirects=True,
)
assert normalize_spaces(page.find('h1').text) == "Add letter branding"
assert normalize_spaces(page.select_one(".error-message").text) == expected_error
    assert page.find_all('div', {'id': 'logo-img'}) == []
assert mock_s3_upload.called is False
def test_create_letter_branding_when_uploading_invalid_file(
client_request,
platform_admin_user,
):
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
_data={'file': (BytesIO(''.encode('utf-8')), 'test.png')},
_follow_redirects=True,
)
assert page.find('h1').text == 'Add letter branding'
assert page.select_one('.error-message').text.strip() == 'SVG Images only!'
def test_create_letter_branding_deletes_temp_files_when_uploading_a_new_file(
mocker,
client_request,
platform_admin_user,
fake_uuid,
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='temp.svg')
mock_s3_upload = mocker.patch('app.s3_client.s3_logo_client.utils_s3upload')
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_file')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
logo=temp_logo,
_data={'file': (BytesIO(''.encode('utf-8')), 'new.svg')},
_follow_redirects=True
)
assert mock_s3_upload.called
assert mock_delete_temp_files.called
assert page.find('h1').text == 'Add letter branding'
def test_create_new_letter_branding_shows_preview_of_logo(
mocker,
client_request,
platform_admin_user,
fake_uuid
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='temp.svg')
client_request.login(platform_admin_user)
page = client_request.get(
'.create_letter_branding',
logo=temp_logo,
)
assert page.find('h1').text == 'Add letter branding'
assert page.select_one('#logo-img > img').attrs['src'].endswith(temp_logo)
def test_create_letter_branding_shows_an_error_when_submitting_details_with_no_logo(
client_request,
platform_admin_user,
fake_uuid
):
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
_data={
'name': 'Test brand',
'operation': 'branding-details'
},
_expected_status=200,
)
assert page.find('h1').text == 'Add letter branding'
assert page.select_one('.error-message').text.strip() == 'You need to upload a file to submit'
def test_create_letter_branding_persists_logo_when_all_data_is_valid(
mocker,
client_request,
platform_admin_user,
fake_uuid,
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='test.svg')
mock_letter_client = mocker.patch('app.main.views.letter_branding.letter_branding_client')
mock_persist_logo = mocker.patch('app.main.views.letter_branding.persist_logo')
mock_delete_temp_files = mocker.patch('app.main.views.letter_branding.delete_letter_temp_files_created_by')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
logo=temp_logo,
_data={
'name': 'Test brand',
'operation': 'branding-details'
},
_follow_redirects=True
)
assert page.find('h1').text == 'Letter branding'
mock_letter_client.create_letter_branding.assert_called_once_with(
filename='{}-test'.format(fake_uuid), name='Test brand'
)
mock_persist_logo.assert_called_once_with(
temp_logo,
'letters/static/images/letter-template/{}-test.svg'.format(fake_uuid)
)
mock_delete_temp_files.assert_called_once_with(user_id)
def test_create_letter_branding_shows_form_errors_on_name_field(
client_request,
platform_admin_user,
fake_uuid
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='test.svg')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
logo=temp_logo,
_data={
'name': '',
'operation': 'branding-details'
},
_expected_status=200,
)
error_messages = page.find_all('span', class_='govuk-error-message')
assert page.find('h1').text == 'Add letter branding'
assert len(error_messages) == 1
assert 'This field is required.' in error_messages[0].text.strip()
def test_create_letter_branding_shows_database_errors_on_name_fields(
mocker,
client_request,
platform_admin_user,
fake_uuid,
):
with client_request.session_transaction() as session:
user_id = session["user_id"]
mocker.patch('app.main.views.letter_branding.letter_branding_client.create_letter_branding', side_effect=HTTPError(
response=Mock(
status_code=400,
json={
'result': 'error',
'message': {
'name': {
'name already in use'
}
}
}
),
message={'name': ['name already in use']}
))
temp_logo = LETTER_TEMP_LOGO_LOCATION.format(user_id=user_id, unique_id=fake_uuid, filename='test.svg')
client_request.login(platform_admin_user)
page = client_request.post(
'.create_letter_branding',
logo=temp_logo,
_data={
'name': 'my brand',
'operation': 'branding-details'
},
_expected_status=200,
)
error_message = page.find('span', class_='govuk-error-message').text.strip()
assert page.find('h1').text == 'Add letter branding'
assert 'name already in use' in error_message
|
import json
from TM1py.Objects.User import User
from TM1py.Services.ObjectService import ObjectService
class SecurityService(ObjectService):
""" Service to handle Security stuff
"""
def __init__(self, rest):
super().__init__(rest)
def determine_actual_user_name(self, user_name):
return self.determine_actual_object_name(object_class="Users", object_name=user_name)
def determine_actual_group_name(self, group_name):
return self.determine_actual_object_name(object_class="Groups", object_name=group_name)
def create_user(self, user):
""" Create a user on TM1 Server
:param user: instance of TM1py.User
:return: response
"""
request = '/api/v1/Users'
return self._rest.POST(request, user.body)
def create_group(self, group_name):
""" Create a Security group in the TM1 Server
:param group_name:
:return:
"""
request = '/api/v1/Groups'
return self._rest.POST(request, json.dumps({"Name": group_name}))
def get_user(self, user_name):
""" Get user from TM1 Server
:param user_name:
:return: instance of TM1py.User
"""
user_name = self.determine_actual_user_name(user_name)
request = "/api/v1/Users('{}')?$expand=Groups".format(user_name)
response = self._rest.GET(request)
return User.from_dict(response.json())
def get_current_user(self):
""" Get user and group assignments of this session
:return: instance of TM1py.User
"""
request = "/api/v1/ActiveUser?$expand=Groups"
response = self._rest.GET(request)
return User.from_dict(response.json())
def update_user(self, user):
""" Update user on TM1 Server
:param user: instance of TM1py.User
:return: response
"""
user.name = self.determine_actual_user_name(user.name)
for current_group in self.get_groups(user.name):
if current_group not in user.groups:
self.remove_user_from_group(current_group, user.name)
request = "/api/v1/Users('{}')".format(user.name)
return self._rest.PATCH(request, user.body)
def delete_user(self, user_name):
""" Delete user on TM1 Server
:param user_name:
:return: response
"""
user_name = self.determine_actual_user_name(user_name)
request = "/api/v1/Users('{}')".format(user_name)
return self._rest.DELETE(request)
def delete_group(self, group_name):
""" Delete a group in the TM1 Server
:param group_name:
:return:
"""
group_name = self.determine_actual_group_name(group_name)
request = "/api/v1/Groups('{}')".format(group_name)
return self._rest.DELETE(request)
def get_all_users(self):
""" Get all users from TM1 Server
:return: List of TM1py.User instances
"""
request = '/api/v1/Users?$expand=Groups'
response = self._rest.GET(request)
users = [User.from_dict(user) for user in response.json()['value']]
return users
def get_all_user_names(self):
""" Get all user names from TM1 Server
        :return: List of strings
"""
        request = '/api/v1/Users?$select=Name'
response = self._rest.GET(request)
users = [user["Name"] for user in response.json()['value']]
return users
def get_users_from_group(self, group_name):
""" Get all users from group
:param group_name:
:return: List of TM1py.User instances
"""
request = '/api/v1/Groups(\'{}\')?$expand=Users($expand=Groups)'.format(group_name)
response = self._rest.GET(request)
users = [User.from_dict(user) for user in response.json()['Users']]
return users
def get_user_names_from_group(self, group_name):
""" Get all users from group
:param group_name:
:return: List of strings
"""
request = '/api/v1/Groups(\'{}\')?$expand=Users($expand=Groups)'.format(group_name)
response = self._rest.GET(request)
users = [user["Name"] for user in response.json()['Users']]
return users
def get_groups(self, user_name):
""" Get the groups of a user in TM1 Server
:param user_name:
:return: List of strings
"""
user_name = self.determine_actual_user_name(user_name)
request = '/api/v1/Users(\'{}\')/Groups'.format(user_name)
response = self._rest.GET(request)
return [group['Name'] for group in response.json()['value']]
def add_user_to_groups(self, user_name, groups):
"""
:param user_name: name of user
:param groups: iterable of groups
:return: response
"""
user_name = self.determine_actual_user_name(user_name)
request = "/api/v1/Users('{}')".format(user_name)
body = {
"Name": user_name,
"Groups@odata.bind": ["Groups('{}')".format(self.determine_actual_group_name(group))
for group
in groups]
}
return self._rest.PATCH(request, json.dumps(body))
def remove_user_from_group(self, group_name, user_name):
""" Remove user from group in TM1 Server
:param group_name:
:param user_name:
:return: response
"""
user_name = self.determine_actual_user_name(user_name)
group_name = self.determine_actual_group_name(group_name)
request = '/api/v1/Users(\'{}\')/Groups?$id=Groups(\'{}\')'.format(user_name, group_name)
return self._rest.DELETE(request)
def get_all_groups(self):
""" Get all groups from TM1 Server
:return: List of strings
"""
request = '/api/v1/Groups?$select=Name'
response = self._rest.GET(request)
groups = [entry['Name'] for entry in response.json()['value']]
return groups
def security_refresh(self):
from TM1py.Services import ProcessService
ti = "SecurityRefresh;"
process_service = ProcessService(self._rest)
return process_service.execute_ti_code(ti)
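# --- Usage sketch (not part of the original module) ---
# Hedged example of wiring SecurityService to a REST session. The RestService
# import and its connection parameters below are assumptions about a typical
# TM1py setup; adjust them to the actual server.
if __name__ == '__main__':
    from TM1py.Services import RestService
    rest = RestService(address='localhost', port=8001, user='admin',
                       password='apple', ssl=True)
    security = SecurityService(rest)
    print(security.get_all_user_names())
    rest.logout()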
|
"""
Python setup file for the companies app.
"""
import os
from setuptools import setup, find_packages
import companies as app
dev_requires = [
'flake8',
]
install_requires = [
# User should install requirements
]
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="valuehorizon-companies",
version=app.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, app, reusable, companies, valuehorizon',
author='Quincy Alexander',
author_email='qalexander@valuehorizon.com',
url="https://github.com/Valuehorizon/valuehorizon-companies",
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_requires,
},
test_suite="companies.tests.runtests.runtests"
)
|
"""
WSGI config for foreign_guides project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "foreign_guides.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
import nvksupport
import subprocess
import tempfile
import random
import sys
import os
ffmpeg_dir = 'ffmpeg-3.3.1-win64-shared'
mediaInfo_dir = 'MediaInfo'
mediaInfo_exec = os.path.join(mediaInfo_dir, 'MediaInfo.exe')
ffmpeg_exec = os.path.join(ffmpeg_dir, 'bin', 'ffmpeg.exe')
ffmpeg_exec = 'ffmpeg'
ffprobe_exec = os.path.join(ffmpeg_dir, 'bin', 'ffprobe.exe')
ffprobe_exec = 'ffprobe'
fdk_exec = 'fdkaac.exe'
x264_exec = 'x264.2833kMod.x86_64.exe'
avs_exec = 'avs4x264mod.exe'
remuxer_exec = 'remuxer.exe'
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
print('Audio Track from Video')
sourceVideo = input().replace('\"', '').strip()
sourceVideo_split = list(os.path.split(sourceVideo))
sourceVideo_split[1:] = os.path.splitext(sourceVideo_split[1])
tempDir = tempfile.gettempdir()+'\\'
tempName = 'prr' + str(random.randint(0,255))
tempPath = tempDir + tempName
tempAudioPath = tempPath + '_a' + '.m4a'
libDir = scriptDir + '\\Program\\'
os.chdir(libDir)
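# The command below is built as one list and run with shell=True so that the
# '|' token pipes ffmpeg's decoded WAV stream straight into fdkaac, which
# encodes it to AAC at tempAudioPath.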
extractAudioArgs_fdk = [
ffmpeg_exec,
'-loglevel', 'quiet',
'-i', sourceVideo,
'-map', '0:a',
'-c:a', 'pcm_s16le',
'-f', 'wav', '-', '|',
fdk_exec,
'-I', '-m', '1', '-',
'-o', tempAudioPath
]
subprocess.run(extractAudioArgs_fdk, shell = True)
print('Video Stream')
targetVideo = input().replace('\"', '')
targetVideo_split = list(os.path.split(targetVideo))
outPath = ''.join(
[
targetVideo_split[0],
'\\[remuxed]',
targetVideo_split[1]
]
)
remuxArg = [
remuxer_exec,
'-i', targetVideo, '-i', tempAudioPath,
'-o', outPath
]
subprocess.run(remuxArg)
os.remove(tempAudioPath)
|
import liblo
import numpy
class Pattern(object):
def __init__(self, tracks=8, steps=16):
self.steps = numpy.zeros((steps, tracks), bool)
self.muted = numpy.zeros(tracks, bool)
@property
def num_tracks(self):
return self.steps.shape[1]
@property
def num_steps(self):
return self.steps.shape[0]
def set_step(self, track, step):
self.steps[step, track] = True
def clear_step(self, track, step):
self.steps[step, track] = False
def mute(self, track):
self.muted[track] = True
def unmute(self, track):
self.muted[track] = False
def print_(self):
for track in range(self.num_tracks):
for step in range(self.num_steps):
if self.steps[step, track]:
                    print('*', end=' ')
                else:
                    print(' ', end=' ')
            print()
class SharedPattern(Pattern):
def __init__(self, address=8765):
Pattern.__init__(self)
self.target = liblo.Address(address)
def set_step(self, track, step):
if not self.steps[step, track]:
liblo.send(self.target, '/pattern/set', track, step)
Pattern.set_step(self, track, step)
def clear_step(self, track, step):
if self.steps[step, track]:
liblo.send(self.target, '/pattern/clear', track, step)
Pattern.clear_step(self, track, step)
def mute(self, track):
if not self.muted[track]:
liblo.send(self.target, '/pattern/mute', track)
Pattern.mute(self, track)
def unmute(self, track):
if self.muted[track]:
liblo.send(self.target, '/pattern/unmute', track)
Pattern.unmute(self, track)
class PatternListener(liblo.ServerThread):
def __init__(self, address=8765):
liblo.ServerThread.__init__(self, address)
self.pattern = Pattern()
@liblo.make_method('/pattern/set', 'ii')
def set_callback(self, path, args):
track, step = args
self.pattern.set_step(track, step)
@liblo.make_method('/pattern/clear', 'ii')
def clear_callback(self, path, args):
track, step = args
self.pattern.clear_step(track, step)
@liblo.make_method('/pattern/mute', 'i')
def mute_callback(self, path, track):
self.pattern.mute(track)
@liblo.make_method('/pattern/unmute', 'i')
def unmute_callback(self, path, track):
self.pattern.unmute(track)
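# --- Usage sketch (not part of the original module) ---
# Hedged localhost demo: a PatternListener applies the OSC messages that a
# SharedPattern emits, keeping two Pattern instances in sync over port 8765.
if __name__ == '__main__':
    import time
    listener = PatternListener(8765)
    listener.start()
    shared = SharedPattern(8765)
    shared.set_step(0, 0)
    shared.mute(3)
    time.sleep(0.1)  # give the server thread a moment to process
    listener.pattern.print_()
    listener.stop()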
|
import unittest
from ROOM.room import Room, LivingSpace
class LivingSpaceTest(unittest.TestCase):
def setUp(self):
self.some_living_space = LivingSpace('Yellow')
def test_LivingSpace_inherits_Room(self):
self.assertTrue(issubclass(LivingSpace, Room), msg='LivingSpace is not inheriting from Room')
def test_living_space_name(self):
self.assertEqual(self.some_living_space.name, 'Yellow', msg='Invalid living space name')
def test_living_space_type(self):
self.assertEqual(self.some_living_space.type, 'living', msg='Living space type should be living')
def test_living_space_occupants_is_dictionary(self):
self.assertEqual(type(self.some_living_space.occupants), dict, msg='Living space occupants is not a dictionary')
    def test_maximum_living_space_occupants(self):
self.assertEqual(self.some_living_space.maximum_occupants, 4, msg='Wrong number of maximum living space '
'occupants')
def test_living_space_occupants(self):
self.assertEqual(type(self.some_living_space.occupants), dict, msg='No occupants dictionary in LivingSpace '
'class ')
|
from experiments.launcher import run_from_code
from experiments import mnist_variations, synthetic, mnist_background_transfer
if __name__ == '__main__':
# run_from_code(experiment=synthetic, model='avb', pretrained_model=None, noise_mode='product')
# run_from_code(experiment=mnist_variations, n_datasets=2, model='avb', pretrained_model=None)
run_from_code(experiment=mnist_background_transfer, model='avb',
pretrained_model='output/conjoint_avb/mnist_variations_two/best')
|
from django_recurrences.db.models.mixins import AbstractRecurrenceModelMixin
class RecurrenceTestModel(AbstractRecurrenceModelMixin):
"""Test model that implements."""
|
import abjad
from abjad.tools import spannertools
class ClefSpanner(spannertools.Spanner):
r'''Clef spanner.
::
>>> staff = abjad.Staff("c' d' e' f' g' a' b' c''")
>>> clef = abjad.Clef('treble')
>>> abjad.attach(clef, staff[0])
>>> print(format(staff))
\new Staff {
\clef "treble"
c'4
d'4
e'4
f'4
g'4
a'4
b'4
c''4
}
::
>>> clef_spanner = consort.ClefSpanner('percussion')
>>> abjad.attach(clef_spanner, staff[2:-2])
>>> print(format(staff))
\new Staff {
\clef "treble"
c'4
d'4
\clef "percussion"
e'4
f'4
g'4
a'4
\clef "treble"
b'4
c''4
}
::
>>> staff = abjad.Staff("r4 c'4 d'4 r4 e'4 f'4 r4")
>>> clef = abjad.Clef('treble')
>>> abjad.attach(clef, staff[0])
>>> clef_spanner = consort.ClefSpanner('percussion')
>>> abjad.attach(clef_spanner, staff[1:3])
>>> clef_spanner = consort.ClefSpanner('percussion')
>>> abjad.attach(clef_spanner, staff[4:6])
>>> print(format(staff))
\new Staff {
\clef "treble"
r4
\clef "percussion"
c'4
d'4
r4
e'4
f'4
\clef "treble"
r4
}
'''
### CLASS VARIABLES ###
__slots__ = (
'_clef',
)
### INITIALIZER ###
def __init__(
self,
clef='percussion',
overrides=None,
):
spannertools.Spanner.__init__(
self,
overrides=overrides,
)
clef = abjad.Clef(clef)
self._clef = clef
### SPECIAL METHODS ###
def __getnewargs__(self):
r'''Gets new arguments of spanner.
        Returns one-element tuple containing the clef.
'''
return (
self.clef,
)
### PRIVATE METHODS ###
def _copy_keyword_args(self, new):
new._clef = self.clef
def _get_lilypond_format_bundle(self, leaf):
import consort
lilypond_format_bundle = self._get_basic_lilypond_format_bundle(leaf)
prototype = (abjad.Note, abjad.Chord, type(None))
first_leaf = self._get_leaves()[0]
current_clef = abjad.inspect(first_leaf).get_effective(abjad.Clef)
set_clef = False
reset_clef = False
if self._is_my_only_leaf(leaf):
consort.debug('ONLY', leaf)
if self.clef != current_clef:
set_clef = True
reset_clef = True
previous_leaf = abjad.inspect(leaf).get_leaf(-1)
consort.debug('\tP', previous_leaf)
while not isinstance(previous_leaf, prototype):
previous_leaf = abjad.inspect(previous_leaf).get_leaf(-1)
consort.debug('\tP', previous_leaf)
if previous_leaf is not None:
spanners = abjad.inspect(previous_leaf).get_spanners(
type(self))
spanners = tuple(spanners)
if spanners:
consort.debug('\tPREV?', spanners)
if spanners[0].clef == self.clef:
set_clef = False
next_leaf = abjad.inspect(leaf).get_leaf(1)
consort.debug('\tN', next_leaf)
while not isinstance(next_leaf, prototype):
next_leaf = abjad.inspect(next_leaf).get_leaf(1)
consort.debug('\tN', next_leaf)
if next_leaf is not None:
spanners = abjad.inspect(next_leaf).get_spanners(type(self))
spanners = tuple(spanners)
if spanners:
consort.debug('\tNEXT?', spanners)
if spanners[0].clef == self.clef:
reset_clef = False
elif self._is_my_first_leaf(leaf):
consort.debug('FIRST', leaf)
if self.clef != current_clef:
set_clef = True
previous_leaf = abjad.inspect(leaf).get_leaf(-1)
consort.debug('\tP', previous_leaf)
while not isinstance(previous_leaf, prototype):
previous_leaf = abjad.inspect(previous_leaf).get_leaf(-1)
consort.debug('\tP', previous_leaf)
if previous_leaf is not None:
spanners = abjad.inspect(previous_leaf).get_spanners(type(self))
spanners = tuple(spanners)
if spanners:
consort.debug('\tPREV?', spanners)
if spanners[0].clef == self.clef:
set_clef = False
elif self._is_my_last_leaf(leaf):
consort.debug('LAST', leaf)
if self.clef != current_clef and current_clef is not None:
reset_clef = True
next_leaf = abjad.inspect(leaf).get_leaf(1)
consort.debug('\tN', next_leaf)
while not isinstance(next_leaf, prototype):
next_leaf = abjad.inspect(next_leaf).get_leaf(1)
consort.debug('\tN', next_leaf)
if next_leaf is not None:
spanners = abjad.inspect(next_leaf).get_spanners(type(self))
spanners = tuple(spanners)
if spanners:
consort.debug('\tNEXT?', spanners)
if spanners[0].clef == self.clef:
reset_clef = False
if set_clef:
string = format(self.clef, 'lilypond')
lilypond_format_bundle.before.indicators.append(string)
if reset_clef and current_clef is not None:
string = format(current_clef, 'lilypond')
lilypond_format_bundle.after.indicators.append(string)
return lilypond_format_bundle
### PUBLIC PROPERTIES ###
@property
def clef(self):
return self._clef
|
import ResModel_pyramid
import tensorflow as tf
import numpy as np
import re
from yolo.net.net import Net
class YoloTinyNet(Net):
def __init__(self, common_params, net_params, test=False):
"""
common params: a params dict
net_params : a params dict
"""
super(YoloTinyNet, self).__init__(common_params, net_params)
#process params
self.image_size = int(common_params['image_size'])
self.num_classes = int(common_params['num_classes'])
self.cell_size = int(net_params['cell_size'])
self.boxes_per_cell = int(net_params['boxes_per_cell'])
self.batch_size = int(common_params['batch_size'])
self.weight_decay = float(net_params['weight_decay'])
if not test:
self.object_scale = float(net_params['object_scale'])
self.noobject_scale = float(net_params['noobject_scale'])
self.class_scale = float(net_params['class_scale'])
self.coord_scale = float(net_params['coord_scale'])
def inference(self, images):
"""Build the yolo model
Args:
images: 4-D tensor [batch_size, image_height, image_width, channels]
Returns:
predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
"""
conv_num = 1
temp_conv = self.conv2d('conv' + str(conv_num), images, [3, 3, 3, 16], stride=1)
conv_num += 1
temp_pool = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 16, 32], stride=1)
conv_num += 1
temp_pool = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 32, 64], stride=1)
conv_num += 1
temp_conv = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 64, 128], stride=1)
conv_num += 1
temp_conv = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)
conv_num += 1
temp_conv = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
conv_num += 1
temp_conv = self.max_pool(temp_conv, [2, 2], 2)
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
conv_num += 1
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
conv_num += 1
temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
conv_num += 1
# temp_conv = ResModel_pyramid.resnet(images,20)
temp_conv = tf.transpose(temp_conv, (0, 3, 1, 2))
#Fully connected layer
'''
print 'delete Fully'
local2 = self.local('local2', temp_conv,self.cell_size * self.cell_size * 1024, 4096)
'''
local1 = self.local('local1', temp_conv, self.cell_size * self.cell_size * 1024, 256)
local2 = self.local('local2', local1, 256, 4096)
local3 = self.local('local3', local2, 4096, self.cell_size * self.cell_size * (self.num_classes + self.boxes_per_cell * 5), leaky=False, pretrain=False, train=True)
n1 = self.cell_size * self.cell_size * self.num_classes
n2 = n1 + self.cell_size * self.cell_size * self.boxes_per_cell
class_probs = tf.reshape(local3[:, 0:n1], (-1, self.cell_size, self.cell_size, self.num_classes))
scales = tf.reshape(local3[:, n1:n2], (-1, self.cell_size, self.cell_size, self.boxes_per_cell))
boxes = tf.reshape(local3[:, n2:], (-1, self.cell_size, self.cell_size, self.boxes_per_cell * 4))
        local3 = tf.concat([class_probs, scales, boxes], 3)
predicts = local3
return predicts
def iou(self, boxes1, boxes2):
"""calculate ious
Args:
boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ====> (x_center, y_center, w, h)
boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
Return:
iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
"""
        boxes1 = tf.stack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                           boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
        boxes2 = tf.stack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                           boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])
#calculate the left up point
lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])
#intersection
intersection = rd - lu
inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]
mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)
inter_square = mask * inter_square
        #calculate the boxes1 square and boxes2 square
square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])
return inter_square/(square1 + square2 - inter_square + 1e-6)
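    # Worked example for iou() (comments only): for boxes given as
    # (x_center, y_center, w, h), A = (2, 2, 2, 2) and B = (3, 3, 2, 2)
    # become corner boxes A = (1, 1, 3, 3) and B = (2, 2, 4, 4).
    # Then lu = (2, 2), rd = (3, 3), so the intersection area is 1, and
    # IoU = 1 / (4 + 4 - 1) = 1/7 ~= 0.143, matching
    # inter_square / (square1 + square2 - inter_square).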
def cond1(self, num, object_num, loss, predict, label, nilboy):
"""
if num < object_num
"""
return num < object_num
def body1(self, num, object_num, loss, predict, labels, nilboy):
"""
calculate loss
Args:
predict: 3-D tensor [cell_size, cell_size, 5 * boxes_per_cell]
labels : [max_objects, 5] (x_center, y_center, w, h, class)
"""
label = labels[num:num+1, :]
label = tf.reshape(label, [-1])
#calculate objects tensor [CELL_SIZE, CELL_SIZE]
min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)
max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)
min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)
max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)
min_x = tf.floor(min_x)
min_y = tf.floor(min_y)
max_x = tf.ceil(max_x)
max_y = tf.ceil(max_y)
        temp = tf.cast(tf.stack([max_y - min_y, max_x - min_x]), dtype=tf.int32)
objects = tf.ones(temp, tf.float32)
        temp = tf.cast(tf.stack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)
temp = tf.reshape(temp, (2, 2))
objects = tf.pad(objects, temp, "CONSTANT")
#calculate objects tensor [CELL_SIZE, CELL_SIZE]
#calculate responsible tensor [CELL_SIZE, CELL_SIZE]
center_x = label[0] / (self.image_size / self.cell_size)
center_x = tf.floor(center_x)
center_y = label[1] / (self.image_size / self.cell_size)
center_y = tf.floor(center_y)
response = tf.ones([1, 1], tf.float32)
        temp = tf.cast(tf.stack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size - center_x - 1]), tf.int32)
temp = tf.reshape(temp, (2, 2))
response = tf.pad(response, temp, "CONSTANT")
#objects = response
#calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]
predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])
predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]
base_boxes = np.zeros([self.cell_size, self.cell_size, 4])
for y in range(self.cell_size):
for x in range(self.cell_size):
#nilboy
base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]
base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])
predict_boxes = base_boxes + predict_boxes
iou_predict_truth = self.iou(predict_boxes, label[0:4])
#calculate C [cell_size, cell_size, boxes_per_cell]
C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])
#calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))
max_I = tf.reduce_max(I, 2, keep_dims=True)
I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))
#calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
no_I = tf.ones_like(I, dtype=tf.float32) - I
p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]
#calculate truth x,y,sqrt_w,sqrt_h 0-D
x = label[0]
y = label[1]
sqrt_w = tf.sqrt(tf.abs(label[2]))
sqrt_h = tf.sqrt(tf.abs(label[3]))
#sqrt_w = tf.abs(label[2])
#sqrt_h = tf.abs(label[3])
#calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
p_x = predict_boxes[:, :, :, 0]
p_y = predict_boxes[:, :, :, 1]
#p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)
#p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)
#p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))
#p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))
#p_sqrt_w = predict_boxes[:, :, :, 2]
#p_sqrt_h = predict_boxes[:, :, :, 3]
p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))
p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))
#calculate truth p 1-D tensor [NUM_CLASSES]
P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)
#calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]
p_P = predict[:, :, 0:self.num_classes]
#class_loss
class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
#class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
#object_loss
object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale
#object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale
#noobject_loss
#noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale
noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale
#coord_loss
coord_loss = (tf.nn.l2_loss(I * (p_x - x)/(self.image_size/self.cell_size)) +
tf.nn.l2_loss(I * (p_y - y)/(self.image_size/self.cell_size)) +
tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w))/ self.image_size +
tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))/self.image_size) * self.coord_scale
nilboy = I
return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy
def loss(self, predicts, labels, objects_num):
"""Add Loss to all the trainable variables
Args:
predicts: 4-D tensor [batch_size, cell_size, cell_size, 5 * boxes_per_cell]
===> (num_classes, boxes_per_cell, 4 * boxes_per_cell)
labels : 3-D tensor of [batch_size, max_objects, 5]
objects_num: 1-D tensor [batch_size]
"""
class_loss = tf.constant(0, tf.float32)
object_loss = tf.constant(0, tf.float32)
noobject_loss = tf.constant(0, tf.float32)
coord_loss = tf.constant(0, tf.float32)
loss = [0, 0, 0, 0]
for i in range(self.batch_size):
predict = predicts[i, :, :, :]
label = labels[i, :, :]
object_num = objects_num[i]
nilboy = tf.ones([7,7,2])
tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
for j in range(4):
loss[j] = loss[j] + tuple_results[2][j]
nilboy = tuple_results[5]
tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)
tf.summary.scalar('class_loss', loss[0]/self.batch_size)
tf.summary.scalar('object_loss', loss[1]/self.batch_size)
tf.summary.scalar('noobject_loss', loss[2]/self.batch_size)
tf.summary.scalar('coord_loss', loss[3]/self.batch_size)
tf.summary.scalar('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )
return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
|
import os
import pickle
import numpy as np
from utility_tools import retrieve_most_recent_fname
from plot_embedding_tools import plot_vec, plot_zoom_vec
def retrieve_most_recent_pkl():
fpath = os.path.join('models', 'rock-letter', 'chord2vec')
fname = retrieve_most_recent_fname(fpath)
with open(fname, 'rb') as p:
results = pickle.load(p)
parser = results['parser']
    print(parser.idxs_and_shapes)
W_vect = results['W']
syms = results['syms']
# this is the first layer weights
in_W = parser.get(W_vect, ('weights', 1))
highlight_syms = ['C', 'F', 'G']
tag = os.path.splitext(os.path.split(fname)[-1])[0]
    print('tag:', tag)
plot_vec(in_W, syms, highlight_syms, tag=tag)
    print('cross_entropy: %.4f' % results['cross_entropy'])
    print('best iteration: %d' % results['iter'])
def retrieve_chord2vec_weights(most_recent=False, return_fname=False):
# Chord2Vec embedding optimized with stochastic gradient descent (SGD)
fpath = os.path.join('models', 'rock-letter',
'chord2vec') #, 'best_models')
if most_recent:
fname = retrieve_most_recent_fname(fpath)
else:
fname = 'window-1_bigram-False_hiddenSize-20_crossEntropy-2.414_bestIter-79-maxEpoch-80_opt-SGD_l2reg-0.0100.pkl'
with open(os.path.join(fpath, fname), 'rb') as p:
results = pickle.load(p)
parser = results['parser']
    print(parser.idxs_and_shapes)
W_vect = results['W']
syms = results['syms']
# this is the first layer weights
in_W = parser.get(W_vect, ('weights', 1))
    print('cross_entropy: %.4f' % results['cross_entropy'])
    print('best iteration: %d' % results['iter'])
if return_fname:
return in_W, syms, fname
else:
return in_W, syms
def plot_SGD_chord2vec_weights(most_recent=False, highlight_syms=None,
zoom=False):
    in_W, syms, fname = retrieve_chord2vec_weights(most_recent=most_recent,
                                                   return_fname=True)
if highlight_syms is None:
highlight_syms = ['C', 'F', 'G']
tag = os.path.splitext(os.path.split(fname)[-1])[0]
    print('tag:', tag)
if not zoom:
plot_vec(in_W, syms, highlight_syms=highlight_syms, tag=tag)
else:
plot_zoom_vec(in_W, syms, highlight_syms=highlight_syms, tag=tag+"_zoomed")
def plot_CG_chord2vec_weights(highlight_syms=None):
# Chord2Vec embedding optimized with conjugate gradients (CG)
from retrieve_model_tools import retrieve_SkipGramNN
nn = retrieve_SkipGramNN()
if highlight_syms is None:
highlight_syms = ['C', 'F', 'G']
tag = 'previously'
plot_vec(nn.W1, nn.syms, highlight_syms=highlight_syms, tag=tag)
if __name__ == '__main__':
retrieve_chord2vec_weights()
# retrieve_previous_weights()
|
from __future__ import division, print_function
from ._version import version_info, __version__
|
import os
import re
import io
import requests
import numpy as np
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
tf.app.flags.DEFINE_string("storage_folder", "temp", "Where to store model and data.")
tf.app.flags.DEFINE_float('learning_rate', 0.0005, 'Initial learning rate.')
tf.app.flags.DEFINE_float('dropout_prob', 0.5, 'Keep probability for dropout.')
tf.app.flags.DEFINE_integer('epochs', 20, 'Number of epochs for training.')
tf.app.flags.DEFINE_integer('batch_size', 250, 'Batch Size for training.')
tf.app.flags.DEFINE_integer('max_sequence_length', 20, 'Max sentence length in words.')
tf.app.flags.DEFINE_integer('rnn_size', 15, 'RNN feature size.')
tf.app.flags.DEFINE_integer('embedding_size', 25, 'Word embedding size.')
tf.app.flags.DEFINE_integer('min_word_frequency', 20, 'Word frequency cutoff.')
FLAGS = tf.app.flags.FLAGS
def get_data(storage_folder=FLAGS.storage_folder, data_file="text_data.txt"):
"""
This function gets the spam/ham data. It will download it if it doesn't
already exist on disk (at specified folder/file location).
"""
# Make a storage folder for models and data
if not os.path.exists(storage_folder):
os.makedirs(storage_folder)
if not os.path.isfile(os.path.join(storage_folder, data_file)):
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
# Save data to text file
with open(os.path.join(storage_folder, data_file), 'w') as file_conn:
for text in text_data:
file_conn.write("{}\n".format(text))
else:
# Open data from text file
text_data = []
with open(os.path.join(storage_folder, data_file), 'r') as file_conn:
for row in file_conn:
text_data.append(row)
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[y_data, x_data] = [list(x) for x in zip(*text_data)]
return(x_data, y_data)
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return(text_string)
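# e.g. clean_text('Hello, World! 123') -> 'hello world': punctuation,
# underscores and digits are stripped, whitespace is collapsed, and the
# result is lowercased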
def rnn_model(x_data_ph, max_sequence_length, vocab_size, embedding_size,
rnn_size, dropout_keep_prob):
# Create embedding
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data_ph)
# Define the RNN cell
cell = tf.nn.rnn_cell.BasicRNNCell(num_units = rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
# Get output of RNN sequence
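    # dynamic_rnn output is [batch, max_sequence_length, rnn_size]; transpose
    # to [max_sequence_length, batch, rnn_size] so tf.gather can pick the
    # final time step for every example in the batch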
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
    # Return raw logits; softmax is applied by the loss and prediction ops,
    # so applying it here as well would double-squash the outputs
    logits_out = tf.matmul(last, weight) + bias
return(logits_out)
def get_accuracy(logits, actuals):
    # Calculate whether each prediction is correct
batch_acc = tf.equal(tf.argmax(logits, 1), tf.cast(actuals, tf.int64))
# Convert logical to float
batch_acc = tf.cast(batch_acc, tf.float32)
return(batch_acc)
def main(args):
# Set verbosity to get more information from Tensorflow
tf.logging.set_verbosity(tf.logging.INFO)
    # Create tensorboard folder if it does not exist
    if not os.path.exists('tensorboard'):
        os.makedirs('tensorboard')
    # Create a visualizer object for Tensorboard viewing
    summary_writer = tf.train.SummaryWriter('tensorboard', tf.get_default_graph())
# Set model parameters
storage_folder = FLAGS.storage_folder
learning_rate = FLAGS.learning_rate
    epochs = FLAGS.epochs
batch_size = FLAGS.batch_size
max_sequence_length = FLAGS.max_sequence_length
rnn_size = FLAGS.rnn_size
embedding_size = FLAGS.embedding_size
min_word_frequency = FLAGS.min_word_frequency
# Get text->spam/ham data
x_data, y_data = get_data()
# Clean texts
x_data = [clean_text(x) for x in x_data]
# Change texts into numeric vectors
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(x_data)))
# Save vocab processor (for loading and future evaluation)
vocab_processor.save(os.path.join(storage_folder, "vocab"))
# Shuffle and split data
y_data = np.array([1 if x=='ham' else 0 for x in y_data])
shuffled_ix = np.random.permutation(np.arange(len(y_data)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = y_data[shuffled_ix]
# Split train/test set
ix_cutoff = int(len(y_shuffled)*0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
with tf.Graph().as_default():
sess = tf.Session()
# Define placeholders
x_data_ph = tf.placeholder(tf.int32, [None, max_sequence_length], name='x_data_ph')
y_output_ph = tf.placeholder(tf.int32, [None], name='y_output_ph')
dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
# Define Model
rnn_model_outputs = rnn_model(x_data_ph, max_sequence_length, vocab_size,
embedding_size, rnn_size, dropout_keep_prob)
# Prediction
# Although we won't use the following operation, we declare and name
# the probability outputs so that we can recall them later for evaluation
rnn_prediction = tf.nn.softmax(rnn_model_outputs, name="probability_outputs")
# Loss function
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rnn_model_outputs, labels=y_output_ph)
# Remember that for this loss function, logits=float32, labels=int32
loss = tf.reduce_mean(losses, name="loss")
# Model Accuracy Operation
accuracy = tf.reduce_mean(get_accuracy(rnn_model_outputs, y_output_ph), name="accuracy")
# Add scalar summaries for Tensorboard
with tf.name_scope('Scalar_Summaries'):
tf.scalar_summary('Loss', loss)
tf.scalar_summary('Accuracy', accuracy)
# Declare Optimizer/train step
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
# Declare summary merging operation
summary_op = tf.merge_all_summaries()
# Create a graph/Variable saving/loading operations
saver = tf.train.Saver()
init = tf.initialize_all_variables()
sess.run(init)
# Start training
for epoch in range(epochs):
# Shuffle training data
shuffled_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffled_ix]
y_train = y_train[shuffled_ix]
num_batches = int(len(x_train)/batch_size) + 1
#
for i in range(num_batches):
# Select train data
min_ix = i * batch_size
max_ix = np.min([len(x_train), ((i+1) * batch_size)])
x_train_batch = x_train[min_ix:max_ix]
y_train_batch = y_train[min_ix:max_ix]
# Run train step
train_dict = {x_data_ph: x_train_batch,
y_output_ph: y_train_batch,
dropout_keep_prob:0.5}
                _, summary = sess.run([train_step, summary_op], feed_dict=train_dict)
                # reuse the writer created above; use a global step so summaries
                # from later epochs do not overwrite earlier ones
                summary_writer.add_summary(summary, epoch * num_batches + i)
# Run loss and accuracy for training
temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
test_dict = {x_data_ph: x_test, y_output_ph: y_test, dropout_keep_prob:1.0}
temp_test_loss, temp_test_acc = sess.run([loss, accuracy], feed_dict=test_dict)
# Print Epoch Summary
print('Epoch: {}, Test Loss: {:.2}, Test Acc: {:.2}'.format(epoch+1, temp_test_loss, temp_test_acc))
# Save model every epoch
saver.save(sess, os.path.join(storage_folder, "model.ckpt"))
if __name__ == "__main__":
tf.app.run()
|
import feedparser
import urllib
DOWNLOADS_DIR = '/home/gberg/podcasts/'
DOWNLOADED_LIST_FILE = '/home/gberg/.podyoink/downloaded.list'
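# downloaded.list records one previously fetched filename per line; it is
# consulted before downloading and appended to after each successful download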
PODCAST_URLS = ['http://downloads.bbc.co.uk/podcasts/radio4/fricomedy/rss.xml','http://downloads.bbc.co.uk/podcasts/radio4/comedy/rss.xml','http://downloads.bbc.co.uk/podcasts/radio4extra/newsjack/rss.xml']
for PODCAST_URL in PODCAST_URLS:
feed = feedparser.parse(PODCAST_URL)
downloadedListFile = open(DOWNLOADED_LIST_FILE)
downloadedList = downloadedListFile.read().splitlines()
downloadedListFile.close()
for item in feed.entries:
print "Entry Found:", item.link
fileName = item.link.split('/')[-1]
if fileName in downloadedList:
print "Not a new file. Skipping..."
continue
print "New File. Downloading..."
urllib.urlretrieve(item.link, DOWNLOADS_DIR+'/'+item.title +' - '+ fileName)
listFile = open(DOWNLOADED_LIST_FILE, 'a')
listFile.write(fileName)
listFile.write("\n")
listFile.close()
print "Done"
|
from .conversation import *
from .forum import *
from .pipe import *
|
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import AuthenticationService
from org.gluu.oxauth.service.common import UserService
from org.gluu.service import CacheService
from org.gluu.util import StringHelper
from org.gluu.persist.exception import AuthenticationException
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from java.time import LocalDateTime, Duration
from java.time.format import DateTimeFormatter
import java
import datetime
import json
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, customScript, configurationAttributes):
print "Basic (lock account). Initialization"
self.invalidLoginCountAttribute = "oxCountInvalidLogin"
if configurationAttributes.containsKey("invalid_login_count_attribute"):
self.invalidLoginCountAttribute = configurationAttributes.get("invalid_login_count_attribute").getValue2()
else:
print "Basic (lock account). Initialization. Using default attribute"
self.maximumInvalidLoginAttemps = 3
if configurationAttributes.containsKey("maximum_invalid_login_attemps"):
self.maximumInvalidLoginAttemps = StringHelper.toInteger(configurationAttributes.get("maximum_invalid_login_attemps").getValue2())
else:
print "Basic (lock account). Initialization. Using default number attempts"
self.lockExpirationTime = 180
if configurationAttributes.containsKey("lock_expiration_time"):
self.lockExpirationTime = StringHelper.toInteger(configurationAttributes.get("lock_expiration_time").getValue2())
else:
print "Basic (lock account). Initialization. Using default lock expiration time"
print "Basic (lock account). Initialized successfully. invalid_login_count_attribute: '%s', maximum_invalid_login_attemps: '%s', lock_expiration_time: '%s'" % (self.invalidLoginCountAttribute, self.maximumInvalidLoginAttemps, self.lockExpirationTime)
return True
def destroy(self, configurationAttributes):
print "Basic (lock account). Destroy"
print "Basic (lock account). Destroyed successfully"
return True
def getApiVersion(self):
return 11
def getAuthenticationMethodClaims(self, requestParameters):
return None
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
if step == 1:
print "Basic (lock account). Authenticate for step 1"
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
cacheService = CdiUtil.bean(CacheService)
userService = CdiUtil.bean(UserService)
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
try:
logged_in = authenticationService.authenticate(user_name, user_password)
except AuthenticationException:
print "Basic (lock account). Authenticate. Failed to authenticate user '%s'" % user_name
if logged_in:
self.setUserAttributeValue(user_name, self.invalidLoginCountAttribute, StringHelper.toString(0))
else:
                countInvalidLoginAttributeValue = self.getUserAttributeValue(user_name, self.invalidLoginCountAttribute)
                userStatus = self.getUserAttributeValue(user_name, "gluuStatus")
                print "Current user '%s' status is '%s'" % ( user_name, userStatus )
                countInvalidLogin = StringHelper.toInteger(countInvalidLoginAttributeValue, 0)
                if countInvalidLogin < self.maximumInvalidLoginAttemps:
                    countInvalidLogin = countInvalidLogin + 1
                    remainingAttempts = self.maximumInvalidLoginAttemps - countInvalidLogin
                    print "Remaining login count attempts '%s' for user '%s'" % ( remainingAttempts, user_name )
                    self.setUserAttributeValue(user_name, self.invalidLoginCountAttribute, StringHelper.toString(countInvalidLogin))
                    if remainingAttempts > 0 and userStatus == "active":
                        facesMessages.add(FacesMessage.SEVERITY_INFO, StringHelper.toString(remainingAttempts)+" more attempt(s) before account is LOCKED!")
                if (countInvalidLogin >= self.maximumInvalidLoginAttemps) and ((userStatus == None) or (userStatus == "active")):
                    print "Basic (lock account). Locking '%s' for '%s' seconds" % ( user_name, self.lockExpirationTime)
                    self.lockUser(user_name)
                    return False
                if (countInvalidLogin >= self.maximumInvalidLoginAttemps) and userStatus == "inactive":
print "Basic (lock account). User '%s' is locked. Checking if we can unlock him" % user_name
unlock_and_authenticate = False
object_from_store = cacheService.get(None, "lock_user_" + user_name)
if object_from_store == None:
# Object in cache was expired. We need to unlock user
print "Basic (lock account). User locking details for user '%s' not exists" % user_name
unlock_and_authenticate = True
else:
# Analyze object from cache
user_lock_details = json.loads(object_from_store)
user_lock_details_locked = user_lock_details['locked']
user_lock_details_created = user_lock_details['created']
user_lock_details_created_date = LocalDateTime.parse(user_lock_details_created, DateTimeFormatter.ISO_LOCAL_DATE_TIME)
user_lock_details_created_diff = Duration.between(user_lock_details_created_date, LocalDateTime.now()).getSeconds()
print "Basic (lock account). Get user '%s' locking details. locked: '%s', Created: '%s', Difference in seconds: '%s'" % ( user_name, user_lock_details_locked, user_lock_details_created, user_lock_details_created_diff )
if user_lock_details_locked and user_lock_details_created_diff >= self.lockExpirationTime:
print "Basic (lock account). Unlocking user '%s' after lock expiration" % user_name
unlock_and_authenticate = True
if unlock_and_authenticate:
self.unLockUser(user_name)
self.setUserAttributeValue(user_name, self.invalidLoginCountAttribute, StringHelper.toString(0))
logged_in = authenticationService.authenticate(user_name, user_password)
if not logged_in:
# Update number of attempts
self.setUserAttributeValue(user_name, self.invalidLoginCountAttribute, StringHelper.toString(1))
if self.maximumInvalidLoginAttemps == 1:
# Lock user if maximum count login attempts is 1
self.lockUser(user_name)
return False
return logged_in
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
if step == 1:
print "Basic (lock account). Prepare for Step 1"
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
return None
def getCountAuthenticationSteps(self, configurationAttributes):
return 1
def getPageForStep(self, configurationAttributes, step):
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
def logout(self, configurationAttributes, requestParameters):
return True
def getUserAttributeValue(self, user_name, attribute_name):
if StringHelper.isEmpty(user_name):
return None
userService = CdiUtil.bean(UserService)
find_user_by_uid = userService.getUser(user_name, attribute_name)
if find_user_by_uid == None:
return None
custom_attribute_value = userService.getCustomAttribute(find_user_by_uid, attribute_name)
if custom_attribute_value == None:
return None
attribute_value = custom_attribute_value.getValue()
print "Basic (lock account). Get user attribute. User's '%s' attribute '%s' value is '%s'" % (user_name, attribute_name, attribute_value)
return attribute_value
def setUserAttributeValue(self, user_name, attribute_name, attribute_value):
if StringHelper.isEmpty(user_name):
return None
userService = CdiUtil.bean(UserService)
find_user_by_uid = userService.getUser(user_name)
if find_user_by_uid == None:
return None
userService.setCustomAttribute(find_user_by_uid, attribute_name, attribute_value)
updated_user = userService.updateUser(find_user_by_uid)
print "Basic (lock account). Set user attribute. User's '%s' attribute '%s' value is '%s'" % (user_name, attribute_name, attribute_value)
return updated_user
def lockUser(self, user_name):
if StringHelper.isEmpty(user_name):
return None
userService = CdiUtil.bean(UserService)
cacheService= CdiUtil.bean(CacheService)
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.setKeepMessages()
find_user_by_uid = userService.getUser(user_name)
if (find_user_by_uid == None):
return None
status_attribute_value = userService.getCustomAttribute(find_user_by_uid, "gluuStatus")
if status_attribute_value != None:
user_status = status_attribute_value.getValue()
if StringHelper.equals(user_status, "inactive"):
print "Basic (lock account). Lock user. User '%s' locked already" % user_name
return
userService.setCustomAttribute(find_user_by_uid, "gluuStatus", "inactive")
userService.setCustomAttribute(find_user_by_uid, "oxTrustActive", "false")
updated_user = userService.updateUser(find_user_by_uid)
object_to_store = json.dumps({'locked': True, 'created': LocalDateTime.now().toString()}, separators=(',',':'))
cacheService.put(StringHelper.toString(self.lockExpirationTime), "lock_user_"+user_name, object_to_store);
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Your account is locked. Please try again after " + StringHelper.toString(self.lockExpirationTime) + " secs")
print "Basic (lock account). Lock user. User '%s' locked" % user_name
def unLockUser(self, user_name):
if StringHelper.isEmpty(user_name):
return None
userService = CdiUtil.bean(UserService)
cacheService= CdiUtil.bean(CacheService)
find_user_by_uid = userService.getUser(user_name)
if (find_user_by_uid == None):
return None
object_to_store = json.dumps({'locked': False, 'created': LocalDateTime.now().toString()}, separators=(',',':'))
cacheService.put(StringHelper.toString(self.lockExpirationTime), "lock_user_"+user_name, object_to_store);
userService.setCustomAttribute(find_user_by_uid, "gluuStatus", "active")
userService.setCustomAttribute(find_user_by_uid, "oxTrustActive", "true")
userService.setCustomAttribute(find_user_by_uid, self.invalidLoginCountAttribute, None)
updated_user = userService.updateUser(find_user_by_uid)
print "Basic (lock account). Lock user. User '%s' unlocked" % user_name
|
import json
import logging
import typing
import urllib.request
from discord.ext import commands
from discord.ext.commands import CommandError, Context
from cogbot import checks
from cogbot.cog_bot import CogBot
log = logging.getLogger(__name__)
class FAQEntry:
def __init__(self, key: str, tags: typing.Iterable[str], message: str, hidden: bool = False):
self.key: str = str(key)
self.tags: typing.Set[str] = set(tags)
self.message: str = str(message)
self.hidden: bool = hidden
class FaqConfig:
def __init__(self, **options):
self.database: str = str(options['database'])
class Faq:
def __init__(self, bot: CogBot, ext: str):
self.bot = bot
self.config = FaqConfig(**bot.state.get_extension_state(ext))
self.entries_by_key: typing.Dict[str, FAQEntry] = {}
self.entries_by_tag: typing.Dict[str, typing.List[FAQEntry]] = {}
self.available_faqs_text = ''
def extract_tags(self, key: str) -> typing.List[str]:
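        """Split a key on whitespace and strip a leading '#' from each token,
        e.g. '#commands #nbt' -> ['commands', 'nbt']."""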
return [k[1:] if k.startswith('#') else k for k in key.split()]
def format_keys(self, keys: typing.Iterable[str]) -> str:
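        """Render keys as inline code, e.g. ['a', 'b'] -> '`a`, `b`'."""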
return ''.join(('`', '`, `'.join(keys), '`'))
def get_all_keys(self):
return self.entries_by_key.keys()
def get_visible_keys(self):
return (key for key, entry in self.entries_by_key.items() if not entry.hidden)
def get_entry_by_key(self, key: str) -> FAQEntry:
return self.entries_by_key.get(key)
def get_entries_by_tag(self, tag: str) -> typing.List[FAQEntry]:
return self.entries_by_tag.get(tag)
def get_entries_by_tags(self, tags: typing.Iterable[str]) -> typing.List[FAQEntry]:
tags_tuple = tuple(tags)
tags_set = set(tags_tuple)
initial_entries = self.get_entries_by_tag(tags_tuple[0])
# start with all results for the first tag
# take the intersection of remaining queries
if initial_entries:
return [entry for entry in initial_entries if tags_set.issubset(entry.tags)]
else:
return []
def get_entries_cascading(self, key: str) -> typing.List[FAQEntry]:
# see if there's an exact match
# if not, split the key into tags and search by those
entry = self.get_entry_by_key(key)
if entry:
return [entry]
else:
return self.get_entries_by_tags(key.split())
def get_entries_strict(self, key: str) -> typing.List[FAQEntry]:
# only check tags if the key starts with a hashtag
# otherwise look for the exact key
if key.startswith('#'):
return self.get_entries_by_tags(self.extract_tags(key))
else:
entry = self.get_entry_by_key(key)
if entry:
return [entry]
else:
return []
def reload_data(self):
log.info('Reloading FAQs from: {}'.format(self.config.database))
if self.config.database.startswith(('http://', 'https://')):
try:
response = urllib.request.urlopen(self.config.database)
content = response.read().decode('utf8')
data = json.loads(content)
except Exception as e:
raise CommandError('Failed to reload FAQs: {}'.format(e))
else:
with open(self.config.database) as fp:
data = json.load(fp)
# parse data and precompile messages
# also create map of tags to entries
        entries_by_key: typing.Dict[str, FAQEntry] = {}
        entries_by_tag: typing.Dict[str, typing.List[FAQEntry]] = {}
for key, raw_entry in data.items():
if isinstance(raw_entry, dict):
# list becomes lines
raw_message = raw_entry['message']
message = '\n'.join(raw_message) if isinstance(raw_message, list) else str(raw_message)
# tags are split by whitespace
tags = raw_entry.get('tags', '').split()
                hidden = raw_entry.get('hidden', False)
entry = FAQEntry(key=key, tags=tags, message=message, hidden=hidden)
entries_by_key[key] = entry
for tag in tags:
if tag not in entries_by_tag:
entries_by_tag[tag] = []
entries_by_tag[tag].append(entry)
# process aliases
aliases = raw_entry.get('aliases', [])
for alias in aliases:
entries_by_key[alias] = entry
else:
log.error('Invalid FAQ entry "{}": {}'.format(key, raw_entry))
self.entries_by_key = entries_by_key
self.entries_by_tag = entries_by_tag
self.available_faqs_text = 'Available FAQs: ' + self.format_keys(sorted(self.get_visible_keys()))
log.info('Successfully reloaded {} FAQs'.format(len(data)))
async def on_ready(self):
self.reload_data()
@commands.command(pass_context=True, name='faq')
async def cmd_faq(self, ctx: Context, *, key: str = ''):
if key:
entries = self.get_entries_strict(key)
if entries:
for entry in entries:
await self.bot.say(entry.message)
else:
suggest_entries = self.get_entries_by_tags(key.split())
# if there's exactly one suggestion, just go with it
if len(suggest_entries) == 1:
await self.bot.say(suggest_entries[0].message)
# or if there are more than one, suggest them
elif suggest_entries:
await self.bot.add_reaction(ctx.message, u'🤔')
suggest_text = 'Maybe you meant: ' + self.format_keys(entry.key for entry in suggest_entries)
await self.bot.say(suggest_text)
# otherwise shrug
else:
await self.bot.add_reaction(ctx.message, u'🤷')
else:
await self.bot.say(self.available_faqs_text)
@checks.is_manager()
@commands.command(pass_context=True, name='faqreload', hidden=True)
async def cmd_faqreload(self, ctx: Context):
try:
self.reload_data()
await self.bot.react_success(ctx)
        except Exception:
await self.bot.react_failure(ctx)
def setup(bot):
bot.add_cog(Faq(bot, __name__))
|
import json
import time
from requests.exceptions import RequestException, ConnectionError, Timeout
from steam_alerts import logger
from steam_alerts.messaging_service import MessagingService, MockMessagingService
from steam_alerts.person import Person
from steam_alerts.steam_service import SteamService, status_types
class PollService:
def __init__(self, config_path):
with open(config_path) as file:
config = json.load(file)
self.steam_key = config['steam_key']
self.twilio_sid = config['twilio_sid']
self.twilio_auth_token = config['twilio_auth_token']
self.twilio_number = config['twilio_number']
self.poll_rate = config.get('poll_rate', 60)
self.message_rate = config.get('message_rate', 60 * 5)
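        # poll_rate and message_rate are in seconds: how often Steam is
        # polled, and the minimum gap between texts to the same person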
self.messages = config['messages']
self.debug = config.get('debug', False)
self.game_list = config['game_list']
self.people = {}
self.steam_service = SteamService(self.steam_key)
if self.debug:
self.messaging_service = MockMessagingService(self.twilio_sid, self.twilio_auth_token, self.twilio_number,
self.messages)
else:
self.messaging_service = MessagingService(self.twilio_sid, self.twilio_auth_token, self.twilio_number,
self.messages)
for player_json in config['players']:
if player_json['phone_number'] != "" and player_json['steam_id'] != "":
person = Person(**player_json)
logger.info("Adding {}".format(person.name))
self.people[person.steam_id] = person
def run_loop(self):
logger.info('Starting run loop.')
while True:
try:
statuses = self.steam_service.get_player_statuses(','.join(self.people.keys()))
except ConnectionError:
logger.error('Connection Error: The Steam server may be down, we may have lost internet access')
except Timeout:
logger.error('Timeout Error: The Steam server took too long to respond. Check your connection.')
except RequestException as e:
logger.error(e)
logger.error('An error occurred while trying to retrieve user statuses.')
except Exception as e:
logger.error(e)
logger.error('An unknown error occurred while trying to retrieve user statuses.')
else:
for status in statuses:
person = self.people[status['steamid']]
persona_state = status['personastate']
old_state = status_types.get(person.persona_state, None)
new_state = status_types[persona_state]
old_game = person.game
new_game = status.get('gameextrainfo')
person.persona_state = persona_state
person.game = new_game
msg = '{} is {}'.format(person.name, new_state)
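                    # throttle: text the same person again only after message_rate seconds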
can_send = person.last_messaged is None or ((time.time() - person.last_messaged) >= self.message_rate)
if new_game is not None:
msg += ' and is playing "{}"'.format(new_game)
else:
msg += ' and not in a game'
if old_state != new_state or new_game != old_game:
logger.info(msg)
if person.game is not None and can_send:
for game in self.game_list:
if game.lower() in person.game.lower():
try:
self.messaging_service.send_message(person, game)
person.last_messaged = time.time()
except ConnectionError:
logger.error('Connection Error: The Twilio server may be down, or we may have lost internet access')
except Timeout:
logger.error('Timeout Error: The Twilio server took too long to respond. Check your connection.')
except RequestException as e:
logger.error(e)
logger.error('An error occurred while trying to send an annoying text message.')
finally:
# Game was found, no need to keep searching
break
time.sleep(self.poll_rate)
def start(self):
self.run_loop()
|
from collections import Counter
def find_it(seq):
    for k, v in Counter(seq).items():
if v % 2:
return k
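# e.g. find_it([20, 1, 1, 2, 2]) returns 20, the only value with an odd count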
|
from django.test import TestCase
from django.urls import reverse
from monitor.tests.utils.http_client_mixin import HTTPClientMixin
class TestTokenRetrieveView(HTTPClientMixin, TestCase):
def test_get(self):
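        # HTTPClientMixin is assumed to provide self.client and a pre-created
        # self.token whose key the endpoint should return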
url = reverse('monitor:token')
response = self.client.get(url)
self.assertEqual(response.data.get('key'), self.token.key)
|
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_cloud_storage_object_info
except ImportError:
bt_cloud_storage_object_info = sys.modules[
"onshape_client.oas.models.bt_cloud_storage_object_info"
]
class BTCloudStorageAccountInfoAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"cloud_storage_account_id": (str,), # noqa: E501
"cloud_storage_provider": (int,), # noqa: E501
"enabled": (bool,), # noqa: E501
"export_folder": (
bt_cloud_storage_object_info.BTCloudStorageObjectInfo,
), # noqa: E501
"import_folder": (
bt_cloud_storage_object_info.BTCloudStorageObjectInfo,
), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"cloud_storage_account_id": "cloudStorageAccountId", # noqa: E501
"cloud_storage_provider": "cloudStorageProvider", # noqa: E501
"enabled": "enabled", # noqa: E501
"export_folder": "exportFolder", # noqa: E501
"import_folder": "importFolder", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_cloud_storage_account_info_all_of.BTCloudStorageAccountInfoAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
cloud_storage_account_id (str): [optional] # noqa: E501
cloud_storage_provider (int): [optional] # noqa: E501
enabled (bool): [optional] # noqa: E501
export_folder (bt_cloud_storage_object_info.BTCloudStorageObjectInfo): [optional] # noqa: E501
import_folder (bt_cloud_storage_object_info.BTCloudStorageObjectInfo): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|