commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
78e758925bff73e52867b671b246a391f87cf945 | remove commented lines. | homeassistant/components/sensor/speedtest.py | homeassistant/components/sensor/speedtest.py | """
homeassistant.components.sensor.speedtest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speedtest.net sensor based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import sys
import re
from datetime import timedelta
from subprocess import check_output
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms\nDownload:\s(\d+\.\d+)'
r'\sMbit/s\nUpload:\s(\d+\.\d+)\sMbit/s\n')
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Setup the Speedtest sensor. """
    data = SpeedtestData(hass.config.path)
    entities = []
    for condition in config['monitored_conditions']:
        if condition in SENSOR_TYPES:
            entities.append(SpeedtestSensor(data, condition))
        else:
            _LOGGER.error('Sensor type: "%s" does not exist', condition)
    add_devices(entities)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
    """ Implements a speedtest.net sensor. """
    def __init__(self, speedtest_data, sensor_type):
        self.client_name = 'Speedtest'
        # Friendly name for this condition ('Ping'/'Download'/'Upload').
        self._name = SENSOR_TYPES[sensor_type][0]
        # Shared SpeedtestData instance that runs the (throttled) measurement.
        self.speedtest_client = speedtest_data
        self.type = sensor_type
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[self.type][1]
        # Populate the initial state immediately.
        self.update()
    @property
    def name(self):
        """ Returns the sensor name, e.g. 'Speedtest Ping'. """
        return '{} {}'.format(self.client_name, self._name)
    @property
    def state(self):
        """ Returns the state of the device. """
        return self._state
    @property
    def unit_of_measurement(self):
        """ Unit of measurement of this entity, if any. """
        return self._unit_of_measurement
    def update(self):
        """ Gets the latest data from speedtest.net and updates the state. """
        # NOTE(review): the original docstring said "Forecast.io" -- a
        # copy-paste leftover from another sensor platform.
        self.speedtest_client.update()
        data = self.speedtest_client.data
        if self.type == 'ping':
            self._state = data['ping']
        elif self.type == 'download':
            self._state = data['download']
        elif self.type == 'upload':
            self._state = data['upload']
class SpeedtestData(object):
    """ Gets the latest data from speedtest.net. """
    def __init__(self, path):
        # Parsed result dict with 'ping', 'download' and 'upload' keys.
        self.data = None
        # Path-building callable (hass.config.path as passed by
        # setup_platform) used to locate the bundled speedtest_cli.py.
        self.path = path
        self.update()
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """ Gets the latest data from speedtest.net. """
        # Throttle keeps the previous result for MIN_TIME_BETWEEN_UPDATES,
        # so the expensive subprocess run happens at most once per hour.
        _LOGGER.info('Executing speedtest')
        # Run speedtest-cli with '--simple' and split its three output lines
        # (Ping/Download/Upload) with _SPEEDTEST_REGEX; groups 1-3 are the
        # numeric values.
        re_output = _SPEEDTEST_REGEX.split(
            check_output([sys.executable, self.path(
                'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
        self.data = {'ping': round(float(re_output[1]), 2),
                     'download': round(float(re_output[2]), 2),
                     'upload': round(float(re_output[3]), 2)}
| """
homeassistant.components.sensor.speedtest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speedtest.net sensor based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import sys
import re
from datetime import timedelta
from subprocess import check_output
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
# _SPEEDTEST_REGEX = re.compile('Ping:\s(\d+\.\d+)\sms\\nDownload:\s(\d+\.\d+)'
# '\sMbit/s\\nUpload:\s(\d+\.\d+)\sMbit/s\\n')
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms\nDownload:\s(\d+\.\d+)'
r'\sMbit/s\nUpload:\s(\d+\.\d+)\sMbit/s\n')
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Setup the Speedtest sensor. """
data = SpeedtestData(hass.config.path)
dev = []
for variable in config['monitored_conditions']:
if variable not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable)
else:
dev.append(SpeedtestSensor(data, variable))
add_devices(dev)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
""" Implements a speedtest.net sensor. """
def __init__(self, speedtest_data, sensor_type):
self.client_name = 'Speedtest'
self._name = SENSOR_TYPES[sensor_type][0]
self.speedtest_client = speedtest_data
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
self.update()
@property
def name(self):
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def update(self):
""" Gets the latest data from Forecast.io and updates the states. """
self.speedtest_client.update()
data = self.speedtest_client.data
if self.type == 'ping':
self._state = data['ping']
elif self.type == 'download':
self._state = data['download']
elif self.type == 'upload':
self._state = data['upload']
class SpeedtestData(object):
""" Gets the latest data from speedtest.net. """
def __init__(self, path):
self.data = None
self.path = path
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
""" Gets the latest data from speedtest.net. """
_LOGGER.info('Executing speedtest')
re_output = _SPEEDTEST_REGEX.split(
check_output([sys.executable, self.path(
'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
self.data = {'ping': round(float(re_output[1]), 2),
'download': round(float(re_output[2]), 2),
'upload': round(float(re_output[3]), 2)}
| Python | 0 |
90dbc7695af9cc4b83273e774a8e3f6eb0847170 | Maximum sum of any given path | Arrays/maximum_sum_path.py | Arrays/maximum_sum_path.py | import unittest
"""
Given two sorted arrays such that the arrays may have some common elements, find the maximum sum path
to reach from beginning of any array to end of any array. We can switch from one array to another array
only at common elements.
Input: arr1: 2 3 7 10 12 arr2: 1 5 7 8
Output: 35 (1 + 5 + 7 + 10 + 12)
"""
"""
Approach:
1. Scan both arrays from left to right.
2. Keep two running sums, sum1 and sum2 for the two arrays.
3. Move pointer in array whose current element is smaller among the two, and add that element to
respective running sum.
4. When the corresponding elements from the two arrays are equal, take max(sum1, sum2) till that point,
and add the equal element to the max value.
5. Return the overall max sum as the maximum sum path.
"""
def maximum_sum_of_a_path(list1, list2):
    """Return the maximum path sum across two sorted lists, where the path
    may switch between lists only at elements common to both.

    Two running sums are kept, one per list; at each common element the
    larger running sum (plus the common element) is committed to the total
    and both runs restart.
    """
    total = 0
    run1 = run2 = 0
    i = j = 0
    n1, n2 = len(list1), len(list2)
    while i < n1 and j < n2:
        a, b = list1[i], list2[j]
        if a < b:
            run1 += a
            i += 1
        elif b < a:
            run2 += b
            j += 1
        else:
            # Common element: commit the better run, then restart both.
            total += max(run1, run2) + a
            run1 = run2 = 0
            i += 1
            j += 1
    # One list is exhausted; fold the leftover tails into the open runs.
    run1 += sum(list1[i:])
    run2 += sum(list2[j:])
    return total + max(run1, run2)
class TestMaxSumPath(unittest.TestCase):
    """Unit tests for maximum_sum_of_a_path."""

    def test_max_sum_path(self):
        # Table of (first list, second list, expected maximum path sum).
        cases = [
            ([2, 3, 7, 10, 12], [1, 5, 7, 8], 35),
            ([10, 12], [5, 7, 9], 22),
            ([2, 3, 7, 10, 12, 15, 30, 34],
             [1, 5, 7, 8, 10, 15, 16, 19], 122),
        ]
        for first, second, expected in cases:
            self.assertEqual(maximum_sum_of_a_path(first, second), expected)
| Python | 0.999862 | |
ef06864a991572d7ae610f9a249b024f967b1eb9 | Add test.util.mock_call_with_name | linkins/test/util.py | linkins/test/util.py | import mock
class mock_call_with_name(object):
    """Like mock.call but takes the name of the call as its first
    argument. mock.call requires chained methods to define its
    name. This can be a problem, for example, if you need create
    mock.call().__enter__().__iter__(). You can optionally use
    mock._Call but you might as well use a tuple since its constructor
    requires a tuple of the form (name, args, kwargs).
    """
    def __new__(self, name, *args, **kwargs):
        # Acts purely as a factory: __new__ returns a mock._Call, so no
        # instance of this class is ever actually created.
        # NOTE(review): relies on the private mock._Call API.
        return mock._Call(
            (name, args, kwargs)
        )
| Python | 0.000008 | |
dba311375a0f4cda1a3c522f5ac261dfb601b9c5 | Create gee_init.py | pyEOM/gee_init.py | pyEOM/gee_init.py | MY_SERVICE_ACCOUNT = ''
MY_PRIVATE_KEY_FILE = ''
| Python | 0.000044 | |
e3a62fcc29fb8473a70dd6d3c82f51f8f1fc4d92 | Add unit tests. | P2B_Tests.py | P2B_Tests.py | from difflib import context_diff
import glob
import os
import ProQuest2Bepress as P2B
import shutil
import subprocess
import sys
import unittest
from collections import Counter
class TestFileMethods(unittest.TestCase):
    """Tests for P2B's file handling: polling the upload dir and unzipping."""

    def _clean_upload_dir(self):
        """Remove every file and directory from P2B.UPLOAD_DIR.

        Extracted because setUp and tearDown previously duplicated this
        cleanup loop verbatim.
        """
        for entry in glob.glob(P2B.UPLOAD_DIR + "*"):
            if os.path.isfile(entry):
                os.remove(entry)
            elif os.path.isdir(entry):
                shutil.rmtree(entry)

    def setUp(self):
        """Load the P2B config and start from an empty upload directory."""
        P2B.load_config()
        self._clean_upload_dir()

    def tearDown(self):
        """Leave the upload directory empty for the next test."""
        self._clean_upload_dir()

    def addFiles(self):
        """Copy the two sample ProQuest zips into the upload directory."""
        shutil.copy("./TestFiles/etdadmin_upload_362096.zip", P2B.UPLOAD_DIR)
        shutil.copy("./TestFiles/etdadmin_upload_362658.zip", P2B.UPLOAD_DIR)

    def test_poll_uploaddir(self):
        """poll_uploaddir reports both new zips (order-independent via Counter)."""
        self.addFiles()
        self.assertEqual(Counter(P2B.poll_uploaddir([])), Counter([P2B.UPLOAD_DIR + 'etdadmin_upload_362096.zip', P2B.UPLOAD_DIR + 'etdadmin_upload_362658.zip']))

    def test_unzip(self):
        """unzip extracts the archive next to the zip, keeping its contents."""
        self.addFiles()
        path_result = P2B.unzip(P2B.UPLOAD_DIR + 'etdadmin_upload_362096.zip')
        # NOTE(review): path_result is never asserted on; consider checking it.
        self.assertTrue(os.path.exists(P2B.UPLOAD_DIR + "etdadmin_upload_362096"))
        self.assertTrue(os.path.exists(P2B.UPLOAD_DIR + "etdadmin_upload_362096/Shashe_ed.depaul_0937F_10005_DATA.xml"))
        self.assertTrue(os.path.exists(P2B.UPLOAD_DIR + "etdadmin_upload_362096/Shashe_ed.depaul_0937F_10005.pdf"))
class TestTransformationMethods(unittest.TestCase):
def setUp(self):
P2B.load_config()
rm_files = glob.glob(P2B.UPLOAD_DIR + "*")
for f in rm_files:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
def tearDown(self):
rm_files = glob.glob(P2B.UPLOAD_DIR + "*")
for f in rm_files:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
def addFiles(self):
shutil.copy("./TestFiles/etdadmin_upload_362096.zip", P2B.UPLOAD_DIR)
shutil.copy("./TestFiles/etdadmin_upload_362658.zip", P2B.UPLOAD_DIR)
def test_transform_files(self):
self.addFiles()
P2B.unzip(P2B.UPLOAD_DIR + 'etdadmin_upload_362096.zip')
P2B.unzip(P2B.UPLOAD_DIR + 'etdadmin_upload_362658.zip')
# Test etdadmin_upload_362096.zip
print "Testing etdadmin_upload_362096.zip..."
P2B.transform_files(P2B.UPLOAD_DIR + 'etdadmin_upload_362096/')
self.assertTrue(os.path.exists(P2B.UPLOAD_DIR + "etdadmin_upload_362096/etdadmin_upload_362096_Output.xml"))
with open(P2B.UPLOAD_DIR + "etdadmin_upload_362096/etdadmin_upload_362096_Output.xml") as output_f:
with open("./TestFiles/etdadmin_upload_362096_Output_Correct.xml") as correct_f:
print "Testing etdadmin_upload_362096_Output.xml..."
output_text = output_f.readlines()
correct_text = correct_f.readlines()
for line in context_diff(correct_text, output_text, fromfile='etdadmin_upload_362096_Output_Correct.xml', tofile='etdadmin_upload_362096_Output.xml'):
sys.stdout.write(line)
#self.assertEqual(output_text, correct_text)
# Test etdadmin_upload_362658.zip
print "Testing etdadmin_upload_362658.zip..."
P2B.transform_files(P2B.UPLOAD_DIR + 'etdadmin_upload_362658/')
self.assertTrue(os.path.exists(P2B.UPLOAD_DIR + "etdadmin_upload_362658/etdadmin_upload_362658_Output.xml"))
with open(P2B.UPLOAD_DIR + "etdadmin_upload_362658/etdadmin_upload_362658_Output.xml") as output_f:
with open("./TestFiles/etdadmin_upload_362658_Output_Correct.xml") as correct_f:
print "Testing etdadmin_upload_362658_Output.xml..."
output_text = output_f.readlines()
correct_text = correct_f.readlines()
for line in context_diff(correct_text, output_text, fromfile='etdadmin_upload_362658_Output_Correct.xml', tofile='etdadmin_upload_362658_Output.xml'):
sys.stdout.write(line)
#self.assertEqual(output_text, correct_text)
# Test Dropbox uploads
print "Testing if everything is in Dropbox..."
# TODO: Fix
self.assertEqual(subprocess.check_output([P2B.DBUPLOADER_PATH, "list", "etdadmin_upload_362096/"]), ' > Listing "/etdadmin_upload_362096/"... DONE\n [F] 2578 etdadmin_upload_362096_Output.xml\n [F] 2455004 Shashe_ed.depaul_0937F_10005.pdf\n')
self.assertEqual(subprocess.check_output([P2B.DBUPLOADER_PATH, "list", "etdadmin_upload_362658/"]), ' > Listing "/etdadmin_upload_362658/"... DONE\n [F] 2792 etdadmin_upload_362658_Output.xml\n [F] 58681 McCann Floeter 05212015 Electronic Theses and Disserations Approval Form.docx\n [F] 2006772 McCannFloeter_ed.depaul_0937F_10006.pdf\n')
if __name__ == '__main__':
    # Run each suite explicitly so results are reported per test class.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestFileMethods)
    unittest.TextTestRunner(verbosity=2).run(suite)
    suite = unittest.TestLoader().loadTestsFromTestCase(TestTransformationMethods)
    unittest.TextTestRunner(verbosity=2).run(suite)
148991a27670d26a2eb29f0964078b4d656bbcec | Create __init__.py | pydyn/__init__.py | pydyn/__init__.py | # Copyright (C) 2014-2015 Julius Susanto. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Time-domain simulation engine
"""
| Python | 0.000429 | |
06e4fd4b7d4cc4c984a05887fce00f7c8bbdc174 | Add missing tests for messaging notifer plugin | tests/notifiers/test_messaging.py | tests/notifiers/test_messaging.py | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
    """Exercise the "Messaging" notifier plugin with all collaborators mocked."""

    def test_init_and_notify(self):
        messaging = mock.MagicMock()
        context = "context"
        transport = "transport"
        project = "project"
        service = "service"
        host = "host"
        notify_func = base.Notifier.factory("Messaging", messaging, context,
                                            transport, project, service, host)
        # The factory must build a messaging Notifier bound to the profiler
        # topic, publishing as this host.
        messaging.Notifier.assert_called_once_with(
            transport, publisher_id=host, driver="messaging", topic="profiler")
        info = {
            "a": 10
        }
        notify_func(info)
        # The payload is augmented with project/service before sending.
        expected_data = {"project": project, "service": service}
        expected_data.update(info)
        messaging.Notifier().info.assert_called_once_with(
            context, "profiler.%s" % service, expected_data)
        messaging.reset_mock()
        # An explicit context passed at notify time overrides the factory one.
        notify_func(info, context="my_context")
        messaging.Notifier().info.assert_called_once_with(
            "my_context", "profiler.%s" % service, expected_data)
| Python | 0.000002 | |
fb2af0db2fc6d2d63bb377d7818ed1d03cb5cc9a | add nqueens.py | python/nqueens.py | python/nqueens.py | #!/usr/bin/python
# http://code.activestate.com/recipes/576647-eight-queens-six-lines/
from itertools import permutations
N = 8
cols = range(N)
# Every permutation of 0..N-1 already places exactly one queen per row and
# per column (perm[i] = column of the queen in row i), so only diagonal
# conflicts remain to be checked.
for perm in permutations(cols):
    # perm[i]-i is constant along one diagonal direction and perm[i]+i along
    # the other; N distinct values in each set means no two queens share a
    # diagonal.
    if (N == len(set(perm[i]-i for i in cols))
        == len(set(perm[i]+i for i in cols))):
        print perm
| Python | 0.001659 | |
4bfe33373ebf095623173f945757693997a65ce3 | Add a simple test for the new AWS::LanguageExtensions transform (#2074) | tests/test_language_extensions.py | tests/test_language_extensions.py | import unittest
from troposphere import AWSHelperFn, Parameter, Template
from troposphere.sqs import Queue
class TestServerless(unittest.TestCase):
    # NOTE(review): the class name says "Serverless" but these tests cover
    # the AWS::LanguageExtensions transform -- likely a copy-paste name.
    def test_transform(self):
        """set_transform should emit a top-level Transform key in the dict."""
        t = Template()
        t.set_version("2010-09-09")
        t.set_transform("AWS::LanguageExtensions")
        self.assertEqual(
            t.to_dict(),
            {
                "AWSTemplateFormatVersion": "2010-09-09",
                "Transform": "AWS::LanguageExtensions",
                "Resources": {},
            },
        )
    def test_length_function(self):
        """Fn::Length (a LanguageExtensions intrinsic) renders inside a resource."""
        # Minimal helper emitting the Fn::Length intrinsic; troposphere has
        # no built-in wrapper for it.
        class Length(AWSHelperFn):
            def __init__(self, data: object) -> None:
                self.data = {"Fn::Length": data}
        t = Template()
        t.set_version("2010-09-09")
        t.set_transform("AWS::LanguageExtensions")
        queue_list = t.add_parameter(Parameter("QueueList", Type="CommaDelimitedList"))
        queue_name = t.add_parameter(
            Parameter(
                "QueueNameParam", Description="Name for your SQS queue", Type="String"
            )
        )
        t.add_resource(
            Queue(
                "Queue",
                QueueName=queue_name.ref(),
                DelaySeconds=Length(queue_list.ref()),
            )
        )
        self.assertEqual(
            t.to_dict(),
            {
                "AWSTemplateFormatVersion": "2010-09-09",
                "Transform": "AWS::LanguageExtensions",
                "Parameters": {
                    "QueueList": {"Type": "CommaDelimitedList"},
                    "QueueNameParam": {
                        "Description": "Name for your SQS queue",
                        "Type": "String",
                    },
                },
                "Resources": {
                    "Queue": {
                        "Type": "AWS::SQS::Queue",
                        "Properties": {
                            "QueueName": {"Ref": "QueueNameParam"},
                            "DelaySeconds": {"Fn::Length": {"Ref": "QueueList"}},
                        },
                    }
                },
            },
        )
| Python | 0.000706 | |
157a7d00a9d650728495726e9217591a678ec5a9 | add docstrings for response | mailthon/response.py | mailthon/response.py | """
mailthon.response
~~~~~~~~~~~~~~~~~
Implements the Response objects.
"""
class Response(object):
    """
    Encapsulates a (status_code, message) tuple
    returned by a server when the ``NOOP``
    command is called.

    :param pair: A (status_code, message) pair.
    """

    def __init__(self, pair):
        self.status_code, self.message = pair

    @property
    def ok(self):
        """
        Whether the server reported success: True
        exactly when the status code is 250.
        """
        return self.status_code == 250
class SendmailResponse(Response):
    """
    Pairs the overall (status_code, message) reply with a
    mapping of email-address to per-recipient Response
    objects, as produced by the NOOP and SENDMAIL commands.

    :param pair: The response pair.
    :param rejected: Rejected recipients.
    """

    def __init__(self, pair, rejected):
        Response.__init__(self, pair)
        self.rejected = dict(
            (addr, Response(reply)) for addr, reply in rejected.items()
        )

    @property
    def ok(self):
        """
        True only when the status code is 250 and no
        recipient was rejected.
        """
        if self.rejected:
            return False
        return Response.ok.fget(self)
| class Response(object):
def __init__(self, pair):
status, message = pair
self.status_code = status
self.message = message
@property
def ok(self):
return self.status_code == 250
class SendmailResponse(Response):
def __init__(self, pair, rejected):
Response.__init__(self, pair)
self.rejected = {
addr: Response(pair)
for addr, pair in rejected.items()
}
@property
def ok(self):
return (Response.ok.fget(self) and
not self.rejected)
| Python | 0.000001 |
1ee32dab5a8c90c857a127ba831be250ad153198 | Create rftest.py | mark_ii/pi/rftest.py | mark_ii/pi/rftest.py | #!/usr/bin/python
import piVirtualWire.piVirtualWire as piVirtualWire
import time
import pigpio
import struct
import requests
import logging
import logging.handlers
LOG_FILENAME = '/tmp/rftest.log'
def calcChecksum(packet):
    """Sum the first 13 values of *packet* and reduce modulo 256."""
    total = sum(int(byte) for byte in packet[:13])
    return total % 256
def sendToWeb(url):
    """GET *url* and return the HTTP status code of the reply."""
    print ('URL ', url)
    reply = requests.get(url)
    return reply.status_code
if __name__ == "__main__":
    pi = pigpio.pi()
    rx = piVirtualWire.rx(pi, 4, 2000) # Set pigpio instance, TX module GPIO pin and baud rate
    # logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
    logger = logging.getLogger('RFTLogger')
    logger.setLevel(logging.DEBUG)
    handler = logging.handlers.RotatingFileHandler(LOG_FILENAME,
                                                   maxBytes=100000, backupCount=10)
    logger.addHandler(handler)
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # add formatter to ch
    ch.setFormatter(formatter)
    handler.setFormatter(formatter)
    # add ch to logger
    logger.addHandler(ch)
    # NOTE(review): fmt is never used in this script.
    fmt = "<LLLBB"
    print ("Top of RF test")
    logger.info('Top of RF test')
    while True:
        while rx.ready():
            packet = rx.get()
            # Only 14-byte packets are valid: 3 little-endian 32-bit fields,
            # a data-type byte and a checksum byte.
            if (14 == len(packet)):
                print(packet)
                id = packet[0] + packet[1]*256 + packet[2]*256*256 + packet[3]*256*256*256
                data = packet[4] + packet[5]*256 + packet[6]*256*256 + packet[7]*256*256*256
                packetCount = packet[8] + packet[9]*256 + packet[10]*256*256 + packet[11]*256*256*256
                dataType = packet[12]
                checksum = packet[13]
                print ('Id: ', format(id,'X'))
                print ('Data: ', format(data,'d'))
                print ('Packet count: ', format(packetCount,'d'))
                print ('Data type: ', format(dataType,'c'))
                print ('Checksum: ', format(checksum,'X'))
                calculatedChecksum = calcChecksum(packet)
                print ('Calc Checksum: ', format(calculatedChecksum,'X'))
                logger.debug('id=%X,data=%d,count=%d,type=%c,chk=%X,calc=%X' %
                             (id, data, packetCount, dataType, checksum, calculatedChecksum))
                # Forward only packets whose checksum verifies.
                if (calculatedChecksum == checksum):
                    print ('Upload to server')
                    # NOTE(review): idx is never used.
                    idx = format(id, 'X')
                    # '+' packets are status messages; anything else is a reading.
                    if (ord('+') == dataType):
                        if (0 == data):
                            url = "http://192.168.0.163/add_status.php?station=%X&message=Starting" %(id)
                            sendToWeb(url)
                        else :
                            url = "http://192.168.0.163/add_status.php?station=%X&message=%d cycles" %(id, data)
                            sendToWeb(url)
                    else:
                        url = "http://192.168.0.163/add_record.php?s=%X&r=%d&t=%c&n=%d" % (id, data, dataType, packetCount)
                        sendToWeb(url)
                        logger.debug('url=%s' % url)
        time.sleep(0.01)
    # NOTE(review): unreachable -- the `while True` loop above never exits.
    rx.cancel()
    pi.stop()
| Python | 0 | |
3f43a5358bb58269846e21207bd570046b6aa711 | Create main_queue_thread.py | gateway/src/main_queue_thread.py | gateway/src/main_queue_thread.py | #!/usr/bin/env python
import threading, time
import queue
q = queue.Queue()
def Producer():
    # Demo producer: push the integers 1..1000 onto the shared queue q.
    # NOTE(review): this Producer/Consumer pair and the threads p/c below
    # look like leftover benchmark code -- the threads are never started
    # and q is re-created further down the file.
    n = 0
    while n < 1000:
        n += 1
        q.put(n)
        # print('Producer has created %s' % n)
        # time.sleep(0.1)
def Consumer():
    # Demo consumer: drain 1000 items from the shared queue q.
    count = 0
    while count < 1000:
        count += 1
        data = q.get()
        # print('Consumer has used %s' % data)
        # time.sleep(0.2)
p = threading.Thread(target = Producer, name='')
c = threading.Thread(target = Consumer, name='')
# NOTE(review): everything below re-imports modules and rebinds q, shadowing
# the demo queue above; the threads p and c are never started.
import serial
import time
import json
import threading
from time import ctime,sleep
import queue
q = queue.Queue()
# Serial link to the LoRa module; the read timeout is required so that
# readline() below does not block forever.
#ser = serial.Serial("/dev/ttyS0", 9600)
ser = serial.Serial("/dev/ttyS0", 9600, timeout=0.2)
recv = ''
def Lora(func):
    """Reader thread: poll the serial port for LoRa messages and push each
    decoded line onto the queue. *func* is unused (thread label only)."""
    global recv
    while True:
        #Waiting for LoRa module message from uart port.
        count = ser.inWaiting()
        if count != 0:
            recv = ser.readline() #readline() need to set timeout, otherwise results block
            ser.flushInput()
            q.put(recv.decode())
            print(recv.decode())
        sleep(0.1)
def Lora_json(func):
    """Consumer thread: parse queued LoRa messages as JSON and reply over
    serial for known devices. *func* is unused (thread label only)."""
    global recv
    while True:
        if q.empty():
            pass
        else:
            print(q.qsize())
            data = q.get()
            # json_lora = json.loads(bytes.decode(recv))
            json_lora = json.loads(data)
            #Parse JSON
            #print(json_lora.get("ID"))
            #print(json_lora["ID"])
            #if json_lora.get("ID") == '1' : #Device ID-1 existed in gateway database
            if int(json_lora.get("ID")) == 1 : #Device ID-1 existed in gateway database
                if json_lora.get("CMD") == 'Online':
                    response = '{"ID":"1", "CMD":"Online", "TYPE":"Light2", "VALUE":"On"}'
                    # NOTE(review): this response is only printed, never
                    # written to the serial port (unlike the irrigate case).
                    print(response.encode())
                elif json_lora.get("CMD") == 'Env':
                    if json_lora.get("TYPE") == 'moisture':
                        if int(json_lora.get("VALUE")) < 2000: # soil moisture is lower than standard
                            response = '{"ID":"1", "CMD":"irrigate", "TYPE":"Open", "VALUE":"100"}'
                            ser.write(str.encode(response))
            else:
                # Unknown device: device provisioning is still stubbed out.
                print('init_device')
                #init_device() #Create sqlite table for device 1.
            recv = ''
        #print("This is %s. %s" % (func,ctime()))
        sleep(1)
def gateway_init():
    """Placeholder start-up routine: prints the intended initialisation
    steps; the actual server-sync/database code is still commented out."""
    print('gateway init')
    print('check gateway database existed or not')
    # NOTE(review): 'dateway' below looks like a typo for 'gateway'
    # (runtime string left unchanged here).
    print('dateway database do not exist')
    print('read gateway ID from gateway.inf')
    print('send ID to server to check gateway database backup on server or not')
    #requests.post('http://www.sips.com/gateway', data=json.dumps({'ID': '123456'}))
    print('if ID backup on server, download it, otherwise init it')
    #url = 'http://www.sips.com/gateway/123456/sips.db'
    #r = requests.get(url)
    #with open("sips.db", "wb") as code:
    #    code.write(r.content)
    print('init database......')
# One reader thread (serial -> queue) and one parser thread (queue -> JSON).
threads = []
t1 = threading.Thread(target=Lora,args=('Lora Thread',))
threads.append(t1)
t2 = threading.Thread(target=Lora_json,args=('Lora_json_parse Thread',))
threads.append(t2)
if __name__ == '__main__':
    gateway_init()
    for t in threads:
        # t.setDaemon(True)
        t.start()
    # Keep the main thread alive while the worker threads run forever.
    while True:
        #print("\nThis is the main thread!")
        sleep(2)
| Python | 0.000028 | |
2b2ff2a528f6effd219bd13cd754c33b55e82e61 | add __init__.py, initialized bootstrap extension | app/__init__.py | app/__init__.py | from flask import Flask
from flask.ext.bootstrap import Bootstrap
# Bug fix: Moment was instantiated below but never imported, which made this
# module raise NameError on import.
from flask.ext.moment import Moment

from config import config

bootstrap = Bootstrap()
moment = Moment()

def create_app(config_name):
    """Application factory: build a Flask app for *config_name* (a key of
    the ``config`` mapping) and initialise the extensions against it."""
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    bootstrap.init_app(app)
    moment.init_app(app)
return app | Python | 0.000019 | |
387b5732c0b2231580ae04bf5088ef7ce59b0d84 | Add script to normalize the spelling in a dataset | normalize_dataset.py | normalize_dataset.py | """Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern.keys():
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
| Python | 0.000008 | |
dee535c8566d0e542891ed10939eec6448483a6f | read in cenque galaxy catalog | code/centralms.py | code/centralms.py | '''
'''
import h5py
import numpy as np
# --- local ---
import util as UT
class CentralMS(object):
    def __init__(self, cenque='default'):
        ''' This object reads in the star-forming and quenching
        galaxies generated from the CenQue project and is an object
        for those galaxies. Unlike CenQue, this object WILL NOT
        have extensive functions and will act as a data catalog.
        '''
        # Name of the CenQue run to load; only 'default' is implemented
        # (see _Read_CenQue).
        self.cenque = cenque
        # Populated later (e.g. by _Read_CenQue / AssignSFR0).
        self.mass = None
        self.sfr = None
        self.ssfr = None

    def _Read_CenQue(self):
        ''' Read in SF and Quenching galaxies generated from
        the CenQue project.
        '''
        if self.cenque == 'default':
            # Hard-coded snapshot and ABC-run identifiers of the default run.
            tf = 7
            abcrun = 'RHOssfrfq_TinkerFq_Std'
            prior = 'updated'
        else:
            raise NotImplementedError

        file = ''.join([UT.dat_dir(), 'cenque/',
            'sfms.centrals.',
            'tf', str(tf),
            '.abc_', abcrun,
            '.prior_', prior,
            '.hdf5'])
        # read in the file and save to object
        f = h5py.File(file, 'r')
        grp = f['data']
        for col in grp.keys():
            if col == 'mass':
                # make sure to mark as SHAM mass
                setattr(self, 'M_sham', grp[col][:])
            elif col in ['sfr', 'ssfr']:
                # sfr/ssfr stay None here; they are assigned separately.
                continue
            else:
                setattr(self, col, grp[col][:])
        f.close()
        return None
def AssignSFR0(cms):
    ''' Assign initial SFRs to the cms object based on tsnap_genesis
    (time when the halo enters the catalog) and mass_genesis
    '''
    if 'tsnap_genesis' not in cms.__dict__.keys():
        # Most likely you did not read in CenQue catalog!
        raise ValueError

    # NOTE(review): everything below references names that are undefined in
    # this module (sfr_class, starforming, mass, redshift, sfms_dict,
    # avg_sfr, delta_sfr, ngal_sf, sfr, ssfr, AverageLogSFR_sfms,
    # ScatterLogSFR_sfms). As written this function raises NameError; it
    # appears to be work-in-progress code copied from the CenQue project.
    # Assign SFR to star-forming galaxies
    sfr_class[starforming] = 'star-forming'
    mu_sf_sfr = AverageLogSFR_sfms(
            mass[starforming],
            redshift[starforming],
            sfms_prop=sfms_dict)
    sigma_sf_sfr = ScatterLogSFR_sfms(
            mass[starforming],
            redshift[starforming],
            sfms_prop=sfms_dict)
    avg_sfr[starforming] = mu_sf_sfr
    delta_sfr[starforming] = sigma_sf_sfr * np.random.randn(ngal_sf)
    sfr[starforming] = mu_sf_sfr + delta_sfr[starforming]
    ssfr[starforming] = sfr[starforming] - mass[starforming]
if __name__=='__main__':
    # Smoke test: load the default CenQue central-galaxy catalog.
    cms = CentralMS()
    cms._Read_CenQue()
| Python | 0 | |
65e689dd66124fcaa0ce8ab9f5029b727fba18e2 | Add solution for compare version numbers | src/compare_version_numbers.py | src/compare_version_numbers.py | """
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" for "half way to version three", it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
    """Compare two dotted version strings.

    Returns 1 if version1 > version2, -1 if version1 < version2, else 0.
    Missing components compare as 0, so '1' == '1.0'.
    """
    # Py3 fix: map() returns a lazy iterator on Python 3, so len() and
    # indexing below would fail -- materialize the lists explicitly.
    list1 = list(map(int, version1.split('.')))
    list2 = list(map(int, version2.split('.')))
    max_length = max(len(list1), len(list2))
    for i in range(max_length):
        # Treat absent components as 0.
        value1 = list1[i] if i < len(list1) else 0
        value2 = list2[i] if i < len(list2) else 0
        if value1 > value2:
            return 1
        elif value1 < value2:
            return -1
    return 0
if __name__ == '__main__':
    # Ad-hoc self-test (Python 2): parallel lists of inputs and the
    # expected comparison result for each pair.
    version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
    version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
    result_list = [-1, -1, -1, 1, -1, 0]
    max_length = len(version1_list)
    success = True
    for i in range(max_length):
        result = compareVersion(version1_list[i], version2_list[i])
        if result != result_list[i]:
            success = False
            print 'Input:', version1_list[i], version2_list[i]
            print 'Output:', result
            print 'Expected:', result_list[i]
    if success:
        print 'All tests are passed'
| Python | 0 | |
0da01e405849da1d5876ec5a758c378aaf70fab2 | add the canary | cleverhans/canary.py | cleverhans/canary.py | import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import infer_devices
def run_canary():
  """
  Runs some code that will crash if the GPUs / GPU driver are suffering from
  a common bug. This helps to prevent contaminating results in the rest of
  the library with incorrect calculations.
  """

  # Note: please do not edit this function unless you have access to a machine
  # with GPUs suffering from the bug and can verify that the canary still
  # crashes after your edits. Due to the transient nature of the GPU bug it is
  # not possible to unit test the canary in our continuous integration system.

  # Try very hard not to let the canary affect the graph for the rest of the
  # python process
  canary_graph = tf.Graph()
  with canary_graph.as_default():
    devices = infer_devices()
    num_devices = len(devices)
    if num_devices < 3:
      # We have never observed GPU failure when less than 3 GPUs were used
      return

    # Fixed seed: every device is asked to differentiate the exact same
    # deterministic 2x2 loss, so all gradients must agree bit-for-bit
    # up to np.allclose tolerance.
    v = np.random.RandomState([2018, 10, 16]).randn(2, 2)
    # Try very hard not to let this Variable end up in any collections used
    # by the rest of the python process
    w = tf.Variable(v, trainable=False, collections=[])
    loss = tf.reduce_sum(tf.square(w))

    grads = []
    for device in devices:
      with tf.device(device):
        grad, = tf.gradients(loss, w)
        grads.append(grad)

    sess = tf.Session()
    sess.run(tf.variables_initializer([w]))
    grads = sess.run(grads)
    first = grads[0]
    for grad in grads[1:]:
      if not np.allclose(first, grad):
        # pylint can't see when we use variables via locals()
        # pylint: disable=unused-variable
        first_string = str(first)
        grad_string = str(grad)
        raise RuntimeError("Something is wrong with your GPUs or GPU driver."
                           "%(num_devices)d different GPUS were asked to "
                           "calculate the same 2x2 gradient. One returned "
                           "%(first_string)s and another returned "
                           "%(grad_string)s. This can usually be fixed by "
                           "rebooting the machine." % locals())
    # NOTE(review): the session is only closed on the success path; when the
    # RuntimeError fires above it is leaked, but the process is expected to
    # abort in that case anyway.
    sess.close()
if __name__ == "__main__":
  # Allow running the canary standalone from the command line.
  run_canary()
| Python | 0.999998 | |
c370edc980a34264f61e27d0dd288a7d6adf2d7e | Create consumer.py | bin/consumer.py | bin/consumer.py | # Consumer example to show the producer works: J.Oxenberg
from kafka import KafkaConsumer
consumer = KafkaConsumer(b'test',bootstrap_servers="172.17.136.43")
#wait for messages
for message in consumer:
print(message)
| Python | 0.000005 | |
70b312bde16a8c4fca47e4782f2293f0b96f9751 | Add test_datagen2.py | cnn/test_datagen2.py | cnn/test_datagen2.py | import os
import shutil
import numpy as np
from scipy.misc import toimage
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
def draw(X, filename):
    """Plot up to 16 images from X in a 4x4 grid and save them to filename.

    X is indexed as X[i] per image — assumes (n, height, width, channels)
    layout with n <= 16; TODO confirm against the Keras image_data_format
    in use.
    """
    plt.figure()
    for i in range(X.shape[0]):
        # Subplot cells are 1-based, so cell i+1 holds image i (the
        # original kept a separate, redundant `pos` counter for this).
        plt.subplot(4, 4, i + 1)
        plt.imshow(toimage(X[i]))
        plt.axis('off')
    plt.savefig(filename)
    # Close the figure so repeated calls do not leak matplotlib figures
    # (the original left every figure open).
    plt.close()
if __name__ == '__main__':
    img_rows, img_cols, img_channels = 32, 32, 3
    batch_size = 16
    nb_classes = 10

    # Load the CIFAR-10 dataset
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # Scale pixel values to the [0, 1] range
    X_train = X_train.astype('float32')
    X_train /= 255.0

    # Keep only one batch worth of samples for the visual comparison.
    X_train = X_train[0:batch_size]
    y_train = y_train[0:batch_size]
    draw(X_train, 'datagen_before.png')

    # Data augmentation: random rotation plus ZCA whitening
    datagen = ImageDataGenerator(
        rotation_range=90,
        zca_whitening=True
    )
    datagen.fit(X_train)

    # shuffle=False keeps before/after images in the same grid positions.
    g = datagen.flow(X_train, y_train, batch_size, shuffle=False)
    batch = g.next()
    print(batch[0].shape)
    print(batch[1].shape)
    draw(batch[0], 'datagen_after.png')
| Python | 0.000213 | |
2dd5afae12dc7d58c3349f2df2694eeb77ca0298 | Test driving robot via serial input | examples/test_spinn_tracks4.py | examples/test_spinn_tracks4.py | import nengo
import nengo_pushbot
import numpy as np
# Drive the pushbot's tracks with a slowly rotating 2-D command vector,
# represented by a single LIF ensemble.  Commented-out lines are earlier
# experiments (longer relay chains and a console printout node), kept as-is.
model = nengo.Network()
with model:
    # NOTE(review): `input` shadows the builtin; left unchanged here.
    input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
    a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #nengo.Connection(a, b, filter=0.01)
    #nengo.Connection(b, c, filter=0.01)
    #nengo.Connection(c, d, filter=0.01)
    #nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
    #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)

    # Hardware endpoint: pushbot at the given SpiNNaker address.
    bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
    tracks = nengo_pushbot.Tracks(bot)
    #def printout(t, x):
    #    print t, x
    #    return []
    #tracks2 = nengo.Node(printout, size_in=2)
    nengo.Connection(input, a, filter=0.01)
    #nengo.Connection(a, b, filter=0.01)
    #nengo.Connection(b, c, filter=0.01)
    #nengo.Connection(c, d, filter=0.01)
    nengo.Connection(a, tracks, filter=0.01)
    #nengo.Connection(b, tracks2, filter=0.01)

#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)

# Run on SpiNNaker hardware, communicating over the serial link.
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
| Python | 0 | |
f1826205782eb56ba6b478c70e671acae6872d35 | Read similarity graph | exp/influence2/GraphReader2.py | exp/influence2/GraphReader2.py | try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
import numpy
from apgl.util.PathDefaults import PathDefaults
import logging
class GraphReader2(object):
    """
    A class to read the similarity graph generated from the Arnetminer dataset
    """
    def __init__(self, field):
        """Store the field name and derive the dataset file locations."""
        self.field = field
        self.eps = 0.1

        dirName = PathDefaults.getDataDir() + "reputation/" + self.field + "/arnetminer/"
        self.coauthorFilename = dirName + "coauthors.csv"
        self.coauthorMatrixFilename = dirName + "coauthorSimilarity.npy"
        self.trainExpertsFilename = dirName + "experts_train_matches" + ".csv"
        self.testExpertsFilename = dirName + "experts_test_matches" + ".csv"

        logging.debug("Publications filename: " + self.coauthorFilename)
        logging.debug("Training experts filename: " + self.trainExpertsFilename)
        logging.debug("Test experts filename: " + self.testExpertsFilename)

    def read(self):
        """Load the similarity matrix and return it as a weighted igraph graph."""
        K = numpy.load(self.coauthorMatrixFilename)
        K = K.tolist()
        graph = igraph.Graph.Weighted_Adjacency(K, mode="PLUS", loops=False)
        print(graph.summary())
        # Merge duplicate edges, summing their weights, and precompute the
        # inverse weight used for shortest-path style computations.
        graph.simplify(combine_edges=sum)
        graph.es["invWeight"] = 1.0/numpy.array(graph.es["weight"])
        return graph

    def readExperts(self, train=False):
        """
        Read the experts from a test file. Returns two lists: expertsList is the
        list of their names, and expertsIdList is their integer ID.
        """
        if not train:
            logging.debug("Reading test experts list")
            expertsFile = open(self.testExpertsFilename)
        else:
            logging.debug("Reading training experts list")
            expertsFile = open(self.trainExpertsFilename)

        expertsList = expertsFile.readlines()
        expertsFile.close()

        coauthorsFile = open(self.coauthorFilename)
        coauthors = coauthorsFile.readlines()
        coauthorsFile.close()

        # Map each coauthor line to the index of its FIRST occurrence.  The
        # original called coauthors.index(expert) inside the loop — an
        # O(n^2) scan overall; this one-pass dict keeps the same
        # first-match semantics in O(n).
        firstIndex = {}
        for i, name in enumerate(coauthors):
            if name not in firstIndex:
                firstIndex[name] = i

        expertsIdList = [expert for expert in expertsList if expert in firstIndex]
        expertsIdList = [firstIndex[expert] for expert in expertsIdList]

        return expertsList, expertsIdList
e598608f21e30aeeec1ea9a8f452047a270fdc4d | add setup.py to build C module 'counts'; in perspective, it should setup cbclib on various systems | cbclib/setup.py | cbclib/setup.py | from distutils.core import setup, Extension
# Build the C extension module 'counts' from its Python wrapper and the
# calculation sources.
# NOTE(review): distutils is deprecated and removed in Python 3.12+;
# setuptools provides the same setup/Extension API as a drop-in.
setup(
    name="counts", version="0.1",
    ext_modules=[Extension("counts", ["countsmodule.c", "countscalc.c"])]
)
| Python | 0.000002 | |
22769c9d84de432034ef592f94c77b5d5111599d | Create argparser.py | argparser.py | argparser.py | def get_args():
import argparse
import os
from sys import exit
parser = argparse.ArgumentParser(description='Automates android memory dumping')
parser.add_argument('-n', '--samplepath', required=True,help='path of the malware sample-apk')
parser.add_argument('-i', '--interval', required=True, type=int, help='intervals for each memory dump in seconds')
parser.add_argument('-d', '--sdcard', type=int, required=False, help='dump will be saved on the sdcard of the android device instead of being transfered over TCP')
parser.add_argument('-o', '--outputpath', required=False, help='path of the output-path')
parser.add_argument('-c', '--customconfig', required=False, help='path of a custom avd config.ini')
args = parser.parse_args()
if not os.path.isfile(args.samplepath) or (args.customconfig is not None and os.path.isfile(args.customconfig)):
raise Exception("error : one or more specified paths are not pointing to a file")
return args.samplepath, args.interval, args.sdcard, args.outputpath, args.customconfig
if __name__ == '__main__':
    # BUG fixed: the original called get_args(sys.argv[1:]), but get_args()
    # takes no parameters (argparse reads sys.argv itself), so every direct
    # invocation raised TypeError.  Dead commented-out scaffolding removed.
    get_args()
| Python | 0.000418 | |
ee52a96c5d0b6c08cb4e97210f3f995f78951fdc | add relay file to project | cgi-bin/moneywatch-relay.py | cgi-bin/moneywatch-relay.py | #!/usr/bin/python
#===============================================================================
# Code written by James Ottinger
#===============================================================================
import sys
sys.path.append('/dirsomewhere/')
import moneywatchengine
#===============================================================================
'''
-- I.SUMMARY.GET
-- I.ELECTION.GET
-- I.BULKADD.EDIT
-- I.BULKADD.SAVE
I.TICKERS.EDIT
I.TICKERS.SAVE
-- I.ENTRY.ADD
I.ENTRY.EDIT
I.ENTRY.SAVE
I.ENTRY.DEL
-- B.SUMMARY.GET
-- B.ACCOUNT.GET
B.BULKADD.EDIT
B.BULKADD.SAVE
B.BULKINT.EDIT
B.BULKINT.SAVE
B.MYACCT.ON
B.MYACCT.OFF
B.ENTRY.ADD
B.ENTRY.EDIT
B.ENTRY.SAVE
B.ENTRY.DEL
*- U.IMPORTFILE.EDIT
*- U.IMPORTFILE.SAVE
-- U.UPDATEQUOTES
'''
#===============================================================================
# CGI job dispatcher: the ?job= form value selects one engine call.  Each
# branch first emits the CGI Content-type header, then either prints the
# engine's HTML payload directly or performs the action and prints "ok".
# (Python 2 CGI script — print statements throughout.)

# --- Investment (I.*) jobs ---------------------------------------------
if moneywatchengine.g_formdata.getvalue('job') == 'I.SUMMARY.GET':
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_summary()
elif moneywatchengine.g_formdata.getvalue('job') == 'I.ELECTION.GET':
    # requires: ?ticker=
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_electionget()
elif moneywatchengine.g_formdata.getvalue('job') == 'I.BULKADD.EDIT':
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_bulkadd_edit()
elif moneywatchengine.g_formdata.getvalue('job') == 'I.BULKADD.SAVE':
    print "Content-type: text/html\n\n"
    moneywatchengine.i_bulkadd_save()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'I.ENTRY.ADD':
    # requires: ?ticker=
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_entry_prepareadd()
elif moneywatchengine.g_formdata.getvalue('job') == 'I.ENTRY.EDIT':
    # requires: ?id=&ticker=
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_entry_prepareedit()
elif moneywatchengine.g_formdata.getvalue('job') == 'I.ENTRY.ADDSAVE' or moneywatchengine.g_formdata.getvalue('job') == 'I.ENTRY.EDITSAVE':
    print "Content-type: text/html\n\n"
    moneywatchengine.i_prepare_addupdate()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'I.ENTRY.DELETE':
    # requires: ?transid=
    print "Content-type: text/html\n\n"
    moneywatchengine.i_entry_delete()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'I.GRAPH.GET':
    # requires: ?ticker=
    print "Content-type: text/html\n\n"
    print moneywatchengine.i_graph()

# --- Banking (B.*) jobs ------------------------------------------------
elif moneywatchengine.g_formdata.getvalue('job') == 'B.SUMMARY.GET':
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_summary()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.ACCOUNT.GET':
    # requires: ?parentid=
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_accountget()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.ENTRY.ADD':
    # requires: ?bankacctid=
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_entry_prepareadd()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.ENTRY.EDIT':
    # requires: ?transid=
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_entry_prepareedit()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.ENTRY.ADDSAVE' or moneywatchengine.g_formdata.getvalue('job') == 'B.ENTRY.EDITSAVE':
    print "Content-type: text/html\n\n"
    moneywatchengine.b_prepare_addupdate()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'B.ENTRY.DELETE':
    # requires: ?transid=
    print "Content-type: text/html\n\n"
    moneywatchengine.b_entry_delete()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'B.BULKINTEREST.EDIT':
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_bulkinterest_edit()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.BULKINTEREST.SAVE':
    print "Content-type: text/html\n\n"
    moneywatchengine.b_bulkinterest_save()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'B.BULKBILLS.EDIT':
    print "Content-type: text/html\n\n"
    print moneywatchengine.b_bulkbills_edit()
elif moneywatchengine.g_formdata.getvalue('job') == 'B.BULKBILLS.SAVE':
    print "Content-type: text/html\n\n"
    moneywatchengine.b_bulkbills_save()
    print "ok"

# --- Utility (U.*) jobs ------------------------------------------------
elif moneywatchengine.g_formdata.getvalue('job') == 'U.IMPORTFILE.EDIT':
    print "Content-type: text/html\n\n"
    print moneywatchengine.u_importfile_edit()
elif moneywatchengine.g_formdata.getvalue('job') == 'U.IMPORTFILE.SAVE':
    print "Content-type: text/html\n\n"
    print moneywatchengine.u_importfile_save()
elif moneywatchengine.g_formdata.getvalue('job') == 'U.UPDATEQUOTES':
    print "Content-type: text/html\n\n"
    moneywatchengine.u_fetchquotes()
    print "ok"
elif moneywatchengine.g_formdata.getvalue('job') == 'U.UPDATEBANKTOTALS':
    print "Content-type: text/html\n\n"
    moneywatchengine.u_banktotals()
    print "ok"
else:
    print "Content-type: text/html\n\n"
    print ")("  # unknown/missing job: reply with a placeholder body

#moneywatchengine.b_accounttally(1)
#print moneywatchengine.i_entry_prepareedit()
| Python | 0 | |
dbe76ab17e795540de6a53b22f90c8af0cb15dbe | Add constants example | constants.example.py | constants.example.py | # coding: utf-8
from __future__ import unicode_literals
token = '123456789:dfghdfghdflugdfhg-77fwftfeyfgftre' # bot access_token
sn_stickers = ('CADAgADDwAu0BX', 'CAADA',
'CDAgADEQADfvu0Bh0Xd-rAg', 'CAADAgAADfvu0Bee9LyXSj1_fAg',) # ids
some2_stickers = ('CAADAKwADd_JnDFPYYarHAg', 'CAADAgADJmEyMU5rGAg')
allowed_stickers = sn_stickers + some2_stickers
default_probability = 0.01 # value hidden
del_symbols = '`~1234567890!@#' # symbols to ignore
quotes_dict = { # examples
(0.6, 'університет', 'университет'): """ну що тут сказати
цитата2
@, що Ви мали на увазі?""", # before sending @ will be replaced by username or name
(0.75, sn_stickers): """стікер зі мною детектед
а я непогайно тут виглядаю
цитата3"""}
| Python | 0.000011 | |
d777a19bb804ae1a4268702da00d3138b028b386 | Add a python script to start sysmobts-remote and dump docs | contrib/dump_docs.py | contrib/dump_docs.py | #!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time,os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
| Python | 0.000001 | |
436119b2ef8ea12f12b69e0d22dd3441b7e187cd | add ratelimit plugin | plugins/ratelimit.py | plugins/ratelimit.py | import time
# Per-user token buckets: each entry counts recent messages and is drained
# over time by tick() below.
buckets = {}
last_tick = time.time()
# Window length in seconds and how many messages are allowed within it.
timeframe = float(yui.config_val('ratelimit', 'timeframe', default=60.0))
max_msg = float(yui.config_val('ratelimit', 'messages', default=6.0))
# How long offenders are ignored, in seconds (config value is minutes).
ignore_for = 60.0 * float(yui.config_val('ratelimit', 'ignoreMinutes', default=3.0))
@yui.event('postCmd')
def ratelimit(user, msg):
    """Count one command for *user*; ignore them once over the budget.

    `msg` is unused but required by the postCmd event signature.
    """
    # dict.get collapses the original "first message vs. increment" branch
    # (and avoids the `not in buckets.keys()` anti-idiom).
    buckets[user] = buckets.get(user, 0.0) + 1.0
    if buckets[user] > max_msg:
        yui.ignore(ignore_for, user.nick)
@yui.event('tick')
def tick():
    """Drain every bucket at the steady leak rate accrued since last tick."""
    global last_tick
    now = time.time()
    elapsed = now - last_tick
    # Amount each bucket leaks over the elapsed interval.
    drained = (max_msg / timeframe) * elapsed
    for user, level in buckets.items():
        level -= drained
        # Clamp at zero so idle users do not accumulate credit.
        buckets[user] = level if level > 0 else 0
    last_tick = now
| Python | 0 | |
83579a7e10d66e29fc65c43ba317c6681a393d3e | Add simple hub datapath | pox/datapaths/hub.py | pox/datapaths/hub.py | # Copyright 2017 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A simple hub datapath.
Launch it with a number of interface names, and it will pass packets
between them. Requires pxpcap to be built -- see "Building pxpcap"
in the POX manual.
Example:
./pox.py datapaths.hub --ports=eth0,eth1,eth2
"""
from pox.core import core
from Queue import Queue
import pox.lib.packet as pkt
from pox.lib.interfaceio import PCapInterface
class Hub (object):
  """
  A simple hub: every frame received on one port is repeated out of all
  the other ports.
  """
  def __init__ (self, ports=()):
    # Default changed from a mutable [] to an immutable tuple — the
    # shared-mutable-default anti-pattern invites aliasing bugs; any
    # iterable of port names still works.
    self._ports = set()
    self.rx_bytes = 0
    for p in ports:
      self.add_port(p)

  def add_port (self, port):
    """Open the named interface via pxpcap and attach it to this hub."""
    p = PCapInterface(port)
    p.addListeners(self)
    self._ports.add(p)

  def _handle_RXData (self, event):
    # Flood: account the bytes, then write the frame to every port except
    # the one it arrived on.
    self.rx_bytes += len(event.data)
    for port in self._ports:
      if port is event.interface: continue
      port.write(event.data)
def launch (ports):
  """POX component entry point: build a hub over the named interfaces.

  *ports* is a comma- and/or space-separated list of interface names.
  """
  names = ports.replace(","," ").split()
  hub = Hub()
  core.register("hub", hub)
  for name in names:
    hub.add_port(name)
| Python | 0 | |
d753fe46507d2e829c0b6ffc3120ec8f9472c4f1 | Add Problem 59 solution. | project-euler/059.py | project-euler/059.py | '''
Problem 59
19 December 2003
Each character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange). For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes. The user would keep the encrypted message and the encryption key in different locations, and without both "halves", it is impossible to decrypt the message.
Unfortunately, this method is impractical for most users, so the modified method is to use a password as a key. If the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. The balance for this method is using a sufficiently long password key for security, but short enough to be memorable.
Your task has been made easy, as the encryption key consists of three lower case characters. Using cipher1.txt, a file containing the encrypted ASCII codes, and the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.
'''
import collections, operator, string
# Brute-force the 3-letter lowercase XOR key: try every (i, j, k) in
# a..z for the three interleaved key positions and print any candidate
# plaintext with a plausibly English frequency of spaces.
# (Python 2 script — map() returns a list and print is a statement.)
# NOTE(review): `file` shadows the builtin; `collections`/`string` imports
# above are unused.
file = open('059.txt', 'r')
encrypted = map(int, file.read().split(','))
length = len(encrypted)
decrypted = [0 for i in range(length)]
chars = range(97, 123)

for i in chars:
    # Key byte i decrypts positions 0, 3, 6, ...
    for x in range(length)[0::3]:
        decrypted[x] = operator.xor(i, encrypted[x])
    for j in chars:
        # Key byte j decrypts positions 1, 4, 7, ...
        for x in range(length)[1::3]:
            decrypted[x] = operator.xor(j, encrypted[x])
        for k in chars:
            # Key byte k decrypts positions 2, 5, 8, ...
            for x in range(length)[2::3]:
                decrypted[x] = operator.xor(k, encrypted[x])
            # Spaces are the most common character in the English language, occurring
            # just less than once per every 5 chars (19.18182%), so filter by a high
            # frequency of spaces. (See http://www.data-compression.com/english.html)
            if (decrypted.count(32) > 0.15*length):
                print ''.join(map(chr, decrypted))
                print sum([char for char in decrypted])
| Python | 0.000007 | |
d1fcf47d62671abbb2ec8a278460dd64a4de03c2 | Create cryptoseven.py | cryptoseven.py | cryptoseven.py | import sys
def strxor(a, b):
    """XOR two strings character-wise.

    The result has the length of the shorter input; the longer input's
    tail is discarded.
    """
    # zip() already stops at the shorter sequence, so the original's
    # explicit length comparison and slicing were redundant — both
    # branches computed exactly this.
    return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
def printAscii(msg):
z = [chr(ord(x)) for x in msg]
x = "".join(z)
print x.encode('hex')
def main():
    # Known-plaintext attack on a reused XOR ("one-time pad") key:
    # key = plaintext XOR ciphertext, so any other message encrypted with
    # the same key can be forged.
    text = "attack at dawn"
    enc = "6c73d5240a948c86981bc294814d".decode('hex')
    key = strxor(text, enc)
    # Re-encrypt a different message under the recovered key.
    text2 = "attack at dusk"
    enc2 = strxor(text2, key)
    print enc2.encode('hex')

main()
| Python | 0.000001 | |
a890be194cfcb69fb4b847d7ec06cf324d868bc9 | Create base-code.py | base-code.py | base-code.py | #Base Code of project written in Python 3
import binascii
import re
import random
# Monte-Carlo DNA-encoding simulation: encode a text message as A/T/G/C
# symbols, insert a 12-symbol marker (T + ten A's + T) after every 24
# coding symbols, apply random point mutations, attempt to re-align using
# the markers, strip them, and measure how much of the message survives.
# Averages the per-trial efficiency over 1,000,000 trials.
# NOTE(review): "effiency"/"effcount" are typos for efficiency; identifiers
# left unchanged here.
BIGCOUNT = 1
div = 0
effcount = 0
while (BIGCOUNT <= 1000000):
    # Encode the message: binary string -> 2-bit chunks -> A/T/G/C symbols.
    n = 2
    binary = str(bin(int.from_bytes('The quick brown fox jumps over the lazy dog THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG!!@#$%^&*()<>?:"{} . , / \ |[] 0 1 2 3 4 5 6 7 8 9 + - _ = ~ `'.encode(), 'big')))
    binary = re.sub('b', '', binary)
    binaryArray = [binary[i:i+n] for i in range(0, len(binary), n)]
    newArray = ['A' if x=='00' else x for x in binaryArray]
    newArray = ['T' if x=='11' else x for x in newArray]
    newArray = ['G' if x=='01' else x for x in newArray]
    newArray = ['C' if x=='10' else x for x in newArray]
    lengthArray = len(newArray)
    # Snapshot of the pure (marker-free) encoding for the final comparison.
    knownArray2 = list(newArray)

    # Insert the 12-symbol marker T AAAAAAAAAA T after every 24 symbols.
    count = 24
    con = 1
    while count < len(newArray):
        if con == 1:
            newArray.insert(count,'T')
            con = con + 1
            count = count + 1
        elif con == 12:
            newArray.insert(count,'T')
            con = con + 1
            count = count + 1
        elif con <= 12:
            newArray.insert(count, 'A')
            con = con + 1
            count = count + 1
        else:
            # Marker complete: skip the next 24 coding symbols.
            count = count + 24
            con = 1
    lengthArray = len(newArray)
    # Snapshot including markers, used as the pre-mutation reference.
    knownArray = []
    knownArray = list(newArray)

    # Random mutation pass over the 24-symbol coding stretches (markers,
    # every 13th..24th position per stride, are skipped via the +13 jump).
    newCount = 0
    counter = 0
    mutationsCount = 0
    while newCount <= len(newArray):
        while counter <= 23 and newCount < len(newArray):
            # Pick a random replacement symbol.
            randVal = int(random.randrange(0, 4))
            if randVal == 1:
                mu = 'A'
            elif randVal == 2:
                mu = 'T'
            elif randVal == 3:
                mu = 'G'
            else:
                mu = 'C'
            # A mutation happens only when two independent draws from
            # 0..1000 coincide; the matched value selects the kind.
            rand = int(random.randrange(0, 1001))
            mutation1 = int(random.randrange(0, 1001))
            if (rand == mutation1):
                if (mutation1 == 2):
                    # Deletion.
                    newArray.pop(newCount)
                    counter = counter +1
                    mutationsCount = mutationsCount +1
                elif (mutation1 == 4):
                    # Insertion.
                    newArray.insert(newCount, mu)
                    newCount = newCount + 1
                    mutationsCount = mutationsCount +1
                else:
                    # Substitution (insert then pop the displaced symbol).
                    newArray.insert(newCount, mu)
                    newArray.pop(newCount+1)
                    newCount = newCount + 1
                    counter = counter +1
                    mutationsCount = mutationsCount +1
            else:
                newCount = newCount + 1
                counter = counter + 1
        newCount = newCount + 13
        counter = 1

    # Re-alignment pass: search for marker patterns and pad/trim so each
    # one sits on its expected 24-symbol boundary.
    # NOTE(review): positions +2,+4,+6,+8 are bare truthiness checks (any
    # non-empty string passes) — presumably missing =='A' comparisons.
    deCount = 0
    deCount2 = 0
    while deCount <= (len(newArray)- 24):
        if (newArray[deCount]=='T' and newArray[deCount+1]=='A' and newArray[deCount+2] and newArray[deCount+3]=='A' and newArray[deCount+4] and newArray[deCount+5]=='A' and newArray[deCount+6] and newArray[deCount+7]=='A' and newArray[deCount+8] and newArray[deCount+9]=='A' and newArray[deCount+10]== 'A' and newArray[deCount+11] == 'T'):
            if deCount2 == 24:
                # Marker found exactly where expected: jump past it.
                deCount = deCount + 12
                deCount2 = 0
            elif deCount2 < 24:
                # Marker arrived early: pad with A's to push it out.
                newArray.insert(deCount, 'A')
                deCount = deCount + 1
                deCount2 = deCount2 + 1
            elif deCount2 > 24:
                # Marker arrived late: trim preceding symbols.
                newArray.pop(deCount - 1)
                deCount = deCount - 1
                deCount2 = deCount2 - 1
        else:
            deCount = deCount + 1
            deCount2 = deCount2 + 1

    # Positional agreement with the marker-bearing reference (diagnostic;
    # this efCount is overwritten by the final comparison below).
    new = 1
    efCount = 0
    while new <= len(knownArray)-1 and new <= len(newArray)-1:
        if knownArray[new] == newArray[new]:
            efCount = efCount + 1
            new = new + 1
        else:
            new = new + 1
    efCount = float(efCount / (len(knownArray)-1)*100)

    # Strip the 12 marker symbols following every 24 coding symbols.
    codeCount = 24
    counter11 = 1
    while (codeCount < len(newArray)):
        while counter11 <= 12:
            newArray.pop(codeCount)
            counter11 = counter11 + 1
        codeCount = codeCount + 24
        counter11 = 1

    # Final efficiency: fraction of positions matching the original
    # marker-free encoding, as a percentage.
    new = 0
    efCount = 0
    while new <= len(knownArray2)-1 and new <= len(newArray)-1:
        if knownArray2[new] == newArray[new]:
            efCount = efCount + 1
            new = new + 1
        else:
            new = new + 1
    efCount = float(efCount / (len(knownArray2))*100)
    div = div + 1
    effcount = effcount + efCount
    BIGCOUNT = BIGCOUNT + 1

effiency = effcount/div
print (effiency)
| Python | 0.000017 | |
baeecbd66e1acd48aa11fdff4c65567c72d88186 | Create client.py | ohesteebee/client.py | ohesteebee/client.py | """Ohessteebee client."""
import requests
from typing import Dict
# Alias for an OpenTSDB "put" payload.
# NOTE(review): the payload built below holds ints and a nested dict, so
# Dict[str, str] is inaccurate — presumably should be Dict[str, Any].
PutDict = Dict[str, str]
class Ohessteebee:
    """Minimal client for an OpenTSDB-style HTTP API."""

    def __init__(self, endpoint, port=4242):
        self.session = requests.Session()
        self.req_path = "http://{endpoint}:{port}".format(
            endpoint=endpoint, port=port)

    def _generate_put_dict(self, metric: str, timestamp: int, value: int, **kwargs) -> dict:
        """Build the JSON body for a /api/put request.

        Extra keyword arguments become OpenTSDB tags.
        BUG fixed: the original omitted ``self`` yet was called as a bound
        method, so every call raised/bound arguments incorrectly.
        """
        tags = dict(kwargs) if kwargs else {}
        return {
            "metric": metric,
            "timestamp": timestamp,
            "value": value,
            "tags": tags,
        }

    def query(self, metric: str, start_date=None, end_date=None):
        """Get metric from OSTB.

        NOTE(review): start_date/end_date are accepted but not yet sent —
        the endpoint is queried without parameters, as in the original.
        """
        path = "/api/query"
        api_url = self.req_path + path
        return self.session.get(api_url)

    def put(self, metric: str, timestamp: int, value=None, **kwargs):
        """Put metric into OSTB.

        ``value`` is new but defaulted, keeping the old signature valid.
        BUGs fixed: ``self.sesion`` typo; ``_generate_put_dict`` was called
        with no arguments; the built payload was never sent with the POST.
        """
        path = "/api/put"
        api_url = self.req_path + path
        data = self._generate_put_dict(metric, timestamp, value, **kwargs)
        return self.session.post(api_url, json=data)
| Python | 0.000001 | |
22494a45d2bce6774bdc50409a71f259841287f5 | add initial GlimError | glim/exception.py | glim/exception.py |
class GlimError(Exception):
    """Base class for all glim framework exceptions."""
    pass
151599dd242eb0cb0da4771ca3798d66314719f0 | add queue module | greennet/queue.py | greennet/queue.py | import time
from collections import deque
from py.magic import greenlet
from greennet import get_hub
from greennet.hub import Wait
class QueueWait(Wait):
    """Record of a greenlet task blocked on a Queue operation.

    Subclasses set ``_wait_attr`` to the name of the Queue wait-list
    (deque) the record lives on.
    """

    __slots__ = ('queue',)

    def __init__(self, task, queue, expires):
        super(QueueWait, self).__init__(task, expires)
        self.queue = queue

    def timeout(self):
        # On expiry, remove this record from the queue's wait list so it is
        # never woken later, then run the base-class timeout handling.
        getattr(self.queue, self._wait_attr).remove(self)
        super(QueueWait, self).timeout()
class PopWait(QueueWait):
    """Wait for a pop to happen (producers blocked on a full queue)."""
    __slots__ = ()
    # Queue attribute holding waiters of this kind.
    _wait_attr = '_pop_waits'
class AppendWait(QueueWait):
    """Wait for an append to happen (consumers blocked on an empty queue)."""
    __slots__ = ()
    # Queue attribute holding waiters of this kind.
    _wait_attr = '_append_waits'
class Queue(object):
    """A deque-backed queue that blocks greenlet tasks via the hub.

    Consumers block in pop()/popleft() until an item is appended;
    producers block in append()/appendleft() while the queue is full
    (when ``maxlen`` is set).
    """

    def __init__(self, maxlen=None, hub=None):
        # maxlen=None means unbounded (full() is then always False).
        self.queue = deque()
        self.maxlen = maxlen
        self.hub = get_hub() if hub is None else hub
        # Tasks waiting for an append (consumers) / for a pop (producers).
        self._append_waits = deque()
        self._pop_waits = deque()

    def __len__(self):
        return len(self.queue)

    def full(self):
        """Return True when a bound is set and the queue has reached it."""
        if self.maxlen is None:
            return False
        return len(self.queue) >= self.maxlen

    def _wait_for_append(self, timeout):
        # Block the current task until some producer appends (or timeout).
        expires = None if timeout is None else time.time() + timeout
        wait = AppendWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._append_waits.append(wait)
        self.hub.run()

    def _wait_for_pop(self, timeout):
        # Block the current task until some consumer pops (or timeout).
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._pop_waits.append(wait)
        self.hub.run()

    def _popped(self):
        # An item was removed: wake one task waiting for a pop, cancelling
        # its pending timeout first.
        if self._pop_waits:
            wait = self._pop_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def _appended(self):
        # An item was added: wake one task waiting for an append.
        if self._append_waits:
            wait = self._append_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def wait_until_empty(self, timeout=None):
        """Block until the queue drains; re-arms the same wait record each
        time it is woken while items remain."""
        if not self.queue:
            return
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        while self.queue:
            self._pop_waits.append(wait)
            self.hub.run()
        # NOTE(review): this final _popped() appears to hand the wakeup on
        # to the next pop-waiter — confirm against greennet.hub semantics.
        self._popped()

    def pop(self, timeout=None):
        """Remove and return the rightmost item, blocking while empty."""
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.pop()
        self._popped()
        return item

    def popleft(self, timeout=None):
        """Remove and return the leftmost item, blocking while empty."""
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.popleft()
        self._popped()
        return item

    def clear(self):
        """Discard all items and wake one blocked producer, if any."""
        self.queue.clear()
        self._popped()

    def append(self, item, timeout=None):
        """Add an item on the right, blocking while the queue is full."""
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.append(item)
        self._appended()

    def appendleft(self, item, timeout=None):
        """Add an item on the left, blocking while the queue is full."""
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.appendleft(item)
        self._appended()
| Python | 0.000001 | |
8d5059fcd672fb4f0fcd7a2b57bf41f57b6269e5 | add mongo handler | src/orchestrator/core/mongo.py | src/orchestrator/core/mongo.py | #
# Copyright 2018 Telefonica Espana
#
# This file is part of IoT orchestrator
#
# IoT orchestrator is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# IoT orchestrator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with IoT orchestrator. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
#
# Author: IoT team
#
import json
import logging
from orchestrator.common.util import RestOperations
import pymongo
logger = logging.getLogger('orchestrator_core')
class MongoDBOperations(object):
    '''
    IoT platform: MongoDB
    '''
    def __init__(self,
                 MONGODB_URI=None,
                 CORRELATOR_ID=None,
                 TRANSACTION_ID=None):
        # BUG fixed: the original assigned the undefined name MONGO_URI
        # (the parameter is MONGODB_URI), raising NameError on every
        # construction.
        self.MONGODB_URI = MONGODB_URI
        self.client = pymongo.MongoClient(self.MONGODB_URI)

    def checkMongo(self):
        """Return True when the MongoDB server answers, False otherwise."""
        try:
            # BUG fixed: the original called the bare name `client`
            # (NameError), so this method always fell into the except
            # branch and returned False.
            self.client.list_databases()
            return True
        except Exception as e:
            logger.warn("checkMongo exception: %s" % e)
            return False

    def createIndexes(self, SERVICE_NAME):
        """Create the standard entity indexes on the Orion database for
        the given service; failures are logged, not raised."""
        try:
            databaseName = 'orion-' + SERVICE_NAME
            db = self.client[databaseName]
            db.entities.create_index("_id.id")
            db.entities.create_index("_id.type")
            db.entities.create_index("_id.servicePath")
            db.entities.create_index("_id.creDate")
        except Exception as e:
            logger.warn("createIndex exception: %s" % e)

    def removeDatabase(self, SERVICE_NAME):
        """Drop the Orion database for the given service; failures are
        logged, not raised."""
        try:
            databaseName = 'orion-' + SERVICE_NAME
            self.client.drop_database(databaseName)
        except Exception as e:
            # BUG fixed: the log message said "createIndex" (copy-paste).
            logger.warn("removeDatabase exception: %s" % e)
| Python | 0.000001 | |
90ec9def45bcc50047d3511943c463f57f771f00 | Bump to 3.2.0 | dbbackup/__init__.py | dbbackup/__init__.py | "Management commands to help backup and restore a project database and media"
# Package metadata; VERSION is the single source of truth for __version__.
VERSION = (3, 2, 0)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = 'mjs7231@gmail.com'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
# Django application configuration entry point.
default_app_config = 'dbbackup.apps.DbbackupConfig'
| "Management commands to help backup and restore a project database and media"
# Package metadata; VERSION is the single source of truth for __version__.
VERSION = (3, 1, 3)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = 'mjs7231@gmail.com'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
# Django application configuration entry point.
default_app_config = 'dbbackup.apps.DbbackupConfig'
| Python | 0.00008 |
2c29829bb6e0483a3dc7d98bc887ae86a3a233b7 | Fix dir name of preprocess | pyPanair/preprocess/__init__.py | pyPanair/preprocess/__init__.py | Python | 0.998086 | ||
3e7f8c5b87a85958bd45636788215db1ba4f2fd8 | Create __init__.py | src/site/app/model/__init__.py | src/site/app/model/__init__.py | # -*- coding: utf-8 -*-
| Python | 0.000429 | |
38c2291ab23d86d220446e594d52cce80ea4ec2a | Create Count_Inversions_Array.py | Experience/Count_Inversions_Array.py | Experience/Count_Inversions_Array.py | '''
Inversion Count for an array indicates – how far (or close) the array is from being sorted. If array is already sorted then inversion count is 0. If array is sorted in reverse order that inversion count is the maximum.
Formally speaking, two elements a[i] and a[j] form an inversion if a[i] > a[j] and i < j
Example:
The sequence 2, 4, 1, 3, 5 has three inversions (2, 1), (4, 1), (4, 3).
'''
# Note: G4G Analysis (http://www.geeksforgeeks.org/counting-inversions/)
def count_inver(A):
    """Count inversions in A via merge sort — O(n log n).

    An inversion is a pair (i, j) with i < j and A[i] > A[j].  A is
    sorted in place as a side effect of the merge passes.  Returns 0 for
    an empty or None input (the original returned A itself there,
    leaking an inconsistent return type).
    """
    if not A:
        return 0
    return merge_sort(A, 0, len(A) - 1)


def merge_sort(A, left, right):
    """Sort A[left..right] (inclusive) and return its inversion count."""
    inver_cnt = 0
    if left < right:
        # Floor division keeps indices integral on Python 3 as well.
        mid = (left + right) // 2
        inver_cnt = merge_sort(A, left, mid)
        inver_cnt += merge_sort(A, mid + 1, right)
        inver_cnt += merge(A, left, mid + 1, right)
    return inver_cnt


def merge(A, left, mid, right):
    """Merge sorted runs A[left..mid-1] and A[mid..right]; return the
    number of inversions that span the two runs.

    Debug print statements from the original have been removed.
    """
    i = left
    j = mid
    k = left
    inver_cnt = 0
    tmp = [0] * len(A)
    while i < mid and j <= right:
        if A[i] <= A[j]:
            tmp[k] = A[i]
            i += 1
        else:
            tmp[k] = A[j]
            j += 1
            # Every element still pending in the left run exceeds A[j],
            # so each forms one inversion with it.
            inver_cnt += mid - i
        k += 1
    while i < mid:
        tmp[k] = A[i]
        i += 1
        k += 1
    while j <= right:
        tmp[k] = A[j]
        j += 1
        # BUG fixed: the original wrote `k ++ 1`, a no-op expression, so
        # k never advanced here — the tail of the right run overwrote one
        # slot and corrupted A for subsequent merges.
        k += 1
    A[left:right + 1] = tmp[left:right + 1]
    return inver_cnt
ilist = [2,4,5,1,3,5]
print count_inver(ilist)
| Python | 0.000003 | |
cebee9931f38531717f907502a1da04da659c5c2 | Add missing migration | app/groups/migrations/0008_auto__add_groupcategory__add_field_groupinformation_category.py | app/groups/migrations/0008_auto__add_groupcategory__add_field_groupinformation_category.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupCategory'
db.create_table('groups_groupcategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length='250')),
))
db.send_create_signal('groups', ['GroupCategory'])
# Adding field 'GroupInformation.category'
db.add_column('groups_groupinformation', 'category',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='groups', null=True, to=orm['groups.GroupCategory']),
keep_default=False)
def backwards(self, orm):
# Deleting model 'GroupCategory'
db.delete_table('groups_groupcategory')
# Deleting field 'GroupInformation.category'
db.delete_column('groups_groupinformation', 'category_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.groupcategory': {
'Meta': {'object_name': 'GroupCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'250'"})
},
'groups.groupinformation': {
'Meta': {'ordering': "['group']", 'object_name': 'GroupInformation'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'null': 'True', 'to': "orm['groups.GroupCategory']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'requestable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
'groups.grouprequest': {
'Meta': {'ordering': "['created_date']", 'object_name': 'GroupRequest'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requests'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grouprequests'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['groups'] | Python | 0.0002 | |
59b531e11266b2ff8184c04cda92bcc2fad71fa0 | Create core.py | crispy/actions/core.py | crispy/actions/core.py | from crispy.actions.attacks import Attack, Melee, Ranged, Throw, Shoot
| Python | 0.000001 | |
d6dc45756cbb30a8f707d683943ccd4ee0391e6b | Add an aws settings for the cms | cms/envs/aws.py | cms/envs/aws.py | """
This is the default template for our main set of AWS servers.
"""
import json
from .logsettings import get_logger_config
from .common import *
############################### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
########################### NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(ENV_ROOT / "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
for feature, value in ENV_TOKENS.get('MITX_FEATURES', {}).items():
MITX_FEATURES[feature] = value
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
syslog_addr=(ENV_TOKENS['SYSLOG_SERVER'], 514),
debug=False)
REPOS = ENV_TOKENS['REPOS']
############################## SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(ENV_ROOT / "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS['DATABASES']
MODULESTORE = AUTH_TOKENS['MODULESTORE']
| Python | 0 | |
b32d659b85901a8e04c6c921928483fda3b3e6e0 | Add the storage utility for parsing the config file structure in a more readable fashion. | src/leap/mx/util/storage.py | src/leap/mx/util/storage.py |
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.

        >>> o = Storage(a=1)
        >>> o.a
        1
        >>> o['a']
        1
        >>> o.a = 2
        >>> o['a']
        2
        >>> del o.a
        >>> o.a
        None
    """
    def __getattr__(self, key):
        # Missing attributes resolve to None instead of raising -- that is
        # the documented contract (see the doctest above).
        try:
            return self[key]
        except KeyError:
            return None

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            # Bug fix: the original used Python-2-only statement syntax
            # ("except KeyError, k" / "raise AttributeError, k"); the forms
            # below work on Python 2.6+ and 3.x alike.
            raise AttributeError(k)

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'

    def __getstate__(self):
        # Pickle as a plain dict ...
        return dict(self)

    def __setstate__(self, value):
        # ... and restore by re-inserting every key.
        for (k, v) in value.items():
            self[k] = v
| Python | 0 | |
49a4d3d5bfed0bb12a0e4cdee50672b23533c128 | move data to new table | compass-api/G4SE/api/migrations/0005_auto_20161010_1253.py | compass-api/G4SE/api/migrations/0005_auto_20161010_1253.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3.dev20161004124613 on 2016-10-10 12:53
from __future__ import unicode_literals
from django.db import migrations
from django.utils import timezone
from api.models import GEO_SERVICE_METADATA_AGREED_FIELDS
def _extract_publication_year(record_kwargs):
if record_kwargs['publication_year'] == 'latest':
record_kwargs['is_latest'] = True
years = [int(year) for year in record_kwargs['publication_lineage'].split(',')]
years.sort()
record_kwargs['publication_year'] = years[-1]
else:
record_kwargs['publication_year'] = int(record_kwargs['publication_year'])
return record_kwargs
def _normalize_kwargs(record_kwargs, record_object):
record_kwargs['title'] = getattr(record_object, 'content')
record_kwargs = _extract_publication_year(record_kwargs)
return record_kwargs
def _extract_kwargs_(record_object, from_import):
record_kwargs = {}
fields = GEO_SERVICE_METADATA_AGREED_FIELDS.copy()
fields.remove('is_latest')
fields.remove('title')
for field_name in fields:
record_kwargs[field_name] = getattr(record_object, field_name)
record_kwargs = _normalize_kwargs(record_kwargs, record_object)
record_kwargs['imported'] = from_import
if 'created' not in record_kwargs:
record_kwargs['created'] = timezone.datetime(year=2016, month=9, day=30)
return record_kwargs
def _create_new_entry(apps, model_kwargs):
    """Insert one GeoServiceMetadata row, collapsing unknown geodata types.

    ``geodata_type`` is lower-cased, and anything other than 'raster' or
    'vector' is mapped to 'other' before the row is created.
    """
    metadata_model = apps.get_model("api", "GeoServiceMetadata")
    lowered = model_kwargs['geodata_type'].lower()
    if lowered in ('raster', 'vector'):
        model_kwargs['geodata_type'] = lowered
    else:
        model_kwargs['geodata_type'] = 'other'
    metadata_model.objects.create(**model_kwargs)
def forward(apps, schema_editor):
# forward
HarvestedRecord = apps.get_model("api", "HarvestedRecord")
Record = apps.get_model("api", "Record")
for harvested_record in HarvestedRecord.objects.all():
_create_new_entry(apps, _extract_kwargs_(harvested_record, from_import=True))
for record in Record.objects.all():
_create_new_entry(apps, _extract_kwargs_(record, from_import=False))
def _kwargs_from_geo_service_metadata(geoservice_metadata_instance):
result_kwargs = {}
for field_name in GEO_SERVICE_METADATA_AGREED_FIELDS:
result_kwargs[field_name] = getattr(geoservice_metadata_instance, field_name)
result_kwargs['content'] = result_kwargs.pop('title')
return result_kwargs
def backward(apps, schemap_editor):
GeoServiceMetadata = apps.get_model("api", "GeoServiceMetadata")
GeoServiceMetadata.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('api', '0004_geoservicemetadata'),
]
operations = [
migrations.RunPython(forward, backward),
]
| Python | 0.000003 | |
412de29818c955d878895ab31f54ef3079aa8d0e | Compute the quantity of fuel consumed | openfisca_france_indirect_taxation/example/example_ticpe/compute_quantite_carburants.py | openfisca_france_indirect_taxation/example/example_ticpe/compute_quantite_carburants.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 14:32:30 2015
@author: thomas.douenne
"""
from __future__ import division
import numpy as np
from openfisca_france_indirect_taxation.example.utils_example import simulate_df
from openfisca_france_indirect_taxation.model.get_dataframe_from_legislation.get_accises import \
get_accise_ticpe_majoree
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
# Liste des variables que l'on veut simuler
var_to_be_simulated_with_e10 = [
'pondmen',
'revtot',
'somme_coicop12_conso',
'rev_disp_loyerimput',
'ticpe_totale',
'sp_e10_ticpe',
'sp95_ticpe',
'sp98_ticpe',
'super_plombe_ticpe',
'diesel_ticpe',
'paysnai'
]
var_to_be_simulated_without_e10 = [
'pondmen',
'revtot',
'somme_coicop12_conso',
'rev_disp_loyerimput',
'ticpe_totale',
'sp95_ticpe',
'sp98_ticpe',
'super_plombe_ticpe',
'diesel_ticpe',
'paysnai'
]
quantites_carburants_consommees = dict()
for year in [2000, 2005, 2011]:
try:
data_simulation = simulate_df(var_to_be_simulated = var_to_be_simulated_with_e10, year = year)
data_simulation['diesel_ticpe_ponderee'] = data_simulation['diesel_ticpe'] * data_simulation['pondmen']
data_simulation['sp95_ticpe_ponderee'] = data_simulation['sp95_ticpe'] * data_simulation['pondmen']
data_simulation['sp98_ticpe_ponderee'] = data_simulation['sp98_ticpe'] * data_simulation['pondmen']
data_simulation['sp_e10_ticpe_ponderee'] = data_simulation['sp_e10_ticpe'] * data_simulation['pondmen']
data_simulation['super_plombe_ticpe_ponderee'] = \
data_simulation['super_plombe_ticpe'] * data_simulation['pondmen']
except:
data_simulation = simulate_df(var_to_be_simulated = var_to_be_simulated_without_e10, year = year)
data_simulation['diesel_ticpe_ponderee'] = data_simulation['diesel_ticpe'] * data_simulation['pondmen']
data_simulation['sp95_ticpe_ponderee'] = data_simulation['sp95_ticpe'] * data_simulation['pondmen']
data_simulation['sp98_ticpe_ponderee'] = data_simulation['sp98_ticpe'] * data_simulation['pondmen']
data_simulation['sp_e10_ticpe_ponderee'] = 0
data_simulation['super_plombe_ticpe_ponderee'] = \
data_simulation['super_plombe_ticpe'] * data_simulation['pondmen']
liste_carburants_accise = get_accise_ticpe_majoree()
value_accise_diesel = liste_carburants_accise['accise majoree diesel'].loc[u'{}'.format(year)] / 100
value_accise_sp = liste_carburants_accise['accise majoree sans plomb'].loc[u'{}'.format(year)] / 100
value_accise_super_plombe = \
liste_carburants_accise['accise majoree super plombe'].loc[u'{}'.format(year)] / 100
quantite_diesel = data_simulation['diesel_ticpe_ponderee'].sum() / (value_accise_diesel)
quantite_sans_plomb = (
data_simulation['sp95_ticpe_ponderee'].sum() + data_simulation['sp98_ticpe_ponderee'].sum() +
data_simulation['sp_e10_ticpe_ponderee'].sum()) / (value_accise_sp)
quantite_super_plombe = data_simulation['super_plombe_ticpe_ponderee'].sum() / (value_accise_super_plombe)
if quantite_super_plombe == np.nan:
quantite_essence = quantite_sans_plomb + quantite_super_plombe
else:
quantite_essence = quantite_sans_plomb
quantite_carburants = quantite_diesel + quantite_essence
quantites_carburants_consommees['en milliers de m3 en {}'.format(year)] = quantite_carburants / 1000000
| Python | 0.999989 | |
7c8f2464b303b2a40f7434a0c26b7f88c93b6ddf | add migration | corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py | corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounting', '0035_kill_date_received'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='skip_invoicing_if_no_feature_charges',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
| Python | 0.000001 | |
5e765ecf387d52c22371a69df82beacddcd12e38 | Test COREID is read-only. | revelation/test/test_storage.py | revelation/test/test_storage.py | from revelation.test.machine import StateChecker, new_state
def test_coreid_read_only():
    """COREID must ignore register-file writes but accept memory writes.

    Writing COREID through the register file (index 0x65 here) must be a
    no-op, whereas writing its memory-mapped location succeeds -- that path
    has to work because it is how COREID is set when the state is built.
    """
    state = new_state(rfCOREID=0x808)
    # Change by writing to register.
    # Expected: no effect -- the value stays 0x808.
    state.rf[0x65] = 0x100
    expected_state = StateChecker(rfCOREID=0x808)
    expected_state.check(state)
    # Change by writing to memory.
    # This _is_ possible, because we need to be able to write the COREID
    # location when the state is initially constructed.
    # NOTE(review): 0x808f0704 is presumably the memory-mapped COREID
    # location for core 0x808 -- confirm against the memory map.
    state.mem.write(0x808f0704, 12, 0x100)
    expected_state = StateChecker(rfCOREID=0x100)
    expected_state.check(state)
| Python | 0 | |
6af41b8b1ff4a6eb28167a063668a1f173999e5c | Create cornersMapping.py | cornersMapping.py | cornersMapping.py |
import csv
import requests
import time
import json
username = ""
def requestGeoName(row):
#parts = row.split(',')
lng = row[0]
lat = row[1]
r = requests.get("http://api.geonames.org/findNearestIntersectionOSMJSON?lat="+lat+"&lng="+lng+"&username="+username)
if (r.status_code == 200):
return r.json()
else:
return {"error":r.status_code}
def requestNameWsUsig(row):
    """Fallback reverse-geocode via the Buenos Aires USIG web services.

    Used when GeoNames returns no intersection for a point.  ``row`` is
    ``(lng, lat)`` (same layout as in requestGeoName).  Returns a dict
    shaped like the GeoNames reply so the caller can treat both sources
    uniformly.  Note that ``street1`` and ``street2`` are both filled from
    the same ``esquina`` field -- the USIG reply does not split the corner
    into two street names.
    """
    x = row[0]
    y = row[1]
    reqReverseGeo = requests.get("http://ws.usig.buenosaires.gob.ar/geocoder/2.2/reversegeocoding?y={0}&x={1}".format(y,x))
    # The reply is apparently JSONP-style, wrapped in parentheses; strip
    # them before parsing.
    resReverseGeo = json.loads(reqReverseGeo.content.replace("(", "").replace(")", ""), encoding="utf-8")
    # Convert the doorway ("puerta") coordinates back to lon/lat.
    reqConvertirCoord = requests.get("http://ws.usig.buenosaires.gob.ar/rest/convertir_coordenadas?x={0}&y={1}&output=lonlat".format(resReverseGeo["puerta_x"], resReverseGeo["puerta_y"]))
    resConvertirCoord = reqConvertirCoord.json()
    result = { "intersection" : {
        "lng" : resConvertirCoord["resultado"]["x"],
        "lat" : resConvertirCoord["resultado"]["y"],
        "street1" : resReverseGeo["esquina"],
        "street2" : resReverseGeo["esquina"]
    }}
    return result
with open('mostSearchedPlaces.csv', 'rb') as csvfile:
with open('mostSearchedPlacesWithCorners.csv', 'a') as outputCSV:
csv_writer = csv.writer(outputCSV, delimiter=',')
reader = csv.reader(csvfile, delimiter = ',')
i = 1
for row in reader:
geoNameResult = requestGeoName(row)
# Check if there is no intersection
if (geoNameResult == {}):
geoNameResult = requestNameWsUsig(row)
print(geoNameResult)
if (not geoNameResult.has_key("error")):
row.append(str(geoNameResult["intersection"]["lng"]))
row.append(str(geoNameResult["intersection"]["lat"]))
row.append(geoNameResult["intersection"]["street1"].encode("utf-8"))
row.append(geoNameResult["intersection"]["street2"].encode("utf-8"))
csv_writer.writerow(row)
print("Elemento {0} procesado".format(i))
i += 1
time.sleep(2)
| Python | 0.000001 | |
b2d0eaca41f6c697006eeaef38b72af649415d2b | Create models.py | {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py | {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py | # -*- encoding: utf-8 -*-
# ! python2
| Python | 0.000001 | |
d1b4cbfbc3956fc72bd183dbc219c4e7e8bdfb98 | add reproducer for LWT bug with static-column conditions | test/cql-pytest/test_lwt.py | test/cql-pytest/test_lwt.py | # Copyright 2020-present ScyllaDB
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#############################################################################
# Various tests for Light-Weight Transactions (LWT) support in Scylla.
# Note that we have many more LWT tests in the cql-repl framework:
# ../cql/lwt*_test.cql, ../cql/cassandra_cql_test.cql.
#############################################################################
import re
import pytest
from cassandra.protocol import InvalidRequest
from util import new_test_table, unique_key_int
@pytest.fixture(scope="module")
def table1(cql, test_keyspace):
schema='p int, c int, r int, s int static, PRIMARY KEY(p, c)'
with new_test_table(cql, test_keyspace, schema) as table:
yield table
# An LWT UPDATE whose condition uses non-static columns begins by reading
# the clustering row which must be specified by the WHERE. If there is a
# static column in the partition, it is read as well. The value of the all
# these columns - regular and static - is then passed to the condition.
# As discovered in issue #10081, if the row determined by WHERE does NOT
# exist, Scylla still needs to read the static column, but forgets to do so.
# this test reproduces this issue.
@pytest.mark.xfail(reason="Issue #10081")
def test_lwt_missing_row_with_static(cql, table1):
    """Reproducer for issue #10081: an LWT condition evaluated against a
    non-existent clustering row must still read the partition's static
    column (instead of treating it as null)."""
    p = unique_key_int()
    # Insert into partition p just the static column - and no clustering rows.
    cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
    # Now, do an update with WHERE p={p} AND c=1. This clustering row does
    # *not* exist, so we expect to see r=null - and s=1 from before.
    r = list(cql.execute(f'UPDATE {table1} SET s=2,r=1 WHERE p={p} AND c=1 IF s=1 and r=null'))
    assert len(r) == 1
    assert r[0].applied == True
    # At this point we should have one row, for c=1
    assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [(p, 1, 2, 1)]
# The fact that to reproduce #10081 above we needed the condition (IF) to
# mention a non-static column as well, suggests that Scylla has a different code
# path for the case that the condition has *only* static columns. In fact,
# in that case, the WHERE doesn't even need to specify the clustering key -
# the partition key should be enough. The following test confirms that this
# is indeed the case.
def test_lwt_static_condition(cql, table1):
p = unique_key_int()
cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
# When the condition only mentions static (partition-wide) columns,
# it is allowed not to specify the clustering key in the WHERE:
r = list(cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF s=1'))
assert len(r) == 1
assert r[0].applied == True
assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [(p, None, 2, None)]
# When the condition also mentions a non-static column, WHERE must point
# to a clustering column, i.e., mention the clustering key. If the
# clustering key is missing, we get an InvalidRequest error, where the
# message is slightly different between Scylla and Cassandra ("Missing
# mandatory PRIMARY KEY part c" and "Some clustering keys are missing: c",
# respectively.
with pytest.raises(InvalidRequest, match=re.compile('missing', re.IGNORECASE)):
cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF r=1')
| Python | 0.000043 | |
89c17110f9d17e99ea7686e884cfba91b4762d57 | Add starter code for Lahman db | pybaseball/lahman.py | pybaseball/lahman.py | ################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s=requests.get(url,stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
| Python | 0 | |
8eafb1b613363f85c9b105812cd5d0047e5ca6ff | Add warp example script | image_processing/warp_image.py | image_processing/warp_image.py | import argparse
import cv2
import numpy as np
import matplotlib.pyplot as plt
from constants import MAX_WIDTH, MAX_HEIGHT
# Transform Parameters
y = 90
a = 0.75
delta = (MAX_HEIGHT - y) * a
height, width = 500, 320
# Orignal and transformed keypoints
pts1 = np.float32(
[[delta, y],
[MAX_WIDTH - delta, y],
[0, MAX_HEIGHT],
[MAX_WIDTH, MAX_HEIGHT]])
pts2 = np.float32(
[[0, 0],
[width, 0],
[0, height],
[width, height]])
# Translation Matrix
tx, ty = 300, 500
T = np.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
new_height, new_width = height + ty, int(width * 1.5) + tx
# calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(pts1, pts2)
def imshow(im, y=None, delta=None, name=""):
    """Display a BGR image in a matplotlib figure named ``name``.

    When ``y`` is given, additionally draw the three visible edges of the
    source trapezoid used for the perspective transform (``delta`` is the
    horizontal inset of its top edge).
    """
    plt.figure(name)
    plt.imshow(im[:, :, ::-1])  # OpenCV is BGR; flip channels for matplotlib
    if y is not None:
        left_edge = ([0, delta], [MAX_HEIGHT, y])
        right_edge = ([MAX_WIDTH, MAX_WIDTH - delta], [MAX_HEIGHT, y])
        top_edge = ([delta, MAX_WIDTH - delta], [y, y])
        for xs, ys in (left_edge, right_edge, top_edge):
            plt.plot(xs, ys)
    plt.grid(True)
def showTransform(image, y, delta):
    """Show a copy of ``image`` with the four source keypoints marked on it."""
    annotated = image.copy()
    for point in pts1:
        center = (int(point[0]), int(point[1]))
        cv2.circle(annotated, center, 8, (0, 255, 0), -1)  # filled green dot
    imshow(annotated, y, delta, name="transform")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Transform image to have a top down view')
parser.add_argument('-i', '--input_image', help='Input image', type=str, required=True)
args = parser.parse_args()
image = cv2.imread(args.input_image)
assert image is not None, "Could not read image"
orignal_image = image.copy()
warp = cv2.warpPerspective(orignal_image, np.dot(T, M), (new_width, new_height))
imshow(image, name="original")
showTransform(image, y, delta)
imshow(warp, name="warped")
plt.show()
| Python | 0 | |
77dfcc41b718ed26e9291b9efc47b0589b951fb8 | Create 0001.py | pylyria/0001/0001.py | pylyria/0001/0001.py | 1
| Python | 0.000252 | |
d412ec65777431cdd696593ddecd0ee37a500b25 | Create 0011.py | pylyria/0011/0011.py | pylyria/0011/0011.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
def is_sensitive(word):
    """Return True if ``word`` (stripped, case-insensitive) is a sensitive word.

    ``sensitive.txt`` is re-read on every call, so edits to the word list
    take effect immediately.  The file is assumed to hold one lower-case
    word per line.
    """
    # ``with`` closes the handle (the original open() in a list
    # comprehension was never closed), and a set gives O(1) membership.
    with open('sensitive.txt', encoding='utf-8') as handle:
        sensitive_words = {line.strip() for line in handle}
    return word.strip().lower() in sensitive_words
if __name__ == "__main__":
    # Interactive filter: read a word from stdin on each iteration and
    # report whether it is on the sensitive list.  Loops until EOF/interrupt.
    while 1:
        if is_sensitive(input()):
            print('Freedom')
        else:
            print('Human Rights')
| Python | 0.000054 | |
0f4700ee5fccb0cc3996f19f4d6afae7fe1da3a0 | Add file for concatenating and splitting bzip files | python/split_bzip.py | python/split_bzip.py | #!/usr/bin/env python
#######
####### split_bzip.py
#######
####### Copyright (c) 2011 Ben Wing.
#######
import sys, re
import math
import fileinput
from subprocess import *
from nlputil import *
import itertools
import time
import os.path
import traceback
############################################################################
# Quick Start #
############################################################################
# This program reads in data from the specified bzipped files, concatenates
# them, splits them at newlines after a certain amount of data has been
# read, and bzips the results. The files are assumed to contain tweets in
# JSON format, and the resulting split files after named after the date
# in the first tweet of the split. We take some care to ensure that we
# start the file with a valid tweet, in case something invalid is in the
# file.
#######################################################################
### Utility functions ###
#######################################################################
def uniprint(text, file=sys.stdout):
'''Print Unicode text in UTF-8, so it can be output without errors'''
if type(text) is unicode:
print text.encode("utf-8")
else:
print text
def errprint(text):
uniprint(text, sys.stderr)
def warning(text):
'''Output a warning, formatting into UTF-8 as necessary'''
if show_warnings:
uniprint("Warning: %s" % text)
#######################################################################
# Process files #
#######################################################################
def finish_outproc(outproc):
    # Close the pipe feeding bzip2 so it sees EOF, then wait for the
    # process to flush and exit before starting the next split file.
    outproc.stdin.close()
    errprint("Waiting for termination ...")
    outproc.wait()
    errprint("Waiting for termination ... done.")
def split_tweet_bzip_files(opts, args):
status = StatusMessage("tweet")
totalsize = 0
outproc = None
lines_at_start = []
for infile in args:
inproc = Popen("bzcat", stdin=open(infile, "rb"), stdout=PIPE)
for full_line in inproc.stdout:
line = full_line[:-1]
status.item_processed()
if not line.startswith('{"'):
errprint("Unparsable line, not JSON?, #%s: %s" % (status.num_processed(), line))
else:
if not outproc or totalsize >= opts.split_size:
json = None
try:
json = split_json(line)
except Exception, exc:
errprint("Exception parsing JSON in line #%s: %s" % (status.num_processed(), line))
errprint("Exception is %s" % exc)
traceback.print_exc()
if json:
json = json[0]
#errprint("Processing JSON %s:" % json)
#errprint("Length: %s" % len(json))
for i in xrange(len(json)):
#errprint("Saw %s=%s" % (i, json[i]))
if json[i] == '"created_at"':
#errprint("Saw created")
if i + 2 >= len(json) or json[i+1] != ':' or json[i+2][0] != '"' or json[i+2][-1] != '"':
errprint("Something weird with JSON in line #%s, around here: %s" % (status.num_processed(), json[i-1:i+4]))
else:
json_time = json[i+2][1:-1].replace(" +0000 ", " UTC ")
tweet_time = time.strptime(json_time,
"%a %b %d %H:%M:%S %Z %Y")
if not tweet_time:
errprint("Can't parse time in line #%s: %s" % (status.num_processed(), json_time))
else:
timesuff = time.strftime("%Y-%M-%d.%H%M", tweet_time)
def make_filename(suff):
return opts.output_prefix + suff + ".bz2"
outfile = make_filename(timesuff)
if os.path.exists(outfile):
errprint("Warning, path %s exists, not overwriting" % outfile)
for ind in itertools.count(1):
# Use _ not - because - sorts before the . of .bz2 but
# _ sorts after (as well as after all letters and numbers).
outfile = make_filename(timesuff + ("_%03d" % ind))
if not os.path.exists(outfile):
break
if outproc:
finish_outproc(outproc)
errprint("About to write to %s..." % outfile)
outproc = Popen("bzip2", stdin=PIPE, stdout=open(outfile, "wb"))
totalsize = 0
break
if outproc:
starttext = ''.join(lines_at_start)
if lines_at_start:
outproc.stdin.write(starttext)
totalsize += len(starttext)
lines_at_start = None # So we can't add any more lines to catch bugs
outproc.stdin.write(full_line)
totalsize += len(full_line)
else:
lines_at_start += [full_line]
if outproc:
finish_outproc(outproc)
# A very simple JSON splitter. Doesn't take the next step of assembling
# into dictionaries, but easily could.
def split_json(line):
    """Tokenise a JSON string into nested lists of raw tokens.

    String literals and punctuation become individual tokens; ``{...}`` and
    ``[...]`` groups become nested lists.  Nothing is decoded -- callers
    inspect the raw token strings (quotes included).
    """
    tokens = re.split(r'("(?:\\.|[^"])*?"|[][:{},])', line)
    split = (x for x in tokens if x)  # drop empty strings between delimiters
    def get_nested(endnest):
        # Collect tokens until the matching closer ``endnest``, recursing on
        # openers.  At top level (endnest is None) exhaustion of the input
        # is normal termination; inside a group it means unbalanced input,
        # so the StopIteration is propagated.
        nest = []
        try:
            while True:
                item = next(split)
                if item == endnest:
                    return nest
                elif item == '{':
                    nest.append(get_nested('}'))
                elif item == '[':
                    nest.append(get_nested(']'))
                else:
                    nest.append(item)
        except StopIteration:
            if not endnest:
                return nest
            raise
    return get_nested(None)
#######################################################################
# Main code #
#######################################################################
def main():
op = OptionParser(usage="%prog [options] input_dir")
op.add_option("-s", "--split-size", metavar="SIZE",
type="int", default=1000000000,
help="""Size (uncompressed) of each split. Note that JSON
tweets compress in bzip about 8 to 1, hence 1 GB is a good uncompressed size
for Hadoop. Default %default.""")
op.add_option("-o", "--output-prefix", metavar="PREFIX",
help="""Prefix to use for all splits.""")
opts, args = op.parse_args()
if not opts.output_prefix:
op.error("Must specify output prefix using -o or --output-prefix")
if not args:
op.error("No input files specified")
split_tweet_bzip_files(opts, args)
main()
| Python | 0 | |
db38238b26c4050122c0902f162c1dc84358e66d | Create features.py | dasem/features.py | dasem/features.py | """features.
Usage:
dasem.features lines-to-feature-matrix [options]
Options:
--debug Debug messages.
-h --help Help message
-i --input=<file> Input file
--ie=encoding Input encoding [default: utf-8]
--oe=encoding Output encoding [default: utf-8]
-o --output=<file> Output filename, default output to stdout
--separator=<sep> Separator [default: |]
--verbose Verbose messages.
"""
from __future__ import absolute_import, division, print_function
import codecs
import logging
import signal
import sys
from afinn import Afinn
from nltk import WordPunctTokenizer
import numpy as np
from sklearn.base import BaseEstimator
class FeatureExtractor(BaseEstimator):
    """Feature extractor for Danish texts.

    Turns an iterable of documents into a numeric matrix with one row per
    document and one column per entry of :attr:`features_` (simple counts
    plus AFINN sentiment aggregates).
    """

    def __init__(self):
        """Set up the text processors (AFINN lexicon and word tokenizer)."""
        self.afinn = Afinn(language='da')
        self.word_tokenizer = WordPunctTokenizer()

    def partial_fit(self, Y, y=None):
        """Fit model.

        This is a dummy function: the extractor is stateless, but the
        method is kept for scikit-learn API compatibility.
        """
        return self

    def fit(self, X, y=None):
        """Fit model.

        This is a dummy function: the extractor is stateless, but the
        method is kept for scikit-learn API compatibility.
        """
        return self

    @property
    def features_(self):
        """Names of the extracted features, in `transform` column order."""
        features = [
            'n_characters',
            'n_words',
            'n_unique_words',
            'afinn_sum_valence',
            'afinn_sum_arousal',
            'afinn_sum_ambiguity'
        ]
        return features

    def transform(self, raw_documents, y=None):
        """Transform documents to features.

        Parameters
        ----------
        raw_documents : iterable over str
            Iterable with corpus to be transformed.

        y : numpy.array
            Target (not used, dummy parameter).

        Returns
        -------
        numpy.ndarray
            Matrix of shape (n_documents, len(self.features_)).
        """
        X = []
        # NOTE: the original enumerate index was unused and has been dropped.
        for document in raw_documents:
            words = self.word_tokenizer.tokenize(document)
            unique_words = set(words)

            scores = self.afinn.scores(document)
            sum_valence = sum(scores)
            # Arousal: total magnitude of sentiment, regardless of sign.
            sum_arousal = np.sum(np.abs(scores))

            X.append([
                len(document),
                len(words),
                len(unique_words),
                sum_valence,
                sum_arousal,
                # Ambiguity: how much positive and negative scores cancel.
                sum_arousal - abs(sum_valence)
            ])
        X = np.array(X)
        return X

    fit_transform = transform
def main():
    """Handle command-line interface."""
    from docopt import docopt

    arguments = docopt(__doc__)

    # Configure logging verbosity from the command-line flags.
    logging_level = logging.WARN
    if arguments['--debug']:
        logging_level = logging.DEBUG
    elif arguments['--verbose']:
        logging_level = logging.INFO

    logger = logging.getLogger()
    logger.setLevel(logging_level)
    logging_handler = logging.StreamHandler()
    logging_handler.setLevel(logging_level)
    logging_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logging_handler.setFormatter(logging_formatter)
    logger.addHandler(logging_handler)

    # Ignore broken pipe errors so output can be piped to e.g. `head`.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    if arguments['--output']:
        output_filename = arguments['--output']
    else:
        output_filename = None

    if arguments['--input']:
        input_filename = arguments['--input']
    else:
        input_filename = None

    input_encoding = arguments['--ie']

    if arguments['lines-to-feature-matrix']:
        extractor = FeatureExtractor()

        # Major Python version; decides whether codecs wraps the raw
        # (buffered) byte stream or the text stream.  sys.version_info is
        # more robust than parsing the sys.version string.
        version = sys.version_info[0]
        if input_filename:
            input_file = codecs.open(input_filename, encoding=input_encoding)
        else:
            if version == 2:
                input_file = codecs.getreader(input_encoding)(sys.stdin)
            elif version == 3:
                input_file = codecs.getreader(input_encoding)(sys.stdin.buffer)
            else:
                assert False
            input_filename = 'STDIN'

        logger.info('Reading text from {}'.format(input_filename))
        X = extractor.transform(input_file)

        header = "," + ",".join(extractor.features_)
        if output_filename:
            logger.info('Writing data to {}'.format(output_filename))
            np.savetxt(output_filename, X, header=header)
        else:
            # The original called .format() here with no placeholders.
            logger.info('Writing data to STDOUT')
            if version == 2:
                np.savetxt(sys.stdout, X, header=header)
            elif version == 3:
                np.savetxt(sys.stdout.buffer, X, header=header)
            else:
                assert False
    else:
        print(__doc__)
| Python | 0 | |
5052318d2802284a0331fc77fd7d02bdaca39f42 | test if a layer is working fine | scripts/feature_extract_test.py | scripts/feature_extract_test.py | """Feature extraction test"""
import numpy as np;
import sys
import theano;
import theano.tensor as T;
sys.path.append("..")
import scae_destin.datasets as ds;
from scae_destin.convnet import ReLUConvLayer;
from scae_destin.convnet import LCNLayer
# Hyper-parameters for this feature-extraction smoke test.
n_epochs=1;
batch_size=100;
# Load CIFAR-10 and collapse RGB to grayscale by averaging over the last
# (channel) axis.
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("/home/tejas/Desktop/cifar-10-batches-py");
Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);
# Flatten each 32x32 image into a single row vector.
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])
# Move the data into theano shared variables for GPU-friendly access.
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));
# NOTE(review): n_epochs, y and the two batch counts below are computed but
# never used by this script.
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
# Symbolic inputs: a minibatch of flattened images and a batch index.
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
# Reshape back to (batch, channel, height, width) for the conv layer.
images=X.reshape((batch_size, 1, 32, 32))
# Single LCN layer under test.
layer_0=LCNLayer(filter_size=(7,7),
                 num_filters=50,
                 num_channels=1,
                 fm_size=(32,32),
                 batch_size=batch_size,
                 border_mode="full");
# Compile a function running one minibatch through the layer; printing the
# output shape checks that the layer is wired correctly.
extract=theano.function(inputs=[idx],
                        outputs=layer_0.apply(images),
                        givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});
print extract(1).shape
47ebaa10068313c9b8fbbf2e3ffcf06597f88ff6 | add npy2png file converter | convert_npy2image.py | convert_npy2image.py | import sys
import math
import copy
import pylab
import numpy
from Image import fromarray
from scipy.misc import imread, toimage
cmin = 0
cmax = 2**8 - 1
def convert(file_in, file_out, index=None) :
i = 0
max_count = 0
while (True) :
try :
input_image = numpy.load(file_in + '/image_%07d.npy' % (i))
except Exception :
break
output_image = file_out + '/image_%07d.png' % (i)
#output_image = file_out + '/image_%07d.png' % (i/26)
# data for tirfm
#image_array = input_image[256-25:256+25,256-25:256+26,1]
#image_array = input_image[256-76:256+76,256-78:256+78,1]
#image_array = input_image[300-50:300+50,300-50:300+50,1]
#image_array = input_image[512-45:512+45,512-45:512+45,1]
image_array = input_image[:,:,1]
#image_exp += numpy.array(image_array)
amax = numpy.amax(image_array)
amin = numpy.amin(image_array)
if (max_count < amax) :
max_count = amax
#print i/26, amax, amin
print i, amax, amin
# 16-bit data format
#image_array.astype('uint16')
#toimage(image_array, low=cmin, high=cmax, mode='I').save(output_image)
# 8-bit data format (for making movie)
toimage(image_array, cmin=cmin, cmax=cmax).save(output_image)
#i += 26
i += 1
print 'Max count : ', max_count, 'ADC'
if __name__=='__main__':
    # Hard-coded source (.npy frames) and destination (.png) directories.
    file_in = '/home/masaki/microscopy/images'
    file_out = '/home/masaki/microscopy/images_png'
    convert(file_in, file_out)
| Python | 0 | |
211e9e9352234f5638036b5b1ec85f998609d587 | Add a primitive MITM proxy | diana/utils/proxy.py | diana/utils/proxy.py | from diana import packet
import argparse
import asyncio
import sys
import socket
from functools import partial
class Buffer:
def __init__(self, provenance):
self.buffer = b''
self.provenance = provenance
def eat(self, data):
self.buffer += data
packets, self.buffer = packet.decode(self.buffer, provenance=self.provenance)
return packets
BLOCKSIZE = 1024
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simple Artemis SBS proxy')
parser.add_argument('proxy_port', type=int, help='Server port')
parser.add_argument('address', help='Server address (DNS, IPv4 or IPv6)')
parser.add_argument('port', type=int, nargs='?', default=2010, help='Server port')
args = parser.parse_args()
loop = asyncio.get_event_loop()
@asyncio.coroutine
def transit(reader, writer, provenance, tag):
buf = Buffer(provenance)
while True:
data = yield from reader.read(BLOCKSIZE)
for pkt in buf.eat(data):
writer.write(packet.encode(pkt, provenance=provenance))
sys.stdout.write('{} {}\n'.format(tag, pkt))
sys.stdout.flush()
@asyncio.coroutine
def handle_p2c(client_reader, client_writer):
server_reader, server_writer = yield from asyncio.open_connection(args.address,
args.port,
loop=loop)
asyncio.async(transit(client_reader, server_writer,
provenance=packet.PacketProvenance.client,
tag='[C>S]'), loop=loop)
asyncio.async(transit(server_reader, client_writer,
provenance=packet.PacketProvenance.server,
tag='[C<S]'), loop=loop)
svr = asyncio.start_server(handle_p2c, '127.0.0.1', args.proxy_port, loop=loop)
server = loop.run_until_complete(svr)
loop.run_forever()
| Python | 0.000066 | |
e6c60057a3c5d3f985633bdcbc0a6477d9ebe6c4 | Add MediaSaver.py | tools/tcam-capture/tcam_capture/MediaSaver.py | tools/tcam-capture/tcam_capture/MediaSaver.py | # Copyright 2018 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .Encoder import MediaType, get_encoder_dict, Encoder
from .FileNameGenerator import FileNameGenerator
from PyQt5.QtCore import pyqtSignal, QObject
import os
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import logging
log = logging.getLogger(__name__)
class MediaSaver(QObject):
    """Saves images or videos built from Gst.Buffer objects.

    An appsrc-based GStreamer pipeline encodes buffers passed to
    feed_image() and writes them to disk: single snapshot images via
    multifilesink, or a continuous video file via filesink, depending on
    the MediaType given at construction.
    """

    # Emitted with the file name once a file has been written.
    saved = pyqtSignal(str)
    # Emitted with a description when saving fails.
    error = pyqtSignal(str)

    def __init__(self, serial,
                 caps: Gst.Caps,
                 media_type: MediaType):
        """
        Args:
            serial: Camera serial number, used in generated file names.
            caps: Caps describing the buffers that will be fed in.
            media_type: Whether this instance saves images or video.
        """
        super(MediaSaver, self).__init__()

        self.pipeline = None
        self.src = None
        self.serial = serial
        self.index = 0
        self.location = "/tmp/"
        self.media_type = media_type
        self.encoder_dict = get_encoder_dict()
        self.video_encoder = self.encoder_dict["mpeg2"]
        self.image_encoder = self.encoder_dict["png"]
        self.caps = caps
        self.accept_buffer = False
        self.fng = FileNameGenerator()
        self.working = False
        self.queue_counter = 0
        self.src_name = ""
        self.sink_name = ""
        self.pipeline_name = ""
        self.__find_element_names()

    def __del__(self):
        """
        This destructor exists to ensure a graceful exit of all gstreamer pipelines
        """
        if self.pipeline:
            self.pipeline.set_state(Gst.State.NULL)
            self.src = None
            self.pipeline = None

    def __find_element_names(self):
        """
        Create strings describing the element names for our instance
        """
        if self.media_type == MediaType.image:
            self.src_name = "mediasaver-image-src"
            self.sink_name = "mediasaver-image-sink"
            self.pipeline_name = "mediasaver-image-pipeline"
        else:
            self.src_name = "mediasaver-video-src"
            self.sink_name = "mediasaver-video-sink"
            self.pipeline_name = "mediasaver-video-pipeline"

    def set_encoder(self, enc_str: str):
        """Select the video encoder named *enc_str*.

        Returns False and leaves the selection unchanged when the named
        encoder is not a video encoder.
        """
        if self.encoder_dict[enc_str].encoder_type is not MediaType.video:
            return False
        # BUGFIX: this used to assign only `self.selected_video_encoder`,
        # which nothing reads, so _select_encoder() never saw the choice.
        self.video_encoder = self.encoder_dict[enc_str]
        # Kept for backward compatibility with the old attribute name.
        self.selected_video_encoder = self.video_encoder
        return True

    def set_caps(self, caps: Gst.Caps):
        """Update the caps used when the next pipeline is created."""
        self.caps = caps

    def _generate_location(self):
        """
        Generate the location string that is used by either
        filesink or multifilesink for saving files
        """
        fmt = FileNameGenerator.caps_to_fmt_string(self.caps)
        self.fng._create_replacement_dict(self.serial,
                                          fmt,
                                          self.index,
                                          self._select_encoder().file_ending)

        return (self.location + "/" + self.serial + "-%d." + self._select_encoder().file_ending)
        # return (self.location + "/" +
        #         self.fng.create_file_name(self.serial, fmt, counter=self.index, file_suffix=self._select_encoder().file_ending))

    def _select_encoder(self):
        """Return the encoder matching this instance's media type."""
        if self.media_type == MediaType.image:
            return self.image_encoder
        return self.video_encoder

    def _select_sink(self):
        """
        Select the appropriate gstreamer sink
        """
        if self.media_type == MediaType.image:
            # post-messages lets _bus_call pick up the written file name.
            return "multifilesink post-messages=true"
        return "filesink"

    def _create_pipeline(self):
        """
        Create a GstPipeline that contains our encoding, etc
        """
        location = self._generate_location()
        encoder = self._select_encoder()
        sink_str = self._select_sink()

        save_str = ("appsrc name={} is-live=true format=3 "
                    "! {} "
                    "! queue leaky=downstream "
                    "! videoconvert "
                    "! {} "
                    "! {} name={} location={}").format(self.src_name,
                                                       self.caps.to_string(),
                                                       encoder.module,
                                                       sink_str,
                                                       self.sink_name,
                                                       location)

        log.info("Using pipeline to save: '{}'".format(save_str))

        self.pipeline = Gst.parse_launch(save_str)
        self.src = self.pipeline.get_by_name(self.src_name)
        self.src.set_property("caps", self.caps)
        self.src.set_property("do-timestamp", True)
        self.pipeline.set_name(self.pipeline_name)

    def _bus_call(self, gst_bus, message):
        """
        Handle GStreamer bus messages from the saving pipeline.
        """
        t = message.type
        # log.info("Received msg from {}".format(message.src.get_name()))

        if message.src.get_name() == self.sink_name:
            log.info("{}".format(message.get_structure().to_string()))
            log.info("{}".format(message.get_structure().get_string("filename")))
            self.saved.emit(message.get_structure().get_string("filename"))

        if t == Gst.MessageType.EOS:
            log.info("Received EOS from {}".format(message.src.get_name()))

            if (message.src.get_name() == self.sink_name and
                    self.media_type == MediaType.video):
                self.saved.emit("")
                log.info("sink sent EOS {}".format(message.get_structure().to_string()))
            # EOS means the file is finalized; tear the pipeline down.
            self.pipeline.set_state(Gst.State.NULL)

            self.src = None
            self.pipeline = None
            self.working = False

    def feed_image(self, gstbuffer: Gst.Buffer):
        """
        Feed gstbuffer into the pipeline
        """
        if not self.working or not self.accept_buffer:
            return

        if self.src:
            self.src.emit("push-buffer", gstbuffer)
            if self.media_type == MediaType.image:
                log.info("pushing buffer")
                # Stop accepting buffers once all requested snapshots are in.
                self.queue_counter -= 1
                if self.queue_counter == 0:
                    self.accept_buffer = False

    def start_recording_video(self, encoder):
        """
        Start saving a video
        """
        # NOTE(review): the `encoder` argument is currently unused; the
        # active encoder is chosen via set_encoder() — confirm intent.
        if self.working:
            return
        if self.media_type != MediaType.video:
            return

        self._create_pipeline()
        self.pipeline.set_state(Gst.State.PLAYING)
        self.working = True
        self.accept_buffer = True
        log.info("Saving to....{}".format(self._generate_location()))
        self.index += 1
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._bus_call)

    def stop_recording_video(self):
        """
        Stop the video pipeline
        """
        if self.media_type != MediaType.video:
            return

        self.accept_buffer = False
        # EOS lets the pipeline flush and finalize the file; the actual
        # teardown happens in _bus_call once the EOS message arrives.
        self.pipeline.send_event(Gst.Event.new_eos())
        log.debug("Sent EOS to saving pipeline")

    def save_image(self, encoder: Encoder):
        """
        Trigger the saving of a single image
        """
        if self.media_type != MediaType.image:
            return
        if encoder.encoder_type != MediaType.image:
            log.error("Specified encoder can not be used for images. Aborting.")
            return

        self.image_encoder = encoder

        if not self.pipeline:
            self._create_pipeline()
            self.pipeline.set_state(Gst.State.PLAYING)
            bus = self.pipeline.get_bus()
            bus.add_signal_watch()
            bus.connect("message", self._bus_call)

        self.working = True
        self.accept_buffer = True
        self.queue_counter += 1
| Python | 0 | |
d890ef34b11200738687ec49a4a005bb9ebe7c2a | make the module executable | distance/__main__.py | distance/__main__.py | #!/usr/bin/env python
from . import __version__

# Entry point for `python -m distance`: report the installed version.
print(f"distanceutils version {__version__}")
# vim:set sw=4 ts=8 sts=4 et:
| Python | 0 | |
768b61316a10726a3281a514823f280abc142356 | move wild into its own folder | tests/integration/test_wild.py | tests/integration/test_wild.py | import pytest
requests = pytest.importorskip("requests")
import vcr
def test_domain_redirect():
    '''Ensure that redirects across domains are considered unique'''
    # In this example, seomoz.org redirects to moz.com, and if those
    # requests are considered identical, then we'll be stuck in a redirect
    # loop.
    # NOTE(review): performs a live HTTP request when the cassette file
    # domain_redirect.yaml is not present yet.
    url = 'http://seomoz.org/'
    with vcr.use_cassette('domain_redirect.yaml') as cass:
        requests.get(url, headers={'User-Agent': 'vcrpy-test'})

        # Ensure that we've now served two responses. One for the original
        # redirect, and a second for the actual fetch
        assert len(cass) == 2
| Python | 0 | |
c193aebdc76eae285df402463c149bef328c05ef | Add backwards-compatible registration.urls, but have it warn pending deprecation. | registration/urls.py | registration/urls.py | import warnings
# Emit a deprecation warning at import time, then re-export the default
# backend's URL patterns so legacy include('registration.urls') keeps working.
warnings.warn("Using include('registration.urls') is deprecated; use include('registration.backends.default.urls') instead",
              PendingDeprecationWarning)

from registration.backends.default.urls import *
| Python | 0 | |
fe88e0d8dc3d513cd11ef9ab4cb3ea332af99202 | Add migration | organization/network/migrations/0112_auto_20180502_1742.py | organization/network/migrations/0112_auto_20180502_1742.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2018-05-02 15:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema changes: add the Organization.is_main flag and the
    # per-language (title_en / title_fr) TeamLink title columns.

    dependencies = [
        ('organization-network', '0111_auto_20180307_1152'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='is_main',
            field=models.BooleanField(default=False, verbose_name='is main'),
        ),
        migrations.AddField(
            model_name='teamlink',
            name='title_en',
            field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='teamlink',
            name='title_fr',
            field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
        ),
    ]
| Python | 0.000002 | |
b82c7343af06c19e6938bd27359289ab067db1e9 | add expectation core (#4357) | contrib/experimental/great_expectations_experimental/expectations/expect_column_sum_to_be.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_sum_to_be.py | """
This is a template for creating custom ColumnExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_aggregate_expectations
"""
from typing import Dict, Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.expectations.expectation import ColumnExpectation
# This class defines the Expectation itself
class ExpectColumnSumToBe(ColumnExpectation):
    """Expect the sum of a column to be exactly a value."""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {"a": [1, 2, 3, 4, 5]},
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "a", "sum_total": 15},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "a", "sum_total": 14},
                    "out": {"success": False},
                },
            ],
            "test_backends": [
                {
                    "backend": "pandas",
                    "dialects": None,
                },
                {
                    "backend": "sqlalchemy",
                    "dialects": ["sqlite", "postgresql"],
                },
                {
                    "backend": "spark",
                    "dialects": None,
                },
            ],
        }
    ]

    # This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
    metric_dependencies = ("column.sum",)

    # This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
    success_keys = ("sum_total",)

    # This dictionary contains default values for any parameters that should have default values.
    default_kwarg_values = {}

    def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.

        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            True if the configuration has been validated successfully. Otherwise, raises an exception
        """

        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # NOTE(review): `configuration` is only consumed by the
        # commented-out template checks below; the fallback assignment is
        # dead code until those checks are filled in.

        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

        return True

    # This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        # Compare the computed column sum against the expected `sum_total`.
        actual_value = metrics["column.sum"]
        predicted_value = self.get_success_kwargs(configuration).get("sum_total")

        success = actual_value == predicted_value

        return {"success": success, "result": {"observed_value": actual_value}}

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "tags": [
            "column aggregate expectation",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@joshua-stauffer",  # Don't forget to add your github handle here!
        ],
    }
if __name__ == "__main__":
    # Run Great Expectations' self-check for this custom Expectation.
    ExpectColumnSumToBe().print_diagnostic_checklist()
| Python | 0 | |
10001d5c611e59dd426d829fa7c2242b5f93df0d | add element collection base | watir_snake/element_collection.py | watir_snake/element_collection.py | from importlib import import_module
import watir_snake
class ElementCollection(object):
    """Lazy collection of elements matching a selector within a scope."""
    # TODO: include Enumerable

    def __init__(self, query_scope, selector):
        self.query_scope = query_scope
        self.selector = selector
        self.as_list = []      # cached materialized elements
        self.elements = []     # raw located web elements

    def __iter__(self):
        """
        Yields each element in collection

        :rtype: iter

        :Example:

        divs = browser.divs(class='kls')
        for div in divs:
            print(div.text)
        """
        for e in self.to_list:
            yield e

    def __len__(self):
        """
        Returns the number of elements in the collection

        :rtype: int
        """
        return len(self.to_list)

    def __getitem__(self, idx):
        """
        Get the element at the given index

        Also note that because of lazy loading, this will return an Element instance even if
        the index is out of bounds

        :param idx: index of wanted element, 0-indexed
        :type idx: int
        :return: instance of Element subclass
        :rtype: watir_snake.elements.element.Element
        """
        # BUGFIX: indexing the materialized list used to raise IndexError
        # for out-of-bounds indices, contradicting the documented lazy
        # contract above.
        try:
            element = self.to_list[idx]
        except IndexError:
            element = None
        return element or self._element_class(self.query_scope,
                                              dict(index=idx, **self.selector))

    @property
    def to_list(self):
        """
        This collection as a list

        :rtype: list[watir_snake.elements.element.Element]
        """
        if not self.as_list:
            elements = []

            for idx, e in enumerate(self._elements):
                element = self._element_class(self.query_scope, dict(index=idx, **self.selector))
                # Generic HTML elements are narrowed to their concrete tag type.
                if self._element_class == watir_snake.elements.HTMLElement:
                    elements.append(element.to_subtype())
                else:
                    elements.append(element)

            self.as_list = elements

        return self.as_list

    def __eq__(self, other):
        """
        Returns true if two element collections are equal.

        :param other: other collection
        :rtype: bool

        :Example:

        browser.select_list(name='new_user_languages').options == \
            browser.select_list(id='new_user_languages').options   #=> True

        browser.select_list(name='new_user_role').options == \
            browser.select_list(id='new_user_languages').options   #=> False
        """
        return self.to_list == other.to_list

    eql = __eq__

    # private

    @property
    def _elements(self):
        # BUGFIX: referenced a non-existent `self._query_scope` attribute.
        if isinstance(self.query_scope, watir_snake.elements.IFrame):
            self.query_scope.switch_to()
        else:
            getattr(self.query_scope, 'assert_exists')()

        element_validator = self._element_validator_class()
        selector_builder = self._selector_builder_class(self.query_scope, self.selector,
                                                        self._element_class.attribute_list)
        # BUGFIX: pass the freshly built collaborators; the original
        # referenced undefined `self._selector_builder` and
        # `self._element_validator` attributes.
        locator = self._locator_class(self.query_scope, self.selector, selector_builder,
                                      element_validator)

        if not self.elements:
            # BUGFIX: `self.locator` never existed; use the local locator.
            self.elements = locator.locate_all()

        return self.elements

    @property
    def _locator_class(self):
        return self._import_module.Locator

    @property
    def _element_validator_class(self):
        return self._import_module.Validator

    @property
    def _selector_builder_class(self):
        return self._import_module.SelectorBuilder

    @property
    def _import_module(self):
        # Element-specific locator modules override the generic element one.
        modules = [watir_snake.locator_namespace.__name__, self._element_class_name.lower()]
        try:
            return import_module('watir_snake.{}.{}.locator'.format(*modules))
        except ImportError:
            return import_module('watir_snake.{}.element.locator'.format(*modules[:1]))

    @property
    def _element_class_name(self):
        return self._element_class.__name__

    @property
    def _element_class(self):
        # e.g. DivCollection -> watir_snake.elements.Div
        return getattr(watir_snake.elements, self.__class__.__name__.replace('Collection', ''))
| Python | 0 | |
a27c9a8ddf6ab1cd264b02afc95754da6b4bb058 | Add partial indexes | django-more/indexes.py | django-more/indexes.py | """ Define custom index types useful for SID and utils """
import hashlib
from django.db.models import Index, Q
from django.db import DEFAULT_DB_ALIAS
__all__ = ['PartialIndex']
class PartialIndex(Index):
suffix = "par"
def __init__(self, *args, fields=[], name=None, **kwargs):
self.q_filters = [arg for arg in args if isinstance(arg, Q)]
if kwargs:
self.q_filters.extend([Q(**{kwarg: val}) for kwarg, val in kwargs.items()])
super().__init__(fields, name)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
self.make_qs_compatible()
args += tuple(self.q_filters)
return path, args, kwargs
@staticmethod
def get_where_sql(query):
where, w_params = query.get_compiler(DEFAULT_DB_ALIAS).compile(query.where)
return " WHERE {}".format(where % (*w_params,))
def get_query(self, model):
return model.objects.filter(*self.q_filters).query
def get_sql_create_template_values(self, model, schema_editor, using):
parameters = super().get_sql_create_template_values(model, schema_editor, using=using)
# Create a queryset using the supplied filters to validate and generate WHERE
query = self.get_query(model)
# Access query compiler for WHERE directly
if query.where:
parameters["extra"] = self.get_where_sql(query)
return parameters
def make_qs_compatible(self):
if not hasattr(Q, "deconstruct"):
for q in [qf for qf in self.q_filters if isinstance(qf, Q)]:
q.__class__ = Qcompat
# Almost identical to default implementation but adds WHERE to hashing
def set_name_with_model(self, model):
table_name = model._meta.db_table
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
hash_data = [table_name] + column_names_with_order + [self.suffix] + [self.get_where_sql(self.get_query(model))]
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (self._hash_generator(*hash_data), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
self.check_name()
def __eq__(self, val):
if isinstance(val, PartialIndex):
# Use cheap repr() comparison on deconstruction to check if the same
return repr(self.deconstruct()) == repr(val.deconstruct())
# This feature is not present in Django 1.11 but is required for deconstruction of
# partial indexes. So if not present when needed, the Qs are wrapped in this
# shim class that adds a migration-compatible deconstruct().
class Qcompat(Q):

    def __init__(self, *args, **kwargs):
        # Mirror the modern Q.__init__ signature: private connector/negated
        # kwargs, everything else becomes children.
        connector = kwargs.pop('_connector', None)
        negated = kwargs.pop('_negated', False)
        # Deliberately skip Q's own __init__ (whose signature differs on
        # old Django) and call the tree.Node initializer directly.
        super(Q, self).__init__(children=list(args) + list(kwargs.items()), connector=connector, negated=negated)

    def deconstruct(self):
        # Produce (path, args, kwargs) suitable for reconstruction in
        # migrations, matching the format of modern Q.deconstruct().
        path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
        args, kwargs = (), {}
        # A single non-Q child is re-expressed as a keyword lookup; anything
        # more complex is kept positional with its connector preserved.
        if len(self.children) == 1 and not isinstance(self.children[0], Q):
            child = self.children[0]
            kwargs = {child[0]: child[1]}
        else:
            args = tuple(self.children)
            kwargs = {'_connector': self.connector}
        if self.negated:
            kwargs['_negated'] = True
        return path, args, kwargs
| Python | 0.000056 | |
2f582fa86aa5a8d47a066b4b47fd3425377dc05c | question 1.8 | crack_1_8.py | crack_1_8.py | '''
according http://hawstein.com/posts/1.8.html
algorithm is that str1= "12345",str2= "51234"
str1 = str1 + str1 = "1234512345"
as a result, str2 is subString of str1
'''
str1 = 'abcdefghi'
str2 = 'ihgfedcba'
def isSubString(str1, str2):
    """Return True if str2 occurs inside str1."""
    return str2 in str1


def isRotation(str1, str2):
    """Return True if str2 is a rotation of str1 (e.g. '51234' of '12345').

    Every rotation of str1 is a substring of str1 + str1, so one substring
    check suffices.  Strings of different lengths can never be rotations.
    """
    # BUGFIX: the original tested str2 against str1 + str2, which trivially
    # always contains str2, so every pair was reported as a rotation.
    if len(str1) != len(str2):
        return False
    return isSubString(str1 + str1, str2)
print isRotation(str1, str2) | Python | 0.999705 | |
07fcdfe3da7d5ffda3ff7139b2f8cd0f02a5ad06 | Create xml_to_text_new.py | xml_conversion/xml_to_text_new.py | xml_conversion/xml_to_text_new.py | ##Imports
import xml.etree.cElementTree as ET
from glob import glob
from time import time
import os
#############################################################################
# NOTE: When importing xml files, make sure the distances do not change #
# between files in the same folder. This will lead to errors #
#############################################################################
##!>Set working directory to correct folder (BR-DTS-Processing)
#working_directory = r'D:\Github\BR-DTS-Processing'
working_directory = r'C:\Users\Bart\Downloads\BR-DTS-Processing-master'
os.chdir(working_directory)

##Write data to txt file
#Get start time
ta = time()

#Get all xml files from the directory
#leave different channels in different directories!
file_names = sorted(glob(r'xml_conversion\\xml_example_2016\*.xml'))
file_amount = len(file_names)

#Open output file, write header
data_filename = r'xml_conversion\output\dts_data_V2.txt'
data_file = open(data_filename, 'w')
data_file.write('Ultima data_file. Next row; distances (m).')

#Get distances for the header row from the first xml file
tree = ET.ElementTree(file=file_names[0])
root = tree.getroot()

start_index = float(root[0][4].text)
end_index = float(root[0][5].text)
increment = float(root[0][6].text)  # read for reference; unused below
start_time = root[0][7].text        # read for reference; unused below
end_time = root[0][8].text          # read for reference; unused below

logdata = [x.text for x in root[0][15]]
data_strings = logdata[2:]
data_length = len(data_strings)

#Reconstruct the distance of each sample from the start/end indexes
diff = (end_index - start_index)/(data_length - 1)
distances = [str(diff * x + start_index)[0:9] for x in range(data_length)]

#Write distances to file
data_file.write('\n'+';'.join(distances))
#Write Time & temperature header
data_file.write('\nTime\tTemperature')

#Loop over all files and extract the timestamp and temperatures
for ii in range(0, file_amount):
    tree = ET.parse(file_names[ii])
    root = tree.getroot()
    #Abort if the measurement range changed between files; the header
    #distances would no longer match the data rows.
    #BUGFIX: the error message previously named file_names[0] instead of
    #the offending file.
    if not (float(root[0][4].text) == start_index and float(root[0][5].text) == end_index):
        raise Exception('Distance of file '+file_names[ii]+' does not match starting indexes! \n'
                        'Check if settings were changed in between files')
    #Copy timestamp from DTS to .txt
    timestamp = root[0][8].text[:-5]
    #Get the data values
    logdata = [x.text for x in root[0][15]]
    data_strings = logdata[2:]
    #get the temperature from the xml
    #Define full list first, then add values (for speed)
    #(inner loop variable renamed so it no longer shadows the outer ii)
    temperature = [None]*data_length
    for jj in range(0, data_length):
        temperature[jj] = data_strings[jj].split(',')[3][:-1]
    #Append to file
    file_line = '\n'+timestamp+'\t'+';'.join(temperature)
    data_file.write(file_line)

data_file.close()

#Print elapsed time; for code optimization
print('Elapsed time:', time()-ta)
| Python | 0.000001 | |
48e4b9692b29d3fb9f43f37fef70ccc41f47fc0e | Add tests for the errors utility functions | yithlibraryserver/tests/errors.py | yithlibraryserver/tests/errors.py | import unittest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from yithlibraryserver.errors import password_not_found, invalid_password_id
class ErrorsTests(unittest.TestCase):
    """Tests for the error-response helper functions.

    BUGFIX: the original used assertTrue(a, b), where the second argument
    is only the failure *message*, so these checks could never fail.  They
    are now real assertions; the expected values are taken from the
    original (vacuous) checks.
    """

    def test_password_not_found(self):
        result = password_not_found()
        self.assertIsInstance(result, HTTPNotFound)
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.body, '{"message": "Password not found"}')

        # try a different message
        result = password_not_found('test')
        self.assertEqual(result.body, '{"message": "test"}')

    def test_invalid_password_id(self):
        result = invalid_password_id()
        self.assertIsInstance(result, HTTPBadRequest)
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.body, '{"message": "Invalid password id"}')

        # try a different message
        result = invalid_password_id('test')
        self.assertEqual(result.body, '{"message": "test"}')
| Python | 0.000001 | |
e88795cb11503261c111a0f2c8353a29a7dc1ccc | Add face-aware average | average.py | average.py | import os
import sys
import numpy as np
import time
import cv2
# Deterimens if a photo is a face and returns a scaled image if it is. Returns
# False if it is not a face.
# Params:
# path - path to the image
# haar_map - path to the haar_cascade training data
# pw - the picture width
# ph - the picture height
def is_face( path, haar_map, pw, ph ):
good_face = True
face_cascade = cv2.CascadeClassifier( haar_map )
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
#smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')
img = cv2.imread( path )
try:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
except cv2.error:
# Some images have problems being converted to grayscale
print 'Couldn\'t convert to grayscale'
return False
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw rectangles around detected features
#for (x,y,w,h) in faces:
# Draws a square around the face
#cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
#roi_gray = gray[y:y+h, x:x+w]
#roi_color = img[y:y+h, x:x+w]
# eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 5)
# for (ex,ey,ew,eh) in eyes:
# cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
# smiles = smile_cascade.detectMultiScale(roi_gray, 1.1, 8)
# for (sx,sy,sw,sh) in smiles:
# cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(0,255,0),2)
# Check that there is only one face and that is large enough
if( len(faces) != 1 ):
# not a single face
good_face = False
else:
# Dimensions of the face
x,y,w,h = faces[0]
if( w < 100 or h < 100 ):
# too small
good_face = False
# If those conditions were met
if( good_face ):
img = center_face(img, x, y, w, h, pw, ph)
return cv2.resize(img, ( pw, ph ) )
else:
return good_face
# Center a face in the image and return the new image
# Params:
# img - the face image
# x,y - the coords of the bottom left corner of the face
# w,h - the dimensions of the face
# pw,ph - the image dimensions
def center_face(img, x, y, w, h, pw, ph):
# Margins
m1 = 0.2*pw
m2 = 0.8*pw
pts1 = np.float32([[x,y],[x+w,y],[x,y+h],[x+w,y+h]])
pts2 = np.float32([[m1,m1],[m2,m1],[m1,m2],[m2,m2]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(pw,ph))
return dst
# Shows a face and waits for user to press a key. exit the program if the
# escape key is pressed
# Params
# img - the image to display
def show_face( img ):
cv2.destroyAllWindows()
cv2.imshow('img',img)
k = cv2.waitKey(50)
if k == 27 and accepted > 0:
exit()
################################################################################
def main( prof_path, output_path='output', haar_map='haarcascade_frontalface_default.xml' ):
# Access all JPG files in directory
imlist = [os.path.join(root, name)
for root, dirs, files in os.walk( os.path.join(os.getcwd(), prof_path) )
for name in files
if name.endswith((".jpeg", ".jpg"))]
# Make output directory if it doesn't exist
if( not os.path.isdir(output_path) ):
os.makedirs( output_path )
# Assuming all images are the same size, get dimensions of first image
img = cv2.imread(imlist[0],0)
h, w = img.shape[:2]
# Book keeping
N = len(imlist)
accepted = 0.0
rate = 0.0
avg_face = None
print 'Found', N, 'profiles'
for im in range( N ):
try:
# Face match
face = is_face(imlist[im], haar_map, w, h)
# It returned a scaled face
if( type(face) != bool ):
if( avg_face == None ):
avg_face = face
else:
# Do running average
alpha = 1.0/(float(accepted) + 1.0)
#alpha = np.ceil(alpha*100.0)/100.0
avg_face = cv2.addWeighted(face, alpha, avg_face, 1.0-alpha, 0.0, avg_face)
#show_face(avg_face)
accepted += 1.0
except IOError:
print 'Couldn\'t open image',im
# Give Status report
if ( accepted % 100 == 1 ):
print 'Progress:', im,'/',N,'=',100.0*np.round(float(im)/float(N)*1000.0)/1000.0,'%'
rate = accepted/float(im+1)
print 'Accepted:', accepted,'/',N,'=',np.round(rate*100000.0)/1000.0,'%'
if( avg_face != None ):
#show_face( avg_face )
print 'Saving Current Average'
img_out = os.path.join(output_path, 'Average_Face_'+str(int(accepted))+'.png')
cv2.imwrite( img_out, avg_face )
#avg_out = None
cv2.destroyAllWindows()
if(len(sys.argv) < 2):
print 'Usage: python average.py profiles_dir/ <output/> <haar_cascade_default.xml>'
elif( len(sys.argv) == 2 ):
main( sys.argv[1] )
elif( len(sys.argv) == 3 ):
main( sys.argv[1], sys.argv[2] )
elif( len(sys.argv) == 4 ):
main( sys.argv[1], sys.argv[2], sys.argv[3] )
| Python | 0.998734 | |
4c225ec7cdafc45840b2459e8804df5818fecd71 | add util module | dace/util.py | dace/util.py | from pyramid.threadlocal import get_current_request
from substanced.util import find_objectmap
def get_obj(oid):
request = get_current_request()
objectmap = find_objectmap(request.root)
obj = objectmap.object_for(oid)
return obj
| Python | 0.000001 | |
ddfc28360941a435ae22705dbc46b44cced588e7 | Add demo file. | demo/demo.py | demo/demo.py | #!/usr/bin/env python3
import fileinput
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import ppp_spell_checker
if __name__ == "__main__":
corrector = ppp_spell_checker.StringCorrector('en')
while(True):
print(corrector.correctString(input("")))
| Python | 0 | |
0f9488588ea66b881cdebf11a42377cb44845a5c | added day6-1a.py. combines transposing input list and function call into single list comprehension | day6/day6-1a.py | day6/day6-1a.py | """--- Day 6: Signals and Noise ---
Something is jamming your communications with Santa. Fortunately, your signal is only partially jammed, and protocol in situations like this is to switch to a simple repetition code to get the message through.
In this model, the same message is sent repeatedly. You've recorded the repeating message signal (your puzzle input), but the data seems quite corrupted - almost too badly to recover. Almost.
All you need to do is figure out which character is most frequent for each position. For example, suppose you had recorded the following messages:
eedadn
drvtee
eandsr
raavrd
atevrs
tsrnev
sdttsa
rasrtv
nssdts
ntnada
svetve
tesnvt
vntsnd
vrdear
dvrsen
enarar
The most common character in the first column is e; in the second, a; in the third, s, and so on. Combining these characters returns the error-corrected message, easter.
Given the recording in your puzzle input, what is the error-corrected version of the message being sent?
"""
import argparse
def decode_column(column):
return sorted(column, key=column.count, reverse=True)
parser = argparse.ArgumentParser(description='Advent of code.')
parser.add_argument('inputfile', type=argparse.FileType('r'), help='Path to input file')
args = parser.parse_args()
lines = args.inputfile.read().rstrip("\n").split("\n")
# Updated to use single list comprehension to transpose and call decode_column
code = [decode_column([row[i] for row in lines])[0] for i in range(len(lines[0]))]
print("".join(code))
| Python | 0.999998 | |
8003f9f643b90cf42bdd8ba0ec8d5dc2f96ba191 | Create list-aws-queue.py | list-aws-queue.py | list-aws-queue.py | # This script created a queue
#
# Author - Paul Doyle Nov 2015
#
#
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = "AKIAIBKC3KC4HZNSXFIA"
secret_access_key = "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"
# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# Get a list of the queues that exists and then print the list out
rs = conn.get_all_queues()
for q in rs:
print q.id
| Python | 0.00003 | |
298f297410b9db8b2d211b1d0edddb595f1fa469 | Add timestamp2str() | datetime/datetime.py | datetime/datetime.py | import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
| Python | 0.000258 | |
130b0b5d70c6f94caa1e6dbd98aa4361a9ce4d1d | add tips for relax time... | dictate_num/train.py | dictate_num/train.py | import os
import sys
import pyttsx
import random
from data import *
EXIT_TAG = 'n'
class CTrain(object):
def __init__(self):
self._eng = pyttsx.init()
def pre(self):
print "*"*10,"DICTATE NUMBER TRAING", "*"*10
name = raw_input("Please enter your name: ")
data = CData(name).load()
if data is not None:
self._data = data
print "See you again ", name, ", your score is followings:"
self._data.showSt()
self._data.showDaySt()
else:
self._data = CData(name)
print "Welcome new challenger", name
print "You will start on level", self._data.level
return
def aft(self, doCnt):
self._data.save()
print "Bye %s, finish [%3d] , your score is followings:"%(self._data.name, doCnt)
self._data.showDetail(doCnt)
return
def run(self):
IsCon = True
idx = 1
while IsCon:
lvl = self._data.level
k = raw_input("press any key to continue, press n for exit: ")
if k.lower().find(EXIT_TAG) >= 0:
print "End training..."
isCon = False
break
print "\n[%3d]"%( idx,), " now level", lvl,", Please listening and enter what you heard\n"
nums = self.genNum(lvl)
self.readNum(nums)
d = raw_input()
ans,lvl = self._data.score(d, nums)
if ans:
print "SUCC"
else:
print "FAIL: ", nums
idx += 1
return idx-1
def genNum(self, lvl):
s = ""
for _ in range(lvl):
d = random.randint(0,9)
s += str(d)
return s
def readNum(self, nums):
for d in nums:
self._eng.say(d)
self._eng.runAndWait()
return
def main():
train = CTrain()
train.pre()
doCnt = train.run()
train.aft(doCnt)
if __name__ == "__main__":
main()
| import os
import sys
import pyttsx
import random
from data import *
EXIT_TAG = 'n'
class CTrain(object):
def __init__(self):
self._eng = pyttsx.init()
def pre(self):
print "*"*10,"DICTATE NUMBER TRAING", "*"*10
name = raw_input("Please enter your name: ")
data = CData(name).load()
if data is not None:
self._data = data
print "See you again ", name, ", your score is followings:"
self._data.showSt()
self._data.showDaySt()
else:
self._data = CData(name)
print "Welcome new challenger", name
print "You will start on level", self._data.level
return
def aft(self, doCnt):
self._data.save()
print "Bye %s, finish [%3d] , your score is followings:"%(self._data.name, doCnt)
self._data.showDetail(doCnt)
return
def run(self):
IsCon = True
idx = 1
while IsCon:
lvl = self._data.level
print "\n[%3d]"%( idx,), " now level", lvl,", Please listening..."
nums = self.genNum(lvl)
self.readNum(nums)
d = raw_input("enter what you heard(n for exit): ")
if d.lower().find(EXIT_TAG) >= 0:
IsCon = False
break
ans,lvl = self._data.score(d, nums)
if ans:
print "SUCC"
else:
print "FAIL: ", nums
idx += 1
return idx-1
def genNum(self, lvl):
s = ""
for _ in range(lvl):
d = random.randint(0,9)
s += str(d)
return s
def readNum(self, nums):
for d in nums:
self._eng.say(d)
self._eng.runAndWait()
return
def main():
train = CTrain()
train.pre()
doCnt = train.run()
train.aft(doCnt)
if __name__ == "__main__":
main()
| Python | 0 |
0e9e63a48c5f3e02fb49d0068363ac5442b39e37 | Add a body to posts | discussion/models.py | discussion/models.py | from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
| from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
| Python | 0.000001 |
62beb09ca1ecde8be4945016ae09beaad2dad597 | Create disemvowel_trolls.py | disemvowel_trolls.py | disemvowel_trolls.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Disemvowel Trolls
#Problem level: 7 kyu
def disemvowel(string):
return ''.join([letter for letter in string if letter.lower() not in ['a', 'e', 'i', 'o', 'u']])
| Python | 0.000001 | |
078bc9ea1375ac8ff7b2bbb92553ae63e5190cd3 | add var.py in package structData to save vars | trunk/editor/structData/var.py | trunk/editor/structData/var.py | #!/usr/bin/env python
class Var(object):
def __init__(self, name, start_value, set_value=None):
self.name = name
self.start_value = start_value
self.set_value = set_value
| Python | 0 | |
a26f0cc1af189686a24518510095f93b064a36a4 | Add two utility functions for group membership | django_split/base.py | django_split/base.py | import six
import datetime
import inflection
from django.contrib.auth.models import User
from .models import ExperimentGroup
from .validation import validate_experiment
EXPERIMENTS = {}
class ExperimentMeta(type):
def __init__(self, name, bases, dict):
super(ExperimentMeta, self).__init__(name, bases, dict)
# Special case: don't do experiment processing on the base class
if (
name == 'Experiment' and
self.__module__ == ExperimentMeta.__module__
):
return
slug = inflection.underscore(name)
if len(slug) > 48:
raise ValueError("Experiment name too long")
if slug in EXPERIMENTS:
raise AssertionError(
"Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
slug,
dict['__module__'],
dict['__qualname__'],
EXPERIMENTS[slug].__module__,
EXPERIMENTS[slug].__qualname__,
),
)
validate_experiment(self)
self.slug = slug
EXPERIMENTS[slug] = self
class Experiment(six.with_metaclass(ExperimentMeta)):
groups = ('control', 'experiment')
control_group = 'control'
superuser_group = None
include_new_users = True
include_old_users = True
metrics = ()
start_date = None
end_date = None
@classmethod
def group(cls, group_name):
# This will raise a ValueError if the group does not exist. Whilst
# group_index is not used if we're before the experiment start date,
# we want to catch errors from using the wrong group name immediately.
group_index = groups.index(group_name)
# TODO: superuser logic
# Until the start of the experiment, all users are in the control group
if datetime.date.today() < self.start_date:
if group_name == self.control_group:
return User.objects.all()
else:
return User.objects.none()
return User.objects.filter(id__in=
ExperimentGroup.objects.filter(
experiment=self.slug,
group=group_index,
),
)
@classmethod
def in_group(cls, user, group):
return user in cls.group(group)
| import six
import inflection
from .validation import validate_experiment
EXPERIMENTS = {}
class ExperimentMeta(type):
def __init__(self, name, bases, dict):
super(ExperimentMeta, self).__init__(name, bases, dict)
# Special case: don't do experiment processing on the base class
if (
name == 'Experiment' and
self.__module__ == ExperimentMeta.__module__
):
return
slug = inflection.underscore(name)
if len(slug) > 48:
raise ValueError("Experiment name too long")
if slug in EXPERIMENTS:
raise AssertionError(
"Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
slug,
dict['__module__'],
dict['__qualname__'],
EXPERIMENTS[slug].__module__,
EXPERIMENTS[slug].__qualname__,
),
)
validate_experiment(self)
self.slug = slug
EXPERIMENTS[slug] = self
class Experiment(six.with_metaclass(ExperimentMeta)):
groups = ('control', 'experiment')
control_group = 'control'
superuser_group = None
include_new_users = True
include_old_users = True
metrics = ()
start_date = None
end_date = None
| Python | 0.000001 |
316d0518f2cf81ce3045335b79bc993020befce1 | create main class `FlaskQuik` for bridging quik and flask | flask_quik.py | flask_quik.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask.ext.quik
~~~~~~~~~~~~~~
Extension implementing Quik Templates support in Flask with support for
flask-babel
:copyright: (c) 2012 by Thiago Avelino <thiago@avelino.xxx>
:license: MIT, see LICENSE for more details.
"""
from quik import FileLoader
class FlaskQuik(object):
"""
Main class for bridging quik and flask. We try to stay as close as possible
to how Jinja2 is used in Flask, while at the same time surfacing the useful
stuff from Quik.
"""
def __init__(self, app=None):
self.app = None
if app is not None:
self.init_app(app)
self.app = app
def init_app(self, app):
"""
Initialize a :class:`~flask.Flask` application
for use with this extension. This method is useful for the factory
pattern of extension initialization. Example::
quik = FlaskQuik()
app = Flask(__name__)
quik.init_app(app)
.. note::
This call will fail if you called the :class:`FlaskQuik`
constructor with an ``app`` argument.
"""
if self.app:
raise RuntimeError("Cannot call init_app when app argument was "
"provided to FlaskQuik constructor.")
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['quik'] = self
| Python | 0 | |
4844ac93326186ded80147a3f8e1e1429212428b | add user's launcher | tfx/experimental/templates/taxi/stub_component_launcher.py | tfx/experimental/templates/taxi/stub_component_launcher.py | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub component launcher for launching stub executors in KFP."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tfx.experimental.pipeline_testing import base_stub_executor
from tfx.experimental.pipeline_testing import stub_component_launcher
from tfx.experimental.templates.taxi.pipeline import configs
class StubComponentLauncher(stub_component_launcher.StubComponentLauncher):
"""Responsible for launching stub executors in KFP Template.
This stub component launcher cannot be defined in the kubeflow_dag_runner.py
because launcher class is imported by the module path.
"""
def __init__(self, **kwargs):
super(StubComponentLauncher, self).__init__(**kwargs)
# TODO(StubExecutor): GCS directory where KFP outputs are recorded
self.test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME)
# TODO(StubExecutor): customize self.stubbed_component_ids to replace components
# with BaseStubExecutor
self.stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen',
'SchemaGen', 'ExampleValidator',
'Trainer', 'Transform', 'Evaluator', 'Pusher']
# TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub
# executor class as a value and component id as a key.
self.stubbed_component_map = {}
for c_id in self.stubbed_component_ids:
self.stubbed_component_map[c_id] = base_stub_executor.BaseStubExecutor
def get_stub_launcher_class(stub_launcher: Type[StubComponentLauncher],
test_data_dir: Text,
stubbed_component_ids: List[Text],
stubbed_component_map: Dict[Text, Type[base_stub_executor.BaseStubExecutor]]
) -> Type[StubComponentLauncher]:
"""Returns a StubComponentLauncher class.
Returns:
StubComponentLauncher class holding stub executors.
"""
stub_launcher.stubbed_component_map = dict(stubbed_component_map)
for component_id in stubbed_component_ids:
stub_launcher.stubbed_component_map[component_id] = \
base_stub_executor.BaseStubExecutor
stub_launcher.test_data_dir = test_data_dir
return stub_launcher
| Python | 0.000001 | |
20d77f66e0287b3aab08b4cf14f23e7e5672aefd | Create database import script for the Picks table (each NFLPool Player's picks for a given season) | db_setup/nflpool_picks.py | db_setup/nflpool_picks.py | import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
| Python | 0 | |
ed1cd0f7de1a7bebaaf0f336ba52e04286dd87de | Create my_mapper.py | Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py | Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py | #!/usr/bin/env python
import sys
for line in sys.stdin:
line = line.strip()
unpacked = line.split(",")
stadium, capacity, expanded, location, surface, turf, team, opened, weather, roof, elevation = line.split(",")
#medallion, hack_license, vendor_id, rate_code, store_and_fwd_flag, pickup_datetime, dropoff_datetime, passenger_count, trip_time_in_secs, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude = line.split(",")
results = [turf, "1"]
print("\t".join(results))
| Python | 0.000014 | |
8ad4627973db344e228a9170aef030ab58efdeb9 | Add column order and importable objects lists | src/ggrc/converters/__init__.py | src/ggrc/converters/__init__.py | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
| # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
| Python | 0 |
53926f18fb4f058bba9dd23fb75721d3dfa1d24b | add hashes directory | hashes/md5.py | hashes/md5.py | import math
def rearrange(bitString32):
if len(bitString32) != 32:
raise ValueError("Need length 32")
newString = ""
for i in [3,2,1,0]:
newString += bitString32[8*i:8*i+8]
return newString
def reformatHex(i):
hexrep = format(i,'08x')
thing = ""
for i in [3,2,1,0]:
thing += hexrep[2*i:2*i+2]
return thing
def pad(bitString):
startLength = len(bitString)
bitString += '1'
while len(bitString) % 512 != 448:
bitString += '0'
lastPart = format(startLength,'064b')
bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
return bitString
def getBlock(bitString):
currPos = 0
while currPos < len(bitString):
currPart = bitString[currPos:currPos+512]
mySplits = []
for i in range(16):
mySplits.append(int(rearrange(currPart[32*i:32*i+32]),2))
yield mySplits
currPos += 512
def not32(i):
i_str = format(i,'032b')
new_str = ''
for c in i_str:
new_str += '1' if c=='0' else '0'
return int(new_str,2)
def sum32(a,b):
return (a + b) % 2**32
def leftrot32(i,s):
return (i << s) ^ (i >> (32-s))
def md5me(testString):
bs =''
for i in testString:
bs += format(ord(i),'08b')
bs = pad(bs)
tvals = [int(2**32 * abs(math.sin(i+1))) for i in range(64)]
a0 = 0x67452301
b0 = 0xefcdab89
c0 = 0x98badcfe
d0 = 0x10325476
s = [7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, \
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, \
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, \
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 ]
for m in getBlock(bs):
A = a0
B = b0
C = c0
D = d0
for i in range(64):
if i <= 15:
#f = (B & C) | (not32(B) & D)
f = D ^ (B & (C ^ D))
g = i
elif i<= 31:
#f = (D & B) | (not32(D) & C)
f = C ^ (D & (B ^ C))
g = (5*i+1) % 16
elif i <= 47:
f = B ^ C ^ D
g = (3*i+5) % 16
else:
f = C ^ (B | not32(D))
g = (7*i) % 16
dtemp = D
D = C
C = B
B = sum32(B,leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i]))
A = dtemp
a0 = sum32(a0, A)
b0 = sum32(b0, B)
c0 = sum32(c0, C)
d0 = sum32(d0, D)
digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
return digest
def test():
assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
assert md5me("The quick brown fox jumps over the lazy dog") == "9e107d9d372bb6826bd81d3542a419d6"
print "Success."
if __name__ == "__main__":
test()
| Python | 0.000001 | |
8141d6cafb4a1c8986ec7065f27d536d98cc9916 | Add little script calculate sample spectra. | Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py | Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py | '''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # billirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
| Python | 0 | |
ec0cf9c6eb8ecc69482ed08f22a760d73f420619 | Add API tests | test/test_api/test_api_project_stats.py | test/test_api/test_api_project_stats.py | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
from default import with_context
from test_api import TestAPI
from factories import ProjectFactory, TaskFactory, TaskRunFactory
from pybossa.repositories import ProjectStatsRepository
import pybossa.cache.project_stats as stats
class TestProjectStatsAPI(TestAPI):
    """Integration tests for the /api/projectstats endpoint."""

    @with_context
    def test_query_projectstats(self):
        """Test API query for project stats endpoint works"""
        # Build three projects, each with four fully-answered tasks, and
        # compute/record their stats for later comparison.
        project_stats = []
        projects = ProjectFactory.create_batch(3)
        for project in projects:
            for task in TaskFactory.create_batch(4, project=project, n_answers=3):
                TaskRunFactory.create(task=task)
            stats.update_stats(project.id)
            ps = stats.get_stats(project.id, full=True)
            project_stats.append(ps)

        # Stat blocks only present when full stats are requested.
        extra_stat_types = ['hours_stats', 'dates_stats', 'users_stats']

        # As anon
        url = '/api/projectstats'
        res = self.app_get_json(url)
        data = json.loads(res.data)
        assert len(data) == 3, data

        # Limits
        res = self.app.get(url + "?limit=1")
        data = json.loads(res.data)
        assert len(data) == 1, data

        # Keyset pagination: after the second project's id, the third
        # (last created) project's stats should be returned.
        res = self.app.get(url + '?limit=1&last_id=' + str(projects[1].id))
        data = json.loads(res.data)
        assert len(data) == 1, len(data)
        assert data[0]['id'] == projects[2].id

        # Errors: an unknown query parameter should produce a 415 failure.
        res = self.app.get(url + "?something")
        err = json.loads(res.data)
        err_msg = "AttributeError exception should be raised"
        assert res.status_code == 415, err_msg
        assert err['action'] == 'GET', err_msg
        assert err['status'] == 'failed', err_msg
        assert err['exception_cls'] == 'AttributeError', err_msg

        # Invalid orderby attribute
        url = "/api/projectstats?orderby=wrongattribute"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should be 415."
        assert data['status'] == 'failed', data
        assert data['status_code'] == 415, data
        assert 'has no attribute' in data['exception_msg'], data

        # Order by
        url = "/api/projectstats?orderby=id"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should get the last item first."
        ps_by_id = sorted(project_stats, key=lambda x: x.id, reverse=False)
        for i in range(len(project_stats)):
            assert ps_by_id[i].id == data[i]['id']

        # Desc filter
        url = "/api/projectstats?orderby=id&desc=true"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should get the last item first."
        ps_by_id = sorted(project_stats, key=lambda x: x.id, reverse=True)
        for i in range(len(project_stats)):
            assert ps_by_id[i].id == data[i]['id']

        # Without full filter: none of the extra stat blocks may appear.
        url = "/api/projectstats"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should not return the full stats."
        extra = [row['info'].get(_type) for _type in extra_stat_types
                 for row in data if row['info'].get(_type)]
        assert not extra

        # With full filter: the extra stat blocks must match what was stored.
        url = "/api/projectstats?full=1"
        res = self.app.get(url)
        data = json.loads(res.data)
        err_msg = "It should return full stats."
        for i, row in enumerate(data):
            for _type in extra_stat_types:
                assert row['info'][_type] == project_stats[i].info[_type]
| Python | 0.000001 | |
55aae76ae3813045542b8f94736fdfb1e08592f2 | Add chrome driver path. | src/lib/environment/__init__.py | src/lib/environment/__init__.py | import os
import logging
from lib import constants, file_ops
# Project-wide configuration, resolved once at import time.
yaml = file_ops.load_yaml_contents(constants.path.YAML)
# Root of the project tree (one level above this package).
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
VIRTENV_PATH = PROJECT_ROOT_PATH + constants.path.VIRTUALENV_DIR
LOGGING_FORMAT = yaml[constants.yaml.LOGGING][constants.yaml.FORMAT]
# Bundled chromedriver binary used by the Selenium-based tests.
CHROME_DRIVER_PATH = PROJECT_ROOT_PATH + constants.path.RESOURCES + constants.path.CHROME_DRIVER
# register loggers
selenium_logger = logging.getLogger(constants.log.Selenium.SELENIUM_REMOTE_CONNECTION)
# Only display possible problems
selenium_logger.setLevel(logging.WARNING)
| import os
import logging
from lib import constants, file_ops
# Project-wide configuration, resolved once at import time.
yaml = file_ops.load_yaml_contents(constants.path.YAML)
# Root of the project tree (one level above this package).
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
VIRTENV_PATH = PROJECT_ROOT_PATH + constants.path.VIRTUALENV_DIR
LOGGING_FORMAT = yaml[constants.yaml.LOGGING][constants.yaml.FORMAT]
# register loggers
selenium_logger = logging.getLogger(constants.log.Selenium.SELENIUM_REMOTE_CONNECTION)
# Only display possible problems
selenium_logger.setLevel(logging.WARNING)
| Python | 0 |
56422abd9e5dbc1b17b009d84fd5e4b028719b94 | add basic IPC traffic analyzer | ipc-viewer.py | ipc-viewer.py | #!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file analyzes the output of running with MOZ_IPC_MESSAGE_LOG=1
import sys
import re
# One MOZ_IPC_MESSAGE_LOG line, e.g. the sample shown below the pattern:
# [time:<us>][<pid><dir><pid>][<actor>] Sending|Received[ reply] <Message>([TODO])
msgPatt = re.compile('^\[time:(\d+)\]\[(\d+)(->|<-)(\d+)\]\[([^\]]+)\] (Sending|Received)((?: reply)?) ([^\(]+)\(\[TODO\]\)$')

#[time:1441041587246153][9641->9647][PPluginScriptableObjectParent] Sending reply Reply_NPN_Evaluate([TODO])

matchCount = 0        # lines that matched the IPC message pattern
notMatchCount = 0     # everything else (silently ignored)
msgCounts = {}        # (actor, message name) -> occurrence count

for l in sys.stdin:
    mm = msgPatt.match(l)
    if not mm:
        notMatchCount += 1
        continue
    # Capture groups, in the order of the regex above.
    timeStamp = mm.group(1)
    pid1 = mm.group(2)
    arrow = mm.group(3)                   # '->' or '<-'
    pid2 = mm.group(4)
    actor = mm.group(5)                   # IPDL actor name
    sendRecv = mm.group(6)                # 'Sending' or 'Received'
    sendRecvExtra = not not mm.group(7)   # True for reply messages
    msg = mm.group(8)                     # message name

    p = (actor, msg)
    msgCounts[p] = msgCounts.setdefault(p, 0) + 1
    #print timeStamp, pid1, arrow, pid2, actor, sendRecv, sendRecvExtra, msg
    matchCount += 1

# Resort the data a bit.
counts = []
for p, count in msgCounts.iteritems():
    counts.append((count, p))
counts.sort()
counts.reverse()

# Print message types, most frequent first (Python 2 print statements).
for (count, (actor, msg)) in counts:
    print count, actor, msg
| Python | 0 | |
561957a2492714e1b6d76b13daeced66a90aba1d | Create __init__.py | docs/_themes/sphinx_rtd_theme/__init__.py | docs/_themes/sphinx_rtd_theme/__init__.py | """Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
    """Return the absolute path of the directory containing this theme.

    Sphinx configurations put this on ``html_theme_path`` so the
    ``sphinx_rtd_theme`` package directory can be located inside it.
    """
    theme_parent_dir = os.path.dirname(os.path.dirname(__file__))
    return os.path.abspath(theme_parent_dir)
| Python | 0.000429 | |
3018a418b24da540f259a59a578164388b0c2686 | add examples/call-gtk.py | examples/call-gtk.py | examples/call-gtk.py |
import sys
import pygtk
pygtk.require('2.0')
import dbus
import gobject
import gtk
from account import read_account, connect
from call import IncomingCall, OutgoingCall, get_stream_engine
from telepathy.interfaces import CONN_INTERFACE
class CallWindow(gtk.Window):
    """Call UI: a large remote-video area, a smaller self-view (preview)
    area stacked with a Call button."""

    def __init__(self):
        gtk.Window.__init__(self)

        hbox = gtk.HBox()
        hbox.set_border_width(10)
        vbox = gtk.VBox()

        # Sunken frames around the two video areas.
        output_frame = gtk.Frame()
        output_frame.set_shadow_type(gtk.SHADOW_IN)
        preview_frame = gtk.Frame()
        preview_frame.set_shadow_type(gtk.SHADOW_IN)

        # GtkSockets: external processes embed video here via the X window
        # ids returned by get_id() (see BaseGtkCall).
        self.output = gtk.Socket()
        self.output.set_size_request(400, 300)
        self.preview = gtk.Socket()
        self.preview.set_size_request(200, 150)

        self.call_button = gtk.Button('Call')
        self.call_button.connect('clicked', self._call_button_clicked)

        output_frame.add(self.output)
        preview_frame.add(self.preview)
        vbox.pack_start(preview_frame, False)
        vbox.pack_end(self.call_button, False)
        hbox.add(output_frame)
        hbox.pack_start(vbox, padding=10)
        self.add(hbox)

    def _call_button_clicked(self, button):
        # Not implemented yet.
        pass
class GtkLoopMixin:
    """Mixin that drives a call object with the GTK main loop."""

    def run(self):
        """Run the GTK main loop; Ctrl-C interrupts the call cleanly."""
        try:
            gtk.main()
        except KeyboardInterrupt:
            print "killed"
            self.interrupt()

    def quit(self):
        # Stop the GTK main loop, which makes run() return.
        gtk.main_quit()
class BaseGtkCall:
    """Shared GUI behaviour for incoming and outgoing GTK calls."""

    def __init__(self):
        self.window = CallWindow()
        # Closing the window ends the program's main loop.
        self.window.connect('destroy', gtk.main_quit)
        self.window.show_all()

    def add_preview_window(self):
        """Ask the stream engine to draw the local preview into our
        preview socket.  Returns False so a gobject timeout runs it once."""
        se = dbus.Interface(get_stream_engine(),
            'org.freedesktop.Telepathy.StreamEngine')
        se.AddPreviewWindow(self.window.preview.get_id())
        return False

    def add_output_window(self):
        """Route stream id 2 of our channel into the output socket.
        Returns False so a gobject timeout runs it once."""
        se = dbus.Interface(get_stream_engine(),
            'org.freedesktop.Telepathy.StreamEngine')
        # NOTE(review): reaches into the channel proxy's private
        # _dbus_object to get the object path -- confirm no public
        # accessor exists.
        chan_path = self.channel._dbus_object._object_path
        se.SetOutputWindow(chan_path, 2, self.window.output.get_id())
        return False
class GtkOutgoingCall(GtkLoopMixin, BaseGtkCall, OutgoingCall):
    """Outgoing call with a GTK user interface."""

    def __init__(self, conn, contact):
        OutgoingCall.__init__(self, conn, contact)
        BaseGtkCall.__init__(self)

    def members_changed_cb(self, message, added, removed, local_pending,
            remote_pending, actor, reason):
        OutgoingCall.members_changed_cb(self, message, added, removed,
            local_pending, remote_pending, actor, reason)

        # Once the remote contact has joined, hook up the video windows.
        # NOTE(review): the 5 s delay presumably waits for media streams to
        # be established -- confirm.
        if self.handle in added:
            gobject.timeout_add(5000, self.add_output_window)
            gobject.timeout_add(5000, self.add_preview_window)
class GtkIncomingCall(GtkLoopMixin, BaseGtkCall, IncomingCall):
    """Incoming call with a GTK user interface."""

    def __init__(self, conn):
        IncomingCall.__init__(self, conn)
        BaseGtkCall.__init__(self)

    def members_changed_cb(self, message, added, removed, local_pending,
            remote_pending, actor, reason):
        IncomingCall.members_changed_cb(self, message, added, removed,
            local_pending, remote_pending, actor, reason)

        # Once we (the local user) have joined the call, hook up the video
        # windows after a short delay (see GtkOutgoingCall).
        if self.conn[CONN_INTERFACE].GetSelfHandle() in added:
            gobject.timeout_add(5000, self.add_output_window)
            gobject.timeout_add(5000, self.add_preview_window)
if __name__ == '__main__':
    # Usage: call-gtk.py <account file> [contact]
    # With a contact argument an outgoing call is placed; without one the
    # program waits for an incoming call.
    assert len(sys.argv) in (2, 3)
    account_file = sys.argv[1]
    manager, protocol, account = read_account(account_file)
    conn = connect(manager, protocol, account)

    if len(sys.argv) > 2:
        contact = sys.argv[2]
        call = GtkOutgoingCall(conn, sys.argv[2])
    else:
        call = GtkIncomingCall(conn)

    print "connecting"
    conn[CONN_INTERFACE].Connect()
    call.run()

    try:
        print "disconnecting"
        conn[CONN_INTERFACE].Disconnect()
    except dbus.DBusException:
        # The connection may already be gone; nothing left to do.
        pass
| Python | 0 | |
c979fe37cc5f3dd83933893a1e7774c4aa7d061c | Add test script. | examples/get_data.py | examples/get_data.py | '''
Copyright 2019 Trustees of the University of Pennsylvania
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import getpass
from ieeg.auth import Session
def main():
    """Open an IEEG dataset and print raw data for a few window sizes.

    Command-line arguments:
        -u/--user     username (required)
        -p/--password password; prompted interactively when omitted
        dataset       name of the dataset to open
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', required=True, help='username')
    parser.add_argument('-p', '--password',
                        help='password (will be prompted if missing)')
    parser.add_argument('dataset', help='dataset name')

    args = parser.parse_args()

    if not args.password:
        # Prompt without echoing so the password never appears on screen.
        args.password = getpass.getpass()

    with Session(args.user, args.password) as session:
        dataset_name = args.dataset
        dataset = session.open_dataset(dataset_name)
        # Fetch the first two channels over increasingly long windows,
        # printing the resulting array shape and contents for each.
        # (Duration units are whatever get_data expects -- TODO confirm.)
        for duration in (2000, 4000, 6000):
            raw_data = dataset.get_data(0, duration, [0, 1])
            print(duration, raw_data.shape)
            print(raw_data)
        session.close_dataset(dataset_name)


if __name__ == "__main__":
    main()
| Python | 0 | |
11bd97a647507645f90e259dd8000eb6a8001890 | Add index to log_once table, make cleanup run with db cleanup event. refs #1167 | flexget/utils/log.py | flexget/utils/log.py | """Logging utilities"""
import logging
import hashlib
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Index
from flexget import schema
from flexget.utils.sqlalchemy_utils import table_schema
from flexget.manager import Session
from flexget.event import event
log = logging.getLogger('util.log')
Base = schema.versioned_base('log_once', 0)
@schema.upgrade('log_once')
def upgrade(ver, session):
    """Schema migration for the log_once table.

    None -> 0: adds a unique index on the md5sum column.
    Returns the (possibly bumped) schema version.
    """
    if ver is None:
        log.info('Adding index to md5sum column of log_once table.')
        table = table_schema('log_once', session)
        Index('log_once_md5sum', table.c.md5sum, unique=True).create()
        ver = 0
    return ver
class LogMessage(Base):
    """A log_once entry, keyed by the md5 digest of the logged message."""
    __tablename__ = 'log_once'

    id = Column(Integer, primary_key=True)
    # md5 hex digest of the message text; unique so each message is
    # recorded (and therefore logged) at most once.
    md5sum = Column(String, unique=True)
    # Time the row was inserted.  Must be the callable `datetime.now`, not
    # `datetime.now()`: calling it here would evaluate once at import time
    # and stamp every row with the module-load time.
    added = Column(DateTime, default=datetime.now)

    def __init__(self, md5sum):
        self.md5sum = md5sum

    def __repr__(self):
        return "<LogMessage('%s')>" % (self.md5sum)
@event('manager.db_cleanup')
def purge(session):
    """Purge old messages from database.

    Runs on the manager's db_cleanup event and deletes log_once rows
    older than one year.
    """
    old = datetime.now() - timedelta(days=365)
    result = session.query(LogMessage).filter(LogMessage.added < old).delete()
    if result:
        log.verbose('Purged %s entries from log_once table.' % result)
def log_once(message, logger=logging.getLogger('log_once')):
    """Log message only once using given logger. Returns False if suppressed logging."""
    # Key messages by the md5 of their latin1-encoded text; 'replace'
    # keeps unencodable characters from raising (ticket:250).
    digest = hashlib.md5()
    digest.update(message.encode('latin1', 'replace')) # ticket:250
    md5sum = digest.hexdigest()

    session = Session()
    try:
        # abort if this has already been logged
        if session.query(LogMessage).filter_by(md5sum=md5sum).first():
            session.close()
            return False
        row = LogMessage(md5sum)
        session.add(row)
    finally:
        # NOTE(review): the session is committed but only close()d on the
        # early-return path above -- confirm whether Session is scoped or
        # whether a close() is missing on the success path.
        session.commit()

    logger.info(message)
    return True
| """Logging utilities"""
import logging
from flexget.manager import Session, Base
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime
log = logging.getLogger('util.log')
class LogMessage(Base):
    """Declarative"""
    # One row per unique message ever passed to log_once().
    __tablename__ = 'log_once'

    id = Column(Integer, primary_key=True)
    # md5 hex digest of the message text.
    md5sum = Column(String)
    # NOTE(review): default=datetime.now() is evaluated once at import
    # time, so every row gets the module-load timestamp; the callable
    # `datetime.now` is what column defaults normally receive.
    added = Column(DateTime, default=datetime.now())

    def __init__(self, md5sum):
        self.md5sum = md5sum

    def __repr__(self):
        return "<LogMessage('%s')>" % (self.md5sum)


def purge():
    """Purge old messages from database"""
    # Anything older than a year gets deleted.
    old = datetime.now() - timedelta(days=365)

    session = Session()
    try:
        for message in session.query(LogMessage).filter(LogMessage.added < old):
            log.debug('purging: %s' % message)
            session.delete(message)
    finally:
        session.commit()


def log_once(message, logger=logging.getLogger('log_once')):
    """Log message only once using given logger. Returns False if suppressed logging."""
    # Expire year-old entries on every call.
    purge()

    # Key messages by the md5 of their latin1-encoded text; 'replace'
    # keeps unencodable characters from raising.
    import hashlib
    digest = hashlib.md5()
    digest.update(message.encode('latin1', 'replace')) # ticket:250
    md5sum = digest.hexdigest()

    session = Session()
    try:
        # abort if this has already been logged
        if session.query(LogMessage).filter_by(md5sum=md5sum).first():
            session.close()
            return False
        row = LogMessage(md5sum)
        session.add(row)
    finally:
        session.commit()

    logger.info(message)
    return True
| Python | 0.000008 |
b3977289de72421530614ff4f28cdf7333d743e4 | Add region migration validation | dbaas/logical/validators.py | dbaas/logical/validators.py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from logical.models import Database
from django.core.exceptions import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from system.models import Configuration
def validate_evironment(database_name, environment_name):
    """Reject a migration when a database with this name already exists in
    a production environment and the target environment is not a dev one.

    Raises ValidationError in that case; returns silently when no database
    with the given name exists.  (The 'evironment' typo in the name is
    kept because callers reference it.)
    """
    try:
        # NOTE(review): Django managers normally take keyword lookups,
        # e.g. Database.objects.get(name=database_name); a positional
        # string here looks wrong -- confirm against the Database manager.
        database = Database.objects.get(database_name)
    except ObjectDoesNotExist:
        # Unknown database: nothing to validate.
        pass
    else:
        dev_envs = Configuration.get_by_name_as_list('dev_envs')
        new_db_env_is_not_dev = environment_name not in dev_envs
        prod_envs = Configuration.get_by_name_as_list('prod_envs')
        db_env_is_prod = database.environment.name in prod_envs
        if new_db_env_is_not_dev and db_env_is_prod:
            raise ValidationError(
                _('%(database_name)s already exists in production!'),
                params={'database_name': database_name},
            )
| Python | 0 | |
f0e092b060d9afb700f027197fdf44eeb2fdd91b | Create __init__.py | __ini__.py | __ini__.py | Python | 0.000429 | ||
660fc806d11c6a8af321bb14caec21ca7cba4141 | add kafka streaming consumer | deploy/test/kf_consumer1.py | deploy/test/kf_consumer1.py | import json
from kafka import KafkaConsumer
# Consume the word-count results topic and print each record's fields.
consumer = KafkaConsumer('testres', bootstrap_servers='192.168.33.50:9092')

for msg in consumer:
    # Decode and parse the JSON payload once per message instead of
    # re-parsing it for every field.
    payload = json.loads(msg.value.decode())
    # NOTE(review): assumes the producer always sets a message key --
    # msg.key.decode() would fail on a None key; confirm.
    print(msg.key.decode())
    print(payload.get('word'))
    print(payload.get('count'))
    print(payload.get('window'))
    print('='*30)
| Python | 0 | |
4c96e1eb17a5cbb4c1a33cef5c37aac00b4ec8e0 | Update test_api.py | dpaste/tests/test_api.py | dpaste/tests/test_api.py | # -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase
from ..models import Snippet
from ..forms import EXPIRE_DEFAULT
from ..highlight import LEXER_DEFAULT
class SnippetAPITestCase(TestCase):
    """Integration tests for the snippet-creation API endpoint."""

    def setUp(self):
        # Fresh client per test; resolve the endpoint URL once.
        self.client = Client()
        self.api_url = reverse('dpaste_api_create_snippet')

    def test_empty(self):
        """A POST without usable content is rejected with a 400 and no
        snippet is created."""
        # No data at all.
        response = self.client.post(self.api_url, {})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # Content present but effectively empty: empty string, spaces
        # only, and linebreaks/tabs only.
        for blank_content in ('', '   ', '\n\t '):
            response = self.client.post(self.api_url,
                                        {'content': blank_content})
            self.assertEqual(response.status_code, 400)
            self.assertEqual(Snippet.objects.count(), 0)

    def test_valid(self):
        """A snippet containing Unicode, tabs, spaces and linebreaks is
        accepted and retrievable at the returned URL."""
        payload = {'content': u"Hello Wörld.\n\tGood Bye"}
        response = self.client.post(self.api_url, payload)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)

        # The response body is the snippet URL wrapped in double quotes.
        self.assertTrue(response.content.startswith('"'))
        self.assertTrue(response.content.endswith('"'))

        # Fetching that URL must render the pasted content.
        snippet_url = response.content[1:-1]
        response = self.client.get(snippet_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, payload['content'])
| # -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase
from ..models import Snippet
from ..forms import EXPIRE_DEFAULT
from ..highlight import LEXER_DEFAULT
class SnippetAPITestCase(TestCase):
    """Integration tests for the snippet-creation API endpoint."""

    def setUp(self):
        # Resolve the endpoint once; each test gets a fresh client.
        self.api_url = reverse('dpaste_api_create_snippet')
        self.client = Client()

    def test_empty(self):
        """
        The browser sent a content field but with no data.

        !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        ALL tests fail due to a Piston bug:
        https://bitbucket.org/jespern/django-piston/issue/221/attributeerror-httpresponseservererror
        """
        data = {}

        # No data
        response = self.client.post(self.api_url, {})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # No content
        data['content'] = ''
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # Just some spaces
        data['content'] = '   '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

        # Linebreaks or tabs only are not valid either
        data['content'] = '\n\t '
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Snippet.objects.count(), 0)

    def test_valid(self):
        """
        A valid snippet, contains Unicode, tabs, spaces, linebreaks etc.
        """
        data = {'content': u"Hello Wörld.\n\tGood Bye"}
        response = self.client.post(self.api_url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Snippet.objects.count(), 1)

        # The response is a URL with quotes
        self.assertTrue(response.content.startswith('"'))
        self.assertTrue(response.content.endswith('"'))

        # The URL returned is the absolute url to the snippet.
        # If we call that url our snippet should be in the page content.
        snippet_url = response.content[1:-1]
        response = self.client.get(snippet_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, data['content'])
| Python | 0.000004 |
c831e7cec02e06d9346bf6fdf0dcdf553f4f479e | Add test for interpolating NaNs | metpy/calc/tests/test_tools.py | metpy/calc/tests/test_tools.py | # Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for `calc.tools` module."""
import numpy as np
import pytest
from metpy.calc import (find_intersections, interpolate_nans, nearest_intersection_idx,
resample_nn_1d)
from metpy.testing import assert_array_almost_equal, assert_array_equal
def test_resample_nn():
    """Test 1d nearest neighbor functionality."""
    a = np.arange(5.)
    b = np.array([2, 3.8])
    truth = np.array([2, 4])
    assert_array_equal(truth, resample_nn_1d(a, b))


def test_nearest_intersection_idx():
    """Test nearest index to intersection functionality."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x**2
    y2 = 100 * x - 650
    truth = np.array([2, 12])
    assert_array_equal(truth, nearest_intersection_idx(y1, y2))


@pytest.mark.parametrize('direction, expected', [
    ('all', np.array([[8.88, 24.44], [238.84, 1794.53]])),
    ('increasing', np.array([[24.44], [1794.53]])),
    ('decreasing', np.array([[8.88], [238.84]]))
])
def test_find_intersections(direction, expected):
    """Test finding the intersection of two curves functionality."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x**2
    y2 = 100 * x - 650
    # Note: Truth is what we will get with this sampling, not the mathematical intersection
    assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2)


def test_find_intersections_no_intersections():
    """Test finding the intersection of two curves with no intersections."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x + 0
    y2 = 5 * x + 5
    # Note: Truth is what we will get with this sampling, not the mathematical intersection
    truth = np.array([[],
                      []])
    assert_array_equal(truth, find_intersections(x, y1, y2))


def test_find_intersections_invalid_direction():
    """Test exception if an invalid direction is given."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x ** 2
    y2 = 100 * x - 650
    with pytest.raises(ValueError):
        # 'increaing' is deliberately misspelled to trigger the error path.
        find_intersections(x, y1, y2, direction='increaing')


def test_interpolate_nan_linear():
    """Test linear interpolation of arrays with NaNs in the y-coordinate."""
    x = np.linspace(0, 20, 15)
    y = 5 * x + 3
    # Punch NaN holes into an exactly-linear series; interpolation should
    # recover the original values.
    nan_indexes = [1, 5, 11, 12]
    y_with_nan = y.copy()
    y_with_nan[nan_indexes] = np.nan
    assert_array_almost_equal(y, interpolate_nans(x, y_with_nan), 2)


def test_interpolate_nan_log():
    """Test log interpolation of arrays with NaNs in the y-coordinate."""
    x = np.logspace(1, 5, 15)
    y = 5 * np.log(x) + 3
    # Series is linear in log(x), so log-kind interpolation recovers it.
    nan_indexes = [1, 5, 11, 12]
    y_with_nan = y.copy()
    y_with_nan[nan_indexes] = np.nan
    assert_array_almost_equal(y, interpolate_nans(x, y_with_nan, kind='log'), 2)


def test_interpolate_nan_invalid():
    """Test log interpolation with invalid parameter."""
    x = np.logspace(1, 5, 15)
    y = 5 * np.log(x) + 3
    with pytest.raises(ValueError):
        # 'loog' is deliberately misspelled to trigger the error path.
        interpolate_nans(x, y, kind='loog')
| # Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for `calc.tools` module."""
import numpy as np
import pytest
from metpy.calc import find_intersections, nearest_intersection_idx, resample_nn_1d
from metpy.testing import assert_array_almost_equal, assert_array_equal
def test_resample_nn():
    """Test 1d nearest neighbor functionality."""
    a = np.arange(5.)
    b = np.array([2, 3.8])
    truth = np.array([2, 4])
    assert_array_equal(truth, resample_nn_1d(a, b))


def test_nearest_intersection_idx():
    """Test nearest index to intersection functionality."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x**2
    y2 = 100 * x - 650
    truth = np.array([2, 12])
    assert_array_equal(truth, nearest_intersection_idx(y1, y2))


@pytest.mark.parametrize('direction, expected', [
    ('all', np.array([[8.88, 24.44], [238.84, 1794.53]])),
    ('increasing', np.array([[24.44], [1794.53]])),
    ('decreasing', np.array([[8.88], [238.84]]))
])
def test_find_intersections(direction, expected):
    """Test finding the intersection of two curves functionality."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x**2
    y2 = 100 * x - 650
    # Note: Truth is what we will get with this sampling, not the mathematical intersection
    assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2)


def test_find_intersections_no_intersections():
    """Test finding the intersection of two curves with no intersections."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x + 0
    y2 = 5 * x + 5
    # Note: Truth is what we will get with this sampling, not the mathematical intersection
    truth = np.array([[],
                      []])
    assert_array_equal(truth, find_intersections(x, y1, y2))


def test_find_intersections_invalid_direction():
    """Test exception if an invalid direction is given."""
    x = np.linspace(5, 30, 17)
    y1 = 3 * x ** 2
    y2 = 100 * x - 650
    with pytest.raises(ValueError):
        # 'increaing' is deliberately misspelled to trigger the error path.
        find_intersections(x, y1, y2, direction='increaing')
| Python | 0.000033 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.