commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
7aabf3383b8386602ed77500ad3b4914028c97a4 | add tuple | bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample,bg1bgst333/Sample | python/tuple/tuple/src/tuple/tuple.py | python/tuple/tuple/src/tuple/tuple.py | #!/usr/bin/python
lst = [10, 20, 30]
print "lst = %s" % lst
lst[1] = 15
print "lst = %s" % lst
tpl = (100, 200, 300)
print tpl[0]
print tpl[1]
print tpl[2]
#tpl[1] = 150
| mit | Python | |
9ca2291f49188b5ecd6df09fd5230e321eb91b2d | Add FileHandler tests. | dvarrazzo/logbook,RazerM/logbook,maykinmedia/logbook,fayazkhan/logbook,Rafiot/logbook,DasIch/logbook,Rafiot/logbook,alonho/logbook,FintanH/logbook,Rafiot/logbook,maykinmedia/logbook,pombredanne/logbook,fayazkhan/logbook,mbr/logbook,alex/logbook,alex/logbook,alonho/logbook,redtoad/logbook,DasIch/logbook,mbr/logbook,DasIch/logbook,dommert/logbook,dvarrazzo/logbook,omergertel/logbook,narfdotpl/logbook,omergertel/logbook,alonho/logbook,omergertel/logbook,redtoad/logbook,mitsuhiko/logbook | test_logbook.py | test_logbook.py | import logbook
import os
import shutil
import unittest
import tempfile
class LogbookTestCase(unittest.TestCase):
def setUp(self):
self.log = logbook.Logger('testlogger')
class BasicAPITestCase(LogbookTestCase):
def test_basic_logging(self):
handler = logbook.TestHandler()
with handler.contextbound(bubble=False):
self.log.warn('This is a warning. Nice hah?')
assert handler.has_warning('This is a warning. Nice hah?')
assert handler.formatted_records == [
'[WARNING] testlogger: This is a warning. Nice hah?'
]
class HandlerTestCase(LogbookTestCase):
def setUp(self):
LogbookTestCase.setUp(self)
self.dirname = tempfile.mkdtemp()
self.filename = os.path.join(self.dirname, 'log.tmp')
def tearDown(self):
shutil.rmtree(self.dirname)
LogbookTestCase.tearDown(self)
def test_file_handler(self):
formatter = logbook.SimpleFormatter(
'{record.level_name}:{record.logger_name}:{record.message}')
handler = logbook.FileHandler(self.filename)
handler.formatter = formatter
with handler.contextbound():
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
def test_lazy_file_handler(self):
formatter = logbook.SimpleFormatter(
'{record.level_name}:{record.logger_name}:{record.message}')
handler = logbook.LazyFileHandler(self.filename)
handler.formatter = formatter
self.assertFalse(os.path.isfile(self.filename))
with handler.contextbound():
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
if __name__ == '__main__':
unittest.main()
| import unittest
import logbook
class BasicAPITestCase(unittest.TestCase):
def test_basic_logging(self):
logger = logbook.Logger('Test Logger')
handler = logbook.TestHandler()
with handler.contextbound(bubble=False):
logger.warn('This is a warning. Nice hah?')
assert handler.has_warning('This is a warning. Nice hah?')
assert handler.formatted_records == [
'[WARNING] Test Logger: This is a warning. Nice hah?'
]
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
f3522fedb75b72476419779850e7bbc5fa277c70 | Add generator | jmigual/IA2016,jmigual/IA2016 | Planificador/gamesCreator.py | Planificador/gamesCreator.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import random;
class Exercice:
def __init__(self, num, level):
self.preparator = []
self.precursor = []
self.num = num
self.level = level
def __cmp__(self, other):
if not isinstance(other, Exercice):
return NotImplemented
return self.num == other.num
def __str__(self):
return "Ex" + str(self.num)
def __repr__(self):
return self.__str__()
def writeHeaders(file, name, exercices):
file.write(" (problem " + name + ")\n (:domain exercices)\n")
def writeObjects(file, exercices):
file.write(" (:objects \n")
file.write(" ")
for e in exercices:
file.write(str(e) + " ")
file.write(" - exercice\n")
file.write(" ")
for i in range(10):
file.write("l" + str(i+1) + " ")
file.write(" - level\n")
file.write(" ")
for i in range(15):
file.write("d" + str(i+1) + " ")
file.write(" - day\n")
file.write(" )\n")
def writeInit(file, exercices):
file.write(" (:init\n")
file.write(" (currentDay d1)\n\n")
# Write levels
for i in range(9):
file.write(" (lower l" + str(i+1) + " l" + str(i+2) + ")\n")
file.write("\n")
# Write days
for i in range(14):
file.write(" (before d" + str(i+1) + " d" + str(i+2) + ")\n")
file.write("\n")
for e in exercices:
file.write(" (exLevel " + str(e) + " l" + str(e.level) + ")\n")
file.write("\n")
# Write precursors
for e in exercices:
for p in e.precursor:
file.write(" (precursor " + str(p) + " " + str(e) + ")\n")
file.write("\n")
# Write preparators
for e in exercices:
for p in e.preparator:
file.write(" (preparator " + str(p) + " " + str(e) + ")\n")
file.write("\n")
file.write(" )\n")
def writeGoal(file, sample):
file.write(" (:goal (and (currentDay d15) ")
for e in sample:
file.write("(exLevel " + str(e) + " l10) ")
file.write("))\n")
def main():
name = raw_input("Nombre del problema (default = ExerciceN): ")
if len(name) == 0:
name = "ExerciceN"
try:
numExercices = int(raw_input("Quantos ejercicios quieres (default = 15)? "))
except ValueError:
numExercices = 15
try:
numFinal = int(raw_input("Quantos ejercicios quieres subir al nivel 10 (default = %d, maximo = %d)? " % (numExercices/4, numExercices)))
except ValueError:
numFinal = numExercices/4
try:
numPrecursor = int(raw_input("Quantos precursores quieres (default = %d, maximo = %d)? " % (numExercices/3, numExercices - 1)))
if numPrecursor >= numExercices:
numPrecursor -= 1
except ValueError:
numPrecursor = numExercices/4
try:
numPreparator = int(raw_input("Quantos preparadores quieres (default = %d, maximo = %d)? " % (numExercices/3, numExercices - 1)))
if numPreparator >= numExercices:
numPreparator -= 1
except ValueError:
numPreparator = numExercices/4
print "\nGracias! Estamos generando el problema, por favor espera."
exercices = []
for i in range(numExercices):
exercices.append(Exercice(i, random.randint(1, 10)))
while numPrecursor >= 0:
r = random.randint(0, numExercices - 2)
# Check that this is not already added
addedAlready = False
for e in exercices[r].precursor:
if (e == exercices[r]):
addedAlready = True
if addedAlready:
continue
exercices[r].precursor.append(exercices[r + 1])
numPrecursor -= 1
while numPreparator >= 0:
r = random.randint(0, numExercices - 2)
# Check that this is not already added
addedAlready = False
for e in exercices[r].preparator:
if (e == exercices[r]):
addedAlready = True
if addedAlready:
continue
exercices[r].preparator.append(exercices[r + 1])
numPreparator -= 1
print "Escribiendo el problema en el arxivo " + name + ".pddl"
with open(name + ".pddl", "w") as file:
file.write("(define\n")
writeHeaders(file, name, exercices)
writeObjects(file, exercices)
writeInit(file, exercices)
writeGoal(file, random.sample(exercices, numFinal))
file.write(")\n")
print "Finalizado!"
if __name__ == '__main__':
main() | mit | Python | |
0a501aca74224581228cf695d8d7d46d845ef8df | Create BPLY.py | icfaust/TRIPPy,icfaust/TRIPPy | BPLY.py | BPLY.py | import AXUV20
import surface
import geometry
| mit | Python | |
d1e6f060edd7b09f84892df95611fc3f52a6fb5a | Rename results file | chengsoonong/crowdastro,chengsoonong/crowdastro | crowdastro/experiment/results.py | crowdastro/experiment/results.py | """Object for storing experimental results.
Matthew Alger
The Australian National University
2016
"""
import h5py
import numpy
class Results(object):
"""Stores experimental results."""
def __init__(self, path, methods, n_splits, n_examples):
"""
path: Path to the results file (h5). File will be created if it does not
already exist.
methods: List of methods being tested.
n_splits: Number of data splits.
n_examples: Number of examples.
"""
if not path.endswith('.h5'):
path += '.h5'
self.h5_path = path
self.methods = {method:index for index, method in enumerate(methods)}
self.n_methods = len(methods)
self.n_splits = n_splits
self.n_examples = n_examples
try:
with h5py.File(path, 'r+') as f:
self._create(f)
except OSError:
with h5py.File(path, 'w') as f:
self._create(f)
# We could store a reference to the actual file, but then there's no
# guarantee we'll close it safely later.
def _create(self, f):
"""Creates the results dataset."""
if 'results' not in f:
f.create_dataset('results',
shape=(self.n_methods, self.n_splits, self.n_examples))
def store_result(self, method, split, example, result):
"""Stores a single result.
method: Method. str
split: Split ID. int
example: Example ID. int
result: Result to store. float
"""
with h5py.File(self.h5_path, 'r+') as f:
f['results'][self.methods[method], split, example] = result
def store_trial(self, method, split, results):
"""Stores results from one trial.
method: Method ID. int
split: Split ID. int
results: Results for each example. (n_examples,) array
"""
with h5py.File(self.h5_path, 'r+') as f:
f['results'][self.methods[method], split] = results
def __getitem__(self, *args, **kwargs):
with h5py.File(self.h5_path, 'r+') as f:
try:
args = (self.methods[args[0]],) + args[1:]
except TypeError:
pass
return f['results'].__getitem__(*args, **kwargs)
def __repr__(self):
return 'Results({}, methods={}, n_splits={}, n_examples={})'.format(
repr(self.h5_path),
sorted(self.methods, key=self.methods.get),
self.n_splits, self.n_examples)
| mit | Python | |
52f5eb3e39d7cc01d2d149d87eb63086028ca265 | add integration tests for settings | gentoo/identity.gentoo.org,dastergon/identity.gentoo.org,dastergon/identity.gentoo.org,gentoo/identity.gentoo.org | okupy/tests/integration/test_settings.py | okupy/tests/integration/test_settings.py | # vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from mockldap import MockLdap
from okupy.tests import vars
class SettingsIntegrationTests(TestCase):
@classmethod
def setUpClass(cls):
cls.mockldap = MockLdap(vars.DIRECTORY)
def setUp(self):
self.client = Client()
self.mockldap.start()
self.ldapobject = self.mockldap[settings.AUTH_LDAP_SERVER_URI]
def tearDown(self):
self.mockldap.stop()
def test_profile_settings_page_uses_correct_template(self):
response = self.client.post('/login/', vars.LOGIN_ALICE)
response = self.client.get('/')
response = self.client.get('/profile-settings/')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateUsed(response, 'settings-profile.html')
def test_password_settings_page_uses_correct_template(self):
response = self.client.post('/login/', vars.LOGIN_ALICE)
response = self.client.get('/')
response = self.client.get('/password-settings/')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateUsed(response, 'settings-password.html')
def test_email_settings_page_uses_correct_template(self):
response = self.client.post('/login/', vars.LOGIN_ALICE)
response = self.client.get('/')
response = self.client.get('/email-settings/')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateUsed(response, 'settings-email.html')
def test_contact_settings_page_uses_correct_template(self):
response = self.client.post('/login/', vars.LOGIN_ALICE)
response = self.client.get('/')
response = self.client.get('/contact-settings/')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateUsed(response, 'settings-contact.html')
def test_gentoo_account_settings_page_uses_correct_template(self):
response = self.client.post('/login/', vars.LOGIN_ALICE)
response = self.client.get('/')
response = self.client.get('/gentoo-dev-settings/')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateUsed(response, 'settings-gentoo.html')
def test_profile_settings_page_returns_404_for_non_authenticated_users(self):
response = self.client.get('/profile-settings/')
self.assertTrue(response.status_code, 404)
def test_password_settings_page_returns_404_for_non_authenticated_users(self):
response = self.client.get('/password-settings/')
self.assertTrue(response.status_code, 404)
def test_email_settings_page_returns_404_for_non_authenticated_users(self):
response = self.client.get('/email-settings/')
self.assertTrue(response.status_code, 404)
def test_contact_setttings_page_returns_404_for_non_authenticated_users(self):
response = self.client.get('/contact-settings/')
self.assertTrue(response.status_code, 404)
def test_gentoo_account_settings_page_returns_404_for_non_authenticated_users(self):
response = self.client.get('/gentoo-dev-settings/')
self.assertTrue(response.status_code, 404)
| agpl-3.0 | Python | |
fd540540fa269279502b89ef83f1daaf38210162 | move only module to __init__.py | bobpoekert/trulia_fetcher | __init__.py | __init__.py | import requests, re, json
import lxml.etree
import lxml.html.soupparser
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/536.26.14 (KHTML, like Gecko) Version/6.0.1 Safari/536.26.14'
json_getter = re.compile(r'trulia\.propertyData\.set\((.*?)\);')
def get(url):
return requests.get(url, headers={'User-Agent':user_agent})
def parse(html):
try:
return lxml.etree.fromstring(html)
except:
return lxml.html.soupparser.fromstring(html)
def get_data(city, kind='for_rent',
price=None, pets=None, amenities=None):
"""
Get property listings from Trulia.
city: The name (city, state) of the city to look in
kind: for_rent or for_sale
returns: itreator of dicts with result data"""
assert kind in ('for_rent','for_sale')
city = city.replace(', ', ',').replace(' ', '_')
url_chunks = []
if price:
url_chunks.append('%d-%d_price' % price)
if pets:
url_chunks.append('%s_pets' % pets)
if amenities:
if isinstance(amenities, str):
url_chunks.append('%s_amenities' % amenities)
else:
for e in amenities:
url_chunks.append('%s_amenities' % e)
base_url = 'http://trulia.com/%s/%s/%s' % (kind, city, '/'.join(url_chunks))
print base_url
first_page = get(base_url).text
res = parse(first_page).xpath("id('4_paging')/a[last()]")[0]
page_count = int(res.text)
for page in xrange(page_count):
if page == 0:
html = first_page
else:
html = get('%s/%d_p/' % (base_url, page)).text
for blob in json_getter.finditer(html):
for e in json.loads(blob.group(1)):
yield e
| mit | Python | |
b1b9d1c9e46bcc92c9046450d92ae8e89f2d2998 | Create __init__.py | Strain88/VKapi2 | __init__.py | __init__.py | __version__ = '0.9.5'
from VKapi2.exceptions import ApiException
from VKapi2.api import Api
from VKapi2.api import ApiHelper
import sys
import logging
import requests
logger = logging.getLogger('VK_api_2')
formatter = logging.Formatter(
'%(asctime)s (%(filename)s:%(lineno)d %(threadName)s) %(levelname)s - %(name)s: "%(message)s"'
)
#logging in console:
#console_output_handler = logging.StreamHandler(sys.stderr)
#console_output_handler.setFormatter(formatter)
#logger.addHandler(console_output_handler)
#logging to fille:
file_output_handler = logging.FileHandler(filename='VK_api2.log', mode='wb')
file_output_handler.setFormatter(formatter)
logger.addHandler(file_output_handler)
logger.setLevel(logging.ERROR)
class Session(object):
"""
This class will take stand-alone token and generate requests to VK
"""
API_URL = "https://api.vk.com/method/{0}"
CONNECT_TIMEOUT = 5
READ_TIMEOUT = 9999
def __init__(self, access_token):
self.at = access_token
"""
Gets stand-alone app token.
:param access_token: standalone app token.
"""
@staticmethod
def _make_request(method_name, method='get', params=None, files=None, base_url=API_URL, timeout=100):
"""
Makes a request to VK api.
:param method_name: Name of the API method to be called. (E.g. 'getUploadServer')
:param method: HTTP method to be used. GET by default.
:param params: Optional parameters. Should be a dictionary with key-value pairs.
:param files: Optional files.
:return: The result parsed to a JSON dictionary.
"""
request_url = base_url.format(method_name)
logger.debug("Request: method={0}, url={1}, params={2}, files={3}".format(method, request_url, params, files))
read_timeout=Session.READ_TIMEOUT
if params:
if 'timeout' in params: read_timeout=params['timeout'] + 10
r = requests.request(method, request_url, params=params, files=files, timeout=(Session.CONNECT_TIMEOUT, read_timeout))
logger.debug("This server returned: '{0}'".format(r.text.encode('utf8')))
return Session._check_result(method_name, r)
@staticmethod
def _check_result(method_name, result):
"""
Checks whether `result` is a valid API response.
A result is considered invalid if:
- The server returned an HTTP response code other than 200
- The content of the result is invalid JSON.
- The method call was unsuccessful (The JSON 'ok' field equals False)
:raises ApiException: if one of the above listed cases is applicable
:param method_name: The name of the method called
:param result: The returned result of the method request
:return: The result parsed to a JSON dictionary.
"""
if result.status_code != 200:
msg = 'The server returned HTTP {0} {1}. Response body:\n[{2}]' \
.format(result.status_code, result.reason, result.text.encode('utf8'))
raise ApiException(msg, method_name, result)
try:
result_json = result.json()
except:
msg = 'The server returned an invalid JSON response. Response body:\n[{0}]' \
.format(result.text.encode('utf8'))
raise ApiException(msg, method_name, result)
return result_json
| mit | Python | |
5f80b313c436bdac9eb8db4e9f835a210833c106 | Add __init__.py. | DiscoViking/pyicemon,DiscoViking/pyicemon | __init__.py | __init__.py | import messages
from connection import Connection
from monitor import Monitor
from publisher import Publisher
| mit | Python | |
c5f16d5d8c85383a3735b871962f70509109ebd7 | Create __init__.py | deepak223098/Data_Science_Python | __init__.py | __init__.py | bsd-3-clause | Python | ||
a3e3b5f0964d700732d623a3dcafb0056f4eb1eb | Update __version__ | jleclanche/dj-stripe,mthornhill/dj-stripe,jleclanche/dj-stripe,kavdev/dj-stripe,doctorwidget/dj-stripe,cjrh/dj-stripe,pydanny/dj-stripe,doctorwidget/dj-stripe,photocrowd/dj-stripe,cjrh/dj-stripe,dj-stripe/dj-stripe,mthornhill/dj-stripe,jameshiew/dj-stripe,jameshiew/dj-stripe,pydanny/dj-stripe,tkwon/dj-stripe,dj-stripe/dj-stripe,photocrowd/dj-stripe,kavdev/dj-stripe,tkwon/dj-stripe,LaunchlabAU/dj-stripe,areski/dj-stripe,LaunchlabAU/dj-stripe,areski/dj-stripe | djstripe/__init__.py | djstripe/__init__.py | from __future__ import unicode_literals
import warnings
from django import get_version as get_django_version
__title__ = "dj-stripe"
__summary__ = "Django + Stripe Made Easy"
__uri__ = "https://github.com/pydanny/dj-stripe/"
__version__ = "0.7.0-dev"
__author__ = "Daniel Greenfeld"
__email__ = "pydanny@gmail.com"
__license__ = "BSD"
__license__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2015 Daniel Greenfeld"
if get_django_version() <= '1.6.x':
msg = "dj-stripe deprecation notice: Django 1.6 and lower are deprecated\n" \
"and will be removed in dj-stripe 0.6.0.\n" \
"Reference: https://github.com/pydanny/dj-stripe/issues/173"
warnings.warn(msg)
| from __future__ import unicode_literals
import warnings
from django import get_version as get_django_version
__title__ = "dj-stripe"
__summary__ = "Django + Stripe Made Easy"
__uri__ = "https://github.com/pydanny/dj-stripe/"
__version__ = "0.5.0"
__author__ = "Daniel Greenfeld"
__email__ = "pydanny@gmail.com"
__license__ = "BSD"
__license__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2015 Daniel Greenfeld"
if get_django_version() <= '1.6.x':
msg = "dj-stripe deprecation notice: Django 1.6 and lower are deprecated\n" \
"and will be removed in dj-stripe 0.6.0.\n" \
"Reference: https://github.com/pydanny/dj-stripe/issues/173"
warnings.warn(msg)
| mit | Python |
57bd45f13a091dd6fe40ecd960dc7e6b00d60def | support for epics ioc to host XRF scan plans. | NSLS-II-SRX/ipython_ophyd,NSLS-II-SRX/ipython_ophyd,NSLS-II-SRX/ipython_ophyd | profile_xf05id1/startup/30-scanutils.py | profile_xf05id1/startup/30-scanutils.py | from ophyd import EpicsSignal,EpicsSignalRO,Device
from ophyd import Component as Cpt
class SRXScanRecord(Device):
class OneScan(Device):
p1s = Cpt(EpicsSignal, 'P1-S')
p2s = Cpt(EpicsSignal, 'P2-S')
p1i = Cpt(EpicsSignal, 'P1-I')
p2i = Cpt(EpicsSignal, 'P2-I')
p1stp = Cpt(EpicsSignal, 'P1-STP')
p2stp = Cpt(EpicsSignal, 'P2-STP')
p1npts = Cpt(EpicsSignalRO, 'P1-NPTS')
p2npts = Cpt(EpicsSignalRO, 'P2-NTPS')
p1e = Cpt(EpicsSignalRO, 'P1-E')
p2e = Cpt(EpicsSignalRO, 'P2-E')
p1ena = Cpt(EpicsSignal, 'P1-ENA')
p2ena = Cpt(EpicsSignal, 'P2-ENA')
curpt = Cpt(EpicsSignal, 'CURPT')
npts = Cpt(EpicsSignalRO, 'NPTS')
tpp = Cpt(EpicsSignal, 'TPP')
ena = Cpt(EpicsSignal, 'ENA')
acq = Cpt(EpicsSignal, 'ACQ')
# scans = [ Cpt(OneScan,'Scan'+str(i)+':') for i in range(0,8) ]
scan0 = Cpt(OneScan,'Scan0:')
scan1 = Cpt(OneScan,'Scan1:')
scan2 = Cpt(OneScan,'Scan2:')
scan3 = Cpt(OneScan,'Scan3:')
scan4 = Cpt(OneScan,'Scan4:')
scan5 = Cpt(OneScan,'Scan5:')
scan6 = Cpt(OneScan,'Scan6:')
scan7 = Cpt(OneScan,'Scan7:')
current_scan = Cpt(EpicsSignal,'Scan:CUR')
time_remaining = Cpt(EpicsSignal,'Scan:REMTIME')
scanning = Cpt(EpicsSignal, 'Scan:ENA')
scanrecord = SRXScanRecord('XF:05IDA-CT{IOC:ScanBroker01}')
scanrecord.scan0.p1s.put(25.0)
scanrecord.scan0.p2s.put(20.0)
scanrecord.scan0.p1i.put(0.010)
scanrecord.scan0.p2i.put(0.010)
scanrecord.scan0.p1stp.put(3)
scanrecord.scan0.p2stp.put(3)
scanrecord.scan0.ena.put(1)
scanrecord.scan0.acq.put(1)
| bsd-2-clause | Python | |
71c8e8e3d6395b51a453e591bee882cd3e024d26 | Create solve-the-equation.py | kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/solve-the-equation.py | Python/solve-the-equation.py | # Time: O(n)
# Space: O(n)
# Solve a given equation and return the value of x in the form of string "x=#value".
# The equation contains only '+', '-' operation, the variable x and its coefficient.
#
# If there is no solution for the equation, return "No solution".
#
# If there are infinite solutions for the equation, return "Infinite solutions".
#
# If there is exactly one solution for the equation, we ensure that the value of x is an integer.
#
# Example 1:
# Input: "x+5-3+x=6+x-2"
# Output: "x=2"
# Example 2:
# Input: "x=x"
# Output: "Infinite solutions"
# Example 3:
# Input: "2x=x"
# Output: "x=0"
# Example 4:
# Input: "2x+3x-6x=x+2"
# Output: "x=-1"
# Example 5:
# Input: "x=x+2"
# Output: "No solution"
class Solution(object):
def solveEquation(self, equation):
"""
:type equation: str
:rtype: str
"""
a, b = 0, 0
side = 1
for eq, sign, num, isx in re.findall('(=)|([-+]?)(\d*)(x?)', equation):
if eq:
side = -1
elif isx:
a += side * int(sign + '1') * int(num or 1)
elif num:
b -= side * int(sign + num)
return 'x=%d' % (b / a) if a else 'No solution' if b else 'Infinite solutions'
| mit | Python | |
97bf042815dff04a87b3321d1c269036ab9e7794 | Create __init__.py | ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station | __init__.py | __init__.py | mit | Python | ||
73ce4aafc7eed616adb36a9b9f7268b696fbd489 | Create __init__.py | sanmik/brain_network_viz,sanmik/brain_network_viz | __init__.py | __init__.py | from source import networkviz as networkviz
| mit | Python | |
4dc7c6be7b3f6741e787731801b4d853c1f24f1f | Add mkerefuse.__main__ module | tomislacker/python-mke-trash-pickup,tomislacker/python-mke-trash-pickup | mkerefuse/__main__.py | mkerefuse/__main__.py | #!/usr/bin/env python3
"""
Looks up addresses to discover future garbage and/or recycle pickup dates
within the city of Milwaukee, WI
Usage:
mkerefuse --help
mkerefuse [options]
Options:
-a, --address STRING House number
-d, --direction STRING Address direction (N, S, E, W)
-s, --street STRING Street Name (ex: '27th')
-t, --street-type STRING Street Type
-T, --types List all Street Types
"""
import logging
import sys
from docopt import docopt
from mkerefuse import __version__
from mkerefuse.util import LogProducer
from mkerefuse.util import setup_logging
setup_logging()
# Define a logger
log = logging.getLogger('mke-refuse')
# Parse arguments
log.debug("Parsing arguments")
args = docopt(__doc__, version=__version__)
# Load the library
from mkerefuse.refuse import RefuseQuery
from mkerefuse.refuse import RefuseQueryAddress
# If the street type listing was asked for, display that and exit
if args['--types']:
log.debug("Listing Street Type values")
print("- " + "\n- ".join(RefuseQueryAddress.STREET_TYPES))
sys.exit(0)
# Define the address
log.debug("Composing query address")
address = RefuseQueryAddress(
house_number=args['--address'],
direction=args['--direction'],
street_name=args['--street'],
street_type=args['--street-type'])
# Execute the query
log.info("Executing query...")
pickup = RefuseQuery.Execute(address)
log.info("Query returned")
# Show the results
print(repr(pickup))
| unlicense | Python | |
bf05dd9c1b39c8752aae60997fe5aca6e5572584 | Create gae_pylint_plugin.py | tianhuil/gae_pylint_plugin | gae_pylint_plugin.py | gae_pylint_plugin.py | from astroid import MANAGER
from astroid import scoped_nodes
NDB_PROPERTIES = [
'DateTimeProperty',
'StringProperty',
'KeyProperty',
'StructuredProperty'
]
def register(linter):
pass
def transform(modu):
if modu.name == 'google.appengine.ext.ndb':
for f in NDB_PROPERTIES:
modu.locals[f] = [scoped_nodes.Class(f, None)]
MANAGER.register_transform(scoped_nodes.Module, transform)
| apache-2.0 | Python | |
e0ae0c605fd30f4f303abf390aaa6cbfa18cee39 | Bump version 1.22.3 | akittas/geocoder,DenisCarriere/geocoder | geocoder/__init__.py | geocoder/__init__.py | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
"""
Geocoder
~~~~~~~~
Simple and consistent geocoding library written in Python.
Many online providers such as Google & Bing have geocoding services,
these providers do not include Python libraries and have different
JSON responses between each other.
Consistant JSON responses from various providers.
>>> g = geocoder.google('New York City')
>>> g.latlng
[40.7127837, -74.0059413]
>>> g.state
'New York'
>>> g.json
...
"""
__title__ = 'geocoder'
__author__ = 'Denis Carriere'
__author_email__ = 'carriere.denis@gmail.com'
__version__ = '1.22.3'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2013-2016 Denis Carriere'
# CORE
from geocoder.api import get, yahoo, bing, geonames, mapquest, google, mapbox # noqa
from geocoder.api import nokia, osm, tomtom, geolytica, arcgis, opencage # noqa
from geocoder.api import maxmind, ipinfo, freegeoip, ottawa, here, baidu, w3w # noqa
from geocoder.api import yandex, mapzen, komoot, tamu, geocodefarm, tgos, uscensus # noqa
# EXTRAS
from geocoder.api import timezone, elevation, places, ip, canadapost, reverse, distance, location # noqa
# CLI
from geocoder.cli import cli # noqa
| #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
"""
Geocoder
~~~~~~~~
Simple and consistent geocoding library written in Python.
Many online providers such as Google & Bing have geocoding services,
these providers do not include Python libraries and have different
JSON responses between each other.
Consistant JSON responses from various providers.
>>> g = geocoder.google('New York City')
>>> g.latlng
[40.7127837, -74.0059413]
>>> g.state
'New York'
>>> g.json
...
"""
__title__ = 'geocoder'
__author__ = 'Denis Carriere'
__author_email__ = 'carriere.denis@gmail.com'
__version__ = '1.21.2'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2013-2016 Denis Carriere'
# CORE
from geocoder.api import get, yahoo, bing, geonames, mapquest, google, mapbox # noqa
from geocoder.api import nokia, osm, tomtom, geolytica, arcgis, opencage # noqa
from geocoder.api import maxmind, ipinfo, freegeoip, ottawa, here, baidu, w3w # noqa
from geocoder.api import yandex, mapzen, komoot, tamu, geocodefarm, tgos, uscensus # noqa
# EXTRAS
from geocoder.api import timezone, elevation, places, ip, canadapost, reverse, distance, location # noqa
# CLI
from geocoder.cli import cli # noqa
| mit | Python |
463de112add0178b6e3aa13b75bc7ae8dc26b612 | Create __openerp__.py | Therp/message_box_wizard | __openerp__.py | __openerp__.py | # -*- encoding: utf-8 -*-
{
'name': 'Message box dialog',
'application': False,
'version': '8.0.r1.0',
'category': 'General',
'description': '''
Allows to ask a question through a dialog.
Via a wizard your question is asked.
Buttons can be hidden or their labels can be set
* Creates a record in message_box.
* Processes reply in call to your model's process_reply().
* Example of code in your wizard (or model):
....
ctx.update({"active_model": self._name,
. "active_id": wiz.id,
. "active_ids": [wiz.id],
. "reply_labels": ['Cancel', 'Proceed']
. })
. # adjust reply_labels for buttons returning 0, 1, .. upto 4
. # buttons 2, 3 and 4 will be invisible by default
. # special value 'invisible' will hide a button
return self.pool.get("message.box").ask_question(
. cr, uid, {"question": "Do you really need an answer?",}, context=ctx)
....
def process_reply(self, cr, uid, ids, reply, context=None):
. ....
. return {
. "res_id": res_id,
. "name": self._description,
. "view_type": "form",
. "view_mode": "form",
. "res_model": self._name,
. "domain": [],
. "target": "new",
. "context": context,
. "type": "ir.actions.act_window",
. }
''',
'author': 'Therp BV',
'website': 'http://www.therp.nl',
'depends': [
],
'data': [
'wizard/message_box.xml',
],
'demo_xml': [],
'qweb': [],
'installable': True,
}
| agpl-3.0 | Python | |
cf060a473bb2412f97301ab72f96a57d370bc79f | Add IncrementalModel | bgyori/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,pvtodorov/indra,sorgerlab/belpy,pvtodorov/indra,sorgerlab/belpy,johnbachman/belpy,jmuhlich/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,johnbachman/belpy,sorgerlab/indra,jmuhlich/indra,johnbachman/indra,jmuhlich/indra | models/rasmachine/incremental_model.py | models/rasmachine/incremental_model.py | import pickle
from indra.assemblers import PysbAssembler
class IncrementalModel(object):
def __init__(self, fname=None):
if fname is None:
self.stmts = {}
else:
try:
self.stmts = pickle.load(open(fname, 'rb'))
except:
print 'Could not load %s' % fname
self.stmts = {}
def save(self, fname='model.pkl'):
with open(fname, 'wb') as fh:
pickle.dump(self.stmts, fh)
def add_statements(self, pmid, stmts):
self.stmts[pmid] = stmts
def get_statements(self):
stmt_lists = [v for k, v in self.stmts.iteritems()]
stmts = []
for s in stmt_lists:
stmts += s
return stmts
def make_model(self):
pa = PysbAssembler()
pa.add_statements(self.get_statements())
pa.make_model()
return pa.model
| bsd-2-clause | Python | |
12e7e7b29a91e67755c5d9b9e29b4a8614def1bc | Add module disable framework | Motoko11/MotoBot | motobot/core_plugins/disable_module.py | motobot/core_plugins/disable_module.py | from motobot import command, request, IRCLevel, Priority, Notice, split_response
@request('GET_PLUGINS')
def disabled_request(bot, context, channel):
disabled = context.database.get({}).get(channel.lower(), set())
return filter_plugins(bot.plugins, disabled)
def filter_plugins(plugins, disabled):
return filter(
lambda plugin: plugin.priority == Priority.max or
plugin.module.lower() not in disabled,
plugins
)
@command('module', priority=Priority.max, level=IRCLevel.op)
def module_command(bot, context, message, args):
    """ Command to enable or disable modules in the bot.
    The 'enable' and 'disable' argument both require a module argument.
    The 'show' argument will show the currently disabled modules in the channel.
    The 'list' argument will return a list of modules in the bot.
    If 'list' is called with an argument, it'll return a list of plugins in that module.
    The 'get' argument will return the module a particular command is located in.
    """
    try:
        subcommand = args[1].lower()
        # Joining a slice never raises, so computing this up front is safe
        # for every subcommand (it is simply unused by 'show').
        module = ' '.join(args[2:])
        if subcommand == 'enable':
            response = enable_module(context.database, context.channel, module)
        elif subcommand == 'disable':
            response = disable_module(bot, context.database, context.channel, module)
        elif subcommand == 'show':
            response = show_disabled_modules(context.database, context.channel)
        elif subcommand == 'list':
            response = list_module(bot, module)
        elif subcommand == 'get':
            response = get_module(bot, module)
        else:
            response = "Error: Invalid argument."
    except IndexError:
        response = "Error: Too few arguments supplied."
    return response, Notice(context.nick)
def enable_module(database, channel, module):
    # Placeholder until per-channel module enabling is implemented.
    response = "Error: Not yet implemented."
    return response
def disable_module(bot, database, channel, module):
    # Placeholder until per-channel module disabling is implemented.
    response = "Error: Not yet implemented."
    return response
def show_disabled_modules(database, channel):
    # Placeholder until disabled-module reporting is implemented.
    response = "Error: Not yet implemented."
    return response
def list_module(bot, module):
    """List the bot's modules, or the plugins of one module (not implemented).

    Bug fixed: when a module argument was given, the original `if` branch
    was just `pass`, leaving `response` unbound and crashing the final
    return with UnboundLocalError. Report the standard not-implemented
    error instead, matching the sibling stub commands.
    """
    if module:
        response = "Error: Not yet implemented."
    else:
        response = split_response(bot.modules, "Modules: {};")
    return response
def get_module(bot, module):
return "Error: Not yet implemented."
| mit | Python | |
7c4e265808440c8e670651660909e61498c496a0 | add dijkstrashortreach | EdisonAlgorithms/HackerRank,EdisonAlgorithms/HackerRank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,zeyuanxy/hacker-rank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank | algorithms/graph-theory/dijkstrashortreach/dijkstrashortreach.py | algorithms/graph-theory/dijkstrashortreach/dijkstrashortreach.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-12-26 16:28:23
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-12-26 16:28:36
import sys
import queue
class Vertex:
    """A graph node that stores the cheapest known edge to each neighbour."""

    def __init__(self):
        self.edges = {}

    def get_edges(self):
        """Return the {neighbour: distance} mapping for this vertex."""
        return self.edges

    def add_edge(self, vertex, distance):
        """Record an edge, keeping only the shortest distance per neighbour."""
        known = self.edges.get(vertex)
        if known is None or distance < known:
            self.edges[vertex] = distance
class Graph:
    """A collection of vertices keyed 1..N."""

    def __init__(self, N):
        # Insert keys N, N-1, ..., 1 to match the original construction
        # order (dict iteration order is insertion order).
        self.vertices = {}
        for key in range(N, 0, -1):
            self.vertices[key] = Vertex()

    def get_vertices(self):
        """Return the whole {key: Vertex} mapping."""
        return self.vertices

    def get_vertex(self, key):
        """Return the vertex stored under `key` (KeyError if absent)."""
        return self.vertices[key]

    def add_vertex(self, key, vertex):
        """Insert or replace the vertex stored under `key`."""
        self.vertices[key] = vertex
class Dijkstra:
    """Single-source shortest distances over a Graph via a min-priority queue."""
    def __init__(self, graph):
        self.graph = graph
    def calculate(self, start):
        # Returns {vertex: shortest distance from `start`} for every
        # vertex reachable from `start`.
        distances = {}
        adjacents = queue.PriorityQueue()
        adjacents.put((0, start))
        while not adjacents.empty():
            (distance, vertex) = adjacents.get()
            # A vertex can be queued several times with different tentative
            # distances; the first pop is the smallest, later ones are stale.
            if vertex in distances:
                continue
            distances[vertex] = distance
            self.update_adjacents(vertex, distances, adjacents)
        return distances
    def update_adjacents(self, parent, distances, adjacents):
        # Queue every neighbour of `parent` with its tentative distance
        # through `parent`.
        edges = self.graph.get_vertex(parent).get_edges()
        for vertex, distance in edges.items():
            adjacents.put((distances[parent] + distance, vertex)) | mit | Python |
8ee4cef9689ee8a73e3952d46dc93908208b84ea | Add second passing test | DoWhileGeek/google_foobar | maximum_equality.py | maximum_equality.py | '''
Maximum equality
================
Your colleague Beta Rabbit, top notch spy and saboteur, has been working tirelessly to discover a way to break into Professor Boolean's lab and rescue the rabbits being held inside. He has just excitedly informed you of a breakthrough - a secret bridge that leads over a moat (likely filled with alligators, or zombies, or alligator-zombies, given his penchant for zombifying... You should probably stop thinking about it.). The only way over the moat is by a rickety train, but there's a catch: The train is rigged such that under particular conditions, it will throw its passengers overboard. You've determined that the best odds of getting safely across is to have as many cars share the same exact weight as possible. Luckily, all the rabbits' weights are essentially equal. How convenient!
The rabbits are already organized into families, but these families vary in size. In order for this plan to work, you need to find a way to make as many train cars as possible have exactly the same number of passengers.
Beta Rabbit will provide you with an array x, where each element in the array is an integer representing the number of rabbits that want to share a particular car. You can give the command for a rabbit to move from any one car to any other, and you have enough time to give as many commands as you need. In other words, choose two elements of the array, x[i] and x[j] (i distinct from j) and simultaneously increment x[i] by 1 and decrement x[j] by 1. Your goal is to get as many elements of the array to have equal value as you can.
For example, if the array was [1,4,1] you could perform the operations as follows:
Send a rabbit from the 1st car to the 0th: increment x[0], decrement x[1], resulting in [2,3,1]
Send a rabbit from the 1st car to the 2nd: increment x[2], decrement x[1], resulting in [2,2,2].
All the elements are of the array are equal now, and you've got a strategy to report back to Beta Rabbit!
Note that if the array was [1,2], the maximum possible number of equal elements we could get is 1, as the cars could never have the same number of rabbits in them.
Write a function answer(x), which takes the array of integers x and returns the maximum number of equal array elements that we can get, by doing the above described command as many times as needed.
The number of cars in the train (elements in x) will be at least 2, and no more than 100. The number of rabbits that want to share a car (each element of x) will be an integer in the range [0, 1000000].
Languages
=========
To provide a Python solution, edit solution.py
To provide a Java solution, edit solution.java
Test cases
==========
Inputs:
(int list) x = [1, 4, 1]
Output:
(int) 3
Inputs:
(int list) x = [1, 2]
Output:
(int) 1
'''
def answer(x):
    """Return the maximum number of cars that can hold equal rabbit counts.

    All cars can be equalized exactly when the total number of rabbits is
    divisible by the number of cars; otherwise one car must absorb the
    remainder, leaving len(x) - 1 equal cars.
    """
    total = sum(x)
    cars = len(x)
    if total % cars == 0:
        return cars
    return cars - 1
# Sanity checks mirroring the problem statement's test cases.
assert 3 == answer([1,4,1])
assert 1 == answer([1,2])
assert 2 == answer([1,2,1])
| unlicense | Python | |
cd325c15dd23d1047dc0ca8fc5108ee93e38568d | Create บวกลบธรรมดา.py | wannaphongcom/code-python3-blog | บวกลบธรรมดา.py | บวกลบธรรมดา.py | __author__ = 'วรรณพงษ์'
def calculate(expression):
    """Evaluate a simple two-operand expression such as '1+2' or '7//2'.

    Supports +, -, * and // on integers and returns the numeric result,
    or the string "error" when no supported operator is present.

    Bug fixed: the original tested `a.find(op)`, which is truthy even when
    the operator is absent (find returns -1), so nearly every input took
    the '+' branch and crashed with an IndexError on the split result.
    Membership tests (`op in expression`) give the intended dispatch.
    """
    for op in ('+', '-', '*', '//'):
        if op in expression:
            # Mirror the original indexing: only the first two pieces of
            # the split are used.
            parts = expression.split(op)
            left = int(parts[0])
            right = int(parts[1])
            if op == '+':
                return left + right
            if op == '-':
                return left - right
            if op == '*':
                return left * right
            return left // right
    return "error"


if __name__ == '__main__':
    print("Text")
    print(calculate(input("Text")))
| mit | Python | |
0f1003cf17acdb3ee9b9614131f7e8d7c9cebb41 | add list of events | ciappi/Yaranullin | yaranullin/events.py | yaranullin/events.py | # yaranullin/events.py
#
# Copyright (c) 2012 Marco Scopesi <marco.scopesi@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
''' The events of Yaranullin '''
# Basic
ANY = 0
QUIT = 1
TICK = 2
# Network
JOIN = 10
# Game
# Bug fixed: the original names used hyphens (e.g. `PAWN-NEW = 20`), which
# is a SyntaxError in Python because `-` is the minus operator; underscores
# make them valid identifiers while keeping the same event codes.
PAWN_NEW = 20
PAWN_DEL = 21
PAWN_UPDATED = 22
PAWN_MOVE = 23
PAWN_PLACE = 24
PAWN_NEXT = 25
BOARD_NEW = 26
BOARD_DEL = 27
BOARD_CHANGE = 28
STATE_UPDATE = 29
| isc | Python | |
1c09bfb7c999fba548730a5a9249455f709d7f16 | Convert contribution zip codes to county names | srwareham/CampaignAdvisor,srwareham/CampaignAdvisor,srwareham/CampaignAdvisor,srwareham/CampaignAdvisor | data_cleanup/data_cleanup.py | data_cleanup/data_cleanup.py | import os
import pandas as pd
import csv
# Directory path constants
# ROOT_PATH resolves to the repository root (two levels above this file).
ROOT_PATH = os.path.abspath(__file__ + "/../../")
DATA_DIR_PATH = os.path.join(ROOT_PATH, "data_files")
# File path constants
CONTRIBUTIONS_PATH = os.path.join(DATA_DIR_PATH, "contributions.csv")
ZIP_CODES_PATH = os.path.join(DATA_DIR_PATH, "zip_code_database.csv")
COUNTY_VOTES_PATH = os.path.join(DATA_DIR_PATH, "county_votes.csv")
# Zip code file-specific constants
# Replacement text for when there is no county name
NO_COUNTY_NAME = "NO_COUNTY_NAME"
# Extraneous words in the zip code file
ZIP_STOP_WORDS = ['County', 'City']
# Length of zip codes provided in zip code file
SHORT_ZIP_CODE_LENGTH = 5
# Length of zip codes that can occur in contributions
LONG_ZIP_CODE_LENGTH = 9
# References points for field names in datasets
# NOTE: column order must match the CSV files exactly -- csv.DictReader is
# given these names positionally below.
ZIP_CODES_FIELD_NAMES = ["zip", "type", "primary_city", "acceptable_cities", "unacceptable_cities", "state", "county",
                         "timezone", "area_codes", "latitude", "longitude", "world_region", "country", "decommissioned",
                         "estimated_population", "notes"]
CONTRIBUTIONS_FIELD_NAMES = ["cmte_id", "cand_id", "cand_nm", "contbr_nm", "contbr_city", "contbr_st", "contbr_zip",
                             "contbr_employer", "contbr_occupation", "contb_receipt_amt", "contb_receipt_dt",
                             "receipt_desc", "memo_cd", "memo_text", "form_tp", "file_num", "tran_id", "election_tp"]
# Mapping of zipcode to county name, built once at import time.
zip_codes = {}
with open(ZIP_CODES_PATH, 'rb') as csvfile:
    for row in csv.DictReader(csvfile, ZIP_CODES_FIELD_NAMES):
        # Normalise both values to str; the CSV values vary in type.
        zipcode = str(row['zip'])
        county = str(row['county'])
        # Empty county names cannot be split/cleaned; skip them.
        if not county:
            continue
        words = county.split()
        # Strip a trailing stop word ("County"/"City") from the name.
        # TODO: this also mangles legitimate names ending in these words;
        # consider a city whitelist (note carried over from the original).
        if words[-1] in ZIP_STOP_WORDS:
            county = " ".join(words[:-1]).strip()
        zip_codes[zipcode] = county
def get_county(zip_code):
    """Return the county name for a zip code, or NO_COUNTY_NAME.

    Accepts anything int()-convertible (the contributions data mixes
    strings and numbers). ZIP+4 (9-digit) codes are trimmed to their
    5-digit prefix before the lookup.
    """
    try:
        zip_code = str(int(zip_code))
    except (TypeError, ValueError):
        # Non-numeric / missing input. Catch only conversion errors; the
        # original bare `except:` would also have swallowed
        # KeyboardInterrupt and friends.
        return NO_COUNTY_NAME
    # NOTE(review): int() drops leading zeros, so zips like "01234" will
    # not match the lookup table -- pre-existing behavior, worth
    # confirming against the zip code database format.
    # Convert long zip codes to short
    if len(zip_code) == LONG_ZIP_CODE_LENGTH:
        zip_code = zip_code[:SHORT_ZIP_CODE_LENGTH]
    return zip_codes.get(zip_code, NO_COUNTY_NAME)
def example_zip_to_county():
    """Demo: map each contribution's zip code to its county name via pandas."""
    # Rename CONTRIBUTIONS_PATH if you would like to use a smaller test file
    # CONTRIBUTIONS_PATH = os.path.join(DATA_DIR_PATH, "VA.csv")
    contributions = pd.read_csv(CONTRIBUTIONS_PATH)
    print contributions["contbr_zip"].apply(get_county) | mit | Python |
39f2b145aebbc68ce8e07bc38ff553d5414393bb | add handcontrolmacros.py, not used for now | openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro | software/ddapp/src/python/ddapp/handcontrolmacros.py | software/ddapp/src/python/ddapp/handcontrolmacros.py |
class HandControlMacros(object):
    """Static macros that drive the hand control panel's GUI buttons."""

    @staticmethod
    def _actuate(left, mode, close):
        """Select a hand, optionally set the mode index, then open/close it."""
        ui = handcontrolpanel.panel.ui
        if left:
            ui.leftButton.click()
        else:
            ui.rightButton.click()
        if mode is not None:
            handcontrolpanel.panel.widget.advanced.modeBox.setCurrentIndex(mode)
        if close:
            ui.closeButton.animateClick()
        else:
            ui.openButton.animateClick()

    @staticmethod
    def openLeft():
        HandControlMacros._actuate(left=True, mode=None, close=False)

    @staticmethod
    def openRight():
        HandControlMacros._actuate(left=False, mode=None, close=False)

    @staticmethod
    def closeLeft():
        HandControlMacros._actuate(left=True, mode=0, close=True)

    @staticmethod
    def closeRight():
        HandControlMacros._actuate(left=False, mode=0, close=True)

    @staticmethod
    def pinchLeft():
        HandControlMacros._actuate(left=True, mode=2, close=True)

    @staticmethod
    def pinchRight():
        HandControlMacros._actuate(left=False, mode=2, close=True)
midiController = viewbehaviors.MidiBehaviorControl()
if midiController.reader:
    midiController.start()
# The *_7 buttons drive the right-hand macros, the *_6 buttons the left.
for _event, _macro in [
        ('s_button_7_pressed', HandControlMacros.openRight),
        ('m_button_7_pressed', HandControlMacros.closeRight),
        ('r_button_7_pressed', HandControlMacros.pinchRight),
        ('s_button_6_pressed', HandControlMacros.openLeft),
        ('m_button_6_pressed', HandControlMacros.closeLeft),
        ('r_button_6_pressed', HandControlMacros.pinchLeft)]:
    midiController.callbacks.connect(_event, _macro)
def reachWithLeft():
    """Apply the reach-with-left-arm constraint preset to the teleop panel."""
    teleop = teleoppanel.panel.endEffectorTeleop
    teleop.setBaseConstraint('xyz only')
    teleop.setBackConstraint('limited')
    teleop.setLFootConstraint('fixed')
    teleop.setRFootConstraint('fixed')
    teleop.setLHandConstraint('ee fixed')
    teleop.setRHandConstraint('arm fixed')
def reachWithRight():
    """Apply the reach-with-right-arm constraint preset to the teleop panel."""
    teleop = teleoppanel.panel.endEffectorTeleop
    teleop.setBaseConstraint('xyz only')
    teleop.setBackConstraint('limited')
    teleop.setLFootConstraint('fixed')
    teleop.setRFootConstraint('fixed')
    teleop.setLHandConstraint('arm fixed')
    teleop.setRHandConstraint('ee fixed')
#app.addToolbarMacro('left arm', reachWithLeft)
#app.addToolbarMacro('right arm', reachWithRight)
#app.addToolbarMacro('plan nominal', planNominal)
| bsd-3-clause | Python | |
fe25d61a64d81a5958ab4e2f099ffde3f19d94c9 | Add simple oscillator animation | kpj/OsciPy | animator.py | animator.py | """
Animate evolution of system of oscillators
"""
import numpy as np
import networkx as nx
import matplotlib as mpl
# Select the non-interactive Agg backend BEFORE pylab is imported so the
# script can render GIFs without a display (e.g. on a headless server).
mpl.use('Agg', force=True)
import matplotlib.pylab as plt
import matplotlib.animation as animation
from tqdm import tqdm
from scarce_information import System
class Animator(object):
    """
    Generate animated GIF of oscillator behavior
    """
    def __init__(self, sols, ts):
        """
        Initialize animator.
        Arguments:
            sols
                Solutions of individual oscillators
            ts
                List of time points
        """
        self.sols = sols
        self.ts = ts
        # Graph/layout are created lazily by create() -> _init_graph().
        self.graph = None
        self.pos = None
        # NOTE(review): self.pbar is never used; the progress bar is passed
        # to _update via FuncAnimation's fargs instead.
        self.pbar = None
    def _init_graph(self):
        """
        Prepare graph
        """
        # One node per oscillator; positions are computed once with the
        # graphviz 'neato' layout and reused for every frame.
        self.graph = nx.Graph()
        self.graph.add_nodes_from(i for i in range(len(self.sols)))
        self.pos = nx.nx_pydot.graphviz_layout(self.graph, prog='neato')
    def _clear_axis(self, ax):
        """
        Prepare axis for drawing.
        Arguments
            ax
                Axis to be prepared
        """
        # networkx turns this off by default
        plt.axis('on')
        # make it fancy
        ax.set_axis_bgcolor('black')
        ax.grid(b=False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    def _get_alpha_mapping(self, t):
        """
        Generate alpha channel mapping for current point in time.
        Arguments
            t
                Index of current time point
        """
        # Alpha encodes the oscillator's value as a fraction of 2*pi
        # (main() wraps the solutions with `% (2*np.pi)` before animating).
        amap = {}
        for node in self.graph.nodes():
            sol = self.sols[node]
            val = sol[t]
            amap[node] = val / (2*np.pi)
        return amap
    def _update(self, t, ax, pbar):
        """
        Draw each frame of the animation.
        Arguments
            t
                Frame index in animation
            ax
                Axis to draw on
            pbar
                Progressbar handle
        """
        pbar.update(1)
        # Clear the current axes, then redraw every node with its new alpha.
        plt.cla()
        alpha_map = self._get_alpha_mapping(t)
        for node, alpha in alpha_map.items():
            nx.draw_networkx_nodes(
                self.graph, self.pos,
                nodelist=[node],
                node_color='yellow', node_size=800,
                font_size=20, alpha=alpha,
                ax=ax)
        self._clear_axis(ax)
        ax.set_title(r'$t={:.2}$'.format(self.ts[t]))
    def _animate(self, fname):
        """
        Do actual animation work.
        Arguments
            fname
                Destination filename of GIF
        """
        if self.graph is None:
            raise RuntimeError('Graph not yet created')
        fig = plt.figure()
        ax = plt.gca()
        # FuncAnimation calls self._update(t, ax, pbar) once per frame;
        # the tqdm bar tracks rendering progress during save().
        with tqdm(total=len(self.ts)) as pbar:
            ani = animation.FuncAnimation(
                fig, self._update,
                frames=len(self.ts),
                fargs=(ax, pbar))
            ani.save(fname, writer='imagemagick', fps=10, dpi=50) # 200
    def create(self, fname):
        """
        Create GIF animation.
        Arguments
            fname
                Destination filename of GIF
        """
        self._init_graph()
        self._animate(fname)
def get_basic_system():
    """
    Generate simple exemplary system
    """
    # A 4-node ring; keep the np.random draw order (B first, then omega)
    # identical to the original so seeded runs reproduce.
    ring = nx.cycle_graph(4)
    dim = len(ring.nodes())
    adjacency = nx.to_numpy_matrix(ring)
    couplings = np.random.uniform(0, 5, size=dim)
    omega = np.random.uniform(0, 3)
    driving_freq = 3
    return System(adjacency, couplings, omega, driving_freq)
def main():
    """
    Exemplary usage
    """
    system = get_basic_system()
    sols, ts = system.solve(0.01, 20)
    # Subsample every 10th time point to keep the GIF small.
    step = 10
    sols, ts = sols.T[::step].T, ts[::step]
    # Wrap values into [0, 2*pi) before animating.
    Animator(sols % (2*np.pi), ts).create('network.gif')

if __name__ == '__main__':
    main()
| mit | Python | |
252e81efb687fecc1961a908444d0f096e9587b5 | rename pieChart example | pignacio/python-nvd3,Coxious/python-nvd3,liang42hao/python-nvd3,oz123/python-nvd3,liang42hao/python-nvd3,oz123/python-nvd3,vdloo/python-nvd3,yelster/python-nvd3,mgx2/python-nvd3,vdloo/python-nvd3,pignacio/python-nvd3,Coxious/python-nvd3,BibMartin/python-nvd3,oz123/python-nvd3,yelster/python-nvd3,Coxious/python-nvd3,BibMartin/python-nvd3,mgx2/python-nvd3,pignacio/python-nvd3,BibMartin/python-nvd3,liang42hao/python-nvd3,vdloo/python-nvd3,yelster/python-nvd3,mgx2/python-nvd3 | examples/pieChart.py | examples/pieChart.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3 import pieChart
# Build the example pie chart. `chart_type` was originally named `type`,
# which shadowed the builtin of the same name.
chart_type = "pieChart"
chart = pieChart(name=chart_type, height=400, width=400)
chart.set_containerheader("\n\n<h2>" + chart_type + "</h2>\n\n")
# Per-serie options: tooltip text wrapped around the y values.
extra_serie = {"tooltip": {"y_start": "", "y_end": " cal"}}
xdata = ["Orange", "Banana", "Pear", "Kiwi", "Apple", "Strawberry", "Pineapple"]
ydata = [3, 4, 0, 1, 5, 7, 3]
chart.add_serie(y=ydata, x=xdata, extra=extra_serie)
chart.buildhtml()
# Write the rendered HTML; `with` guarantees the file is closed even if
# the write fails (the original left the handle open on any error).
with open('test1.html', 'w') as output_file:
    output_file.write(chart.htmlcontent)
| mit | Python | |
e8e8a07ba70ff3defbc529b6619054001cb692b0 | Add RemoveOutliers python example | jaehyuk/learning-spark,databricks/learning-spark,diogoaurelio/learning-spark,jindalcastle/learning-spark,qingkaikong/learning-spark-examples,coursera4ashok/learning-spark,coursera4ashok/learning-spark,tengteng/learning-spark,junwucs/learning-spark,huydx/learning-spark,GatsbyNewton/learning-spark,kpraveen420/learning-spark,UsterNes/learning-spark,ramyasrigangula/learning-spark,DINESHKUMARMURUGAN/learning-spark,kod3r/learning-spark,obinsanni/learning-spark,kpraveen420/learning-spark,UsterNes/learning-spark,asarraf/learning-spark,rex1100/learning-spark,dsdinter/learning-spark-examples,XiaoqingWang/learning-spark,qingkaikong/learning-spark-examples,mohitsh/learning-spark,anjuncc/learning-spark-examples,jaehyuk/learning-spark,diogoaurelio/learning-spark,ramyasrigangula/learning-spark,asarraf/learning-spark,gaoxuesong/learning-spark,shimizust/learning-spark,negokaz/learning-spark,GatsbyNewton/learning-spark,anjuncc/learning-spark-examples,tengteng/learning-spark,mohitsh/learning-spark,DINESHKUMARMURUGAN/learning-spark,UsterNes/learning-spark,zaxliu/learning-spark,feynman0825/learning-spark,gaoxuesong/learning-spark,ellis429/learning-spark-examples,bhagatsingh/learning-spark,ramyasrigangula/learning-spark,mohitsh/learning-spark,dsdinter/learning-spark-examples,qingkaikong/learning-spark-examples,junwucs/learning-spark,qingkaikong/learning-spark-examples,jaehyuk/learning-spark,ramyasrigangula/learning-spark,JerryTseng/learning-spark,noprom/learning-spark,feynman0825/learning-spark,mmirolim/learning-spark,concerned3rdparty/learning-spark,huixiang/learning-spark,huixiang/learning-spark,ramyasrigangula/learning-spark,huydx/learning-spark,qingkaikong/learning-spark-examples,dsdinter/learning-spark-examples,junwucs/learning-spark,ellis429/learning-spark,SunGuo/learning-spark,obinsanni/learning-spark,bhagatsingh/learning-spark,coursera4ashok/learning-spark,feynman0825/learning-spark,JerryTseng/learning
-spark,DINESHKUMARMURUGAN/learning-spark,negokaz/learning-spark,coursera4ashok/learning-spark,diogoaurelio/learning-spark,baokunguo/learning-spark-examples,shimizust/learning-spark,GatsbyNewton/learning-spark,negokaz/learning-spark,noprom/learning-spark,shimizust/learning-spark,kod3r/learning-spark,feynman0825/learning-spark,ellis429/learning-spark-examples,asarraf/learning-spark,mohitsh/learning-spark,shimizust/learning-spark,SunGuo/learning-spark,holdenk/learning-spark-examples,databricks/learning-spark,jindalcastle/learning-spark,holdenk/learning-spark-examples,SunGuo/learning-spark,bhagatsingh/learning-spark,XiaoqingWang/learning-spark,zaxliu/learning-spark,concerned3rdparty/learning-spark,obinsanni/learning-spark,huixiang/learning-spark,jindalcastle/learning-spark,feynman0825/learning-spark,UsterNes/learning-spark,jaehyuk/learning-spark,GatsbyNewton/learning-spark,XiaoqingWang/learning-spark,concerned3rdparty/learning-spark,databricks/learning-spark,holdenk/learning-spark-examples,huixiang/learning-spark,ellis429/learning-spark,ellis429/learning-spark,asarraf/learning-spark,jindalcastle/learning-spark,noprom/learning-spark,mohitsh/learning-spark,XiaoqingWang/learning-spark,NBSW/learning-spark,noprom/learning-spark,tengteng/learning-spark,zaxliu/learning-spark,bhagatsingh/learning-spark,anjuncc/learning-spark-examples,anjuncc/learning-spark-examples,noprom/learning-spark,rex1100/learning-spark,DINESHKUMARMURUGAN/learning-spark,baokunguo/learning-spark-examples,ellis429/learning-spark-examples,dsdinter/learning-spark-examples,gaoxuesong/learning-spark,ellis429/learning-spark-examples,NBSW/learning-spark,concerned3rdparty/learning-spark,kpraveen420/learning-spark,diogoaurelio/learning-spark,holdenk/learning-spark-examples,databricks/learning-spark,huydx/learning-spark,NBSW/learning-spark,coursera4ashok/learning-spark,databricks/learning-spark,zaxliu/learning-spark,junwucs/learning-spark,negokaz/learning-spark,concerned3rdparty/learning-spark,baokunguo/learning-spa
rk-examples,kpraveen420/learning-spark,obinsanni/learning-spark,rex1100/learning-spark,ellis429/learning-spark,gaoxuesong/learning-spark,holdenk/learning-spark-examples,huixiang/learning-spark,asarraf/learning-spark,tengteng/learning-spark,kod3r/learning-spark,SunGuo/learning-spark,UsterNes/learning-spark,bhagatsingh/learning-spark,SunGuo/learning-spark,baokunguo/learning-spark-examples,gaoxuesong/learning-spark,dsdinter/learning-spark-examples,baokunguo/learning-spark-examples,anjuncc/learning-spark-examples,huydx/learning-spark,JerryTseng/learning-spark,mmirolim/learning-spark,zaxliu/learning-spark,kod3r/learning-spark,negokaz/learning-spark,shimizust/learning-spark,mmirolim/learning-spark,NBSW/learning-spark,mmirolim/learning-spark,ellis429/learning-spark,DINESHKUMARMURUGAN/learning-spark,JerryTseng/learning-spark,tengteng/learning-spark,kpraveen420/learning-spark,jaehyuk/learning-spark,NBSW/learning-spark,ellis429/learning-spark-examples,XiaoqingWang/learning-spark,diogoaurelio/learning-spark,JerryTseng/learning-spark,obinsanni/learning-spark,kod3r/learning-spark,junwucs/learning-spark,GatsbyNewton/learning-spark,jindalcastle/learning-spark,mmirolim/learning-spark,huydx/learning-spark | src/python/RemoveOutliers.py | src/python/RemoveOutliers.py | """
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000])
>>> sorted(removeOutliers(b).collect())
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
import sys
import math
from pyspark import SparkContext
def removeOutliers(nums):
"""Remove the outliers"""
stats = nums.stats()
stddev = math.sqrt(stats.variance())
print "stddev is "
print stddev
return nums.filter(lambda x: math.fabs(x - stats.mean()) < 3 * stddev)
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "Sum")
nums = sc.parallelize([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1000])
output = sorted(removeOutliers(nums).collect())
for num in output:
print "%i " % (num)
| mit | Python | |
9ab0ead47220d704a3c80cae25ddd05afac55271 | Add example plugin to solve with angr | Vector35/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api,Vector35/binaryninja-api,Vector35/binaryninja-api,Vector35/binaryninja-api,joshwatson/binaryninja-api,joshwatson/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api | python/examples/angr_plugin.py | python/examples/angr_plugin.py | # This plugin assumes angr is already installed and available on the system. See the angr documentation
# for information about installing angr. It should be installed using the virtualenv method.
#
# This plugin is currently only known to work on Linux using virtualenv. Switch to the virtual environment
# (using a command such as "workon angr"), then run the Binary Ninja UI from the command line.
#
# This method is known to fail on Mac OS X as the virtualenv used by angr does not appear to provide a
# way to automatically link to the correct version of Python, even when running the UI from within the
# virtual environment. A later update may allow for a manual override to link to the required version
# of Python.
__name__ = "__console__" # angr looks for this, it won't load from within a UI without it
import angr
from binaryninja import *
import tempfile
import threading
import logging
import os
# Disable warning logs as they show up as errors in the UI
logging.disable(logging.WARNING)
# Create sets in the BinaryView's data field to store the desired path for each view
# (one set of addresses the solver must reach, one set it must avoid).
BinaryView.set_default_data("angr_find", set())
BinaryView.set_default_data("angr_avoid", set())
def escaped_output(text):
    """Backslash-escape non-printable characters in each line of `text`.

    Applies the Python 2 `string_escape` codec line-by-line so the real
    newlines separating lines are preserved. The parameter was originally
    named `str`, which shadowed the builtin of the same name.
    """
    return '\n'.join([line.encode("string_escape") for line in text.split('\n')])
# Define a background thread object for solving in the background
class Solver(BackgroundTaskThread):
    """Background task that runs the angr explorer and reports its results."""
    def __init__(self, find, avoid, view):
        BackgroundTaskThread.__init__(self, "Solving with angr...", True)
        # Snapshot the address sets so later UI edits don't affect this run.
        self.find = tuple(find)
        self.avoid = tuple(avoid)
        self.view = view
        # Write the binary to disk so that the angr API can read it
        # (keeping the NamedTemporaryFile referenced on self prevents it
        # from being closed/deleted while the solve is running).
        self.binary = tempfile.NamedTemporaryFile()
        self.binary.write(view.file.raw.read(0, len(view.file.raw)))
        self.binary.flush()
    def run(self):
        # Create an angr project and an explorer with the user's settings
        p = angr.Project(self.binary.name)
        e = p.surveyors.Explorer(find = self.find, avoid = self.avoid)
        # Solve loop
        while not e.done:
            if self.cancelled:
                # Solve cancelled, show results if there were any
                if len(e.found) > 0:
                    break
                return
            # Perform the next step in the solve
            e.step()
            # Update status
            active_count = len(e.active)
            found_count = len(e.found)
            progress = "Solving with angr (%d active path%s" % (active_count, "s" if active_count != 1 else "")
            if found_count > 0:
                progress += ", %d path%s found" % (found_count, "s" if found_count != 1 else "")
            self.progress = progress + ")..."
        # Solve complete, show report
        text_report = "Found %d path%s.\n\n" % (len(e.found), "s" if len(e.found) != 1 else "")
        i = 1
        for f in e.found:
            # For each found path, dump the stdin that drives execution
            # there plus the resulting stdout/stderr (posix fds 0/1/2).
            text_report += "Path %d\n" % i + "=" * 10 + "\n"
            text_report += "stdin:\n" + escaped_output(f.state.posix.dumps(0)) + "\n\n"
            text_report += "stdout:\n" + escaped_output(f.state.posix.dumps(1)) + "\n\n"
            text_report += "stderr:\n" + escaped_output(f.state.posix.dumps(2)) + "\n\n"
            i += 1
        name = self.view.file.filename
        if len(name) > 0:
            show_plain_text_report("Results from angr - " + os.path.basename(self.view.file.filename), text_report)
        else:
            show_plain_text_report("Results from angr", text_report)
def find_instr(bv, addr):
    """Mark `addr` as a solve goal: highlight it green and record it."""
    for block in bv.get_basic_blocks_at(addr):
        block.set_auto_highlight(HighlightColor(GreenHighlightColor, alpha = 128))
        block.function.set_auto_instr_highlight(block.arch, addr, GreenHighlightColor)
    # Remember the address in this view's find-set.
    bv.data.angr_find.add(addr)
def avoid_instr(bv, addr):
    """Mark `addr` as an address to avoid: highlight it red and record it."""
    for block in bv.get_basic_blocks_at(addr):
        block.set_auto_highlight(HighlightColor(RedHighlightColor, alpha = 128))
        block.function.set_auto_instr_highlight(block.arch, addr, RedHighlightColor)
    # Remember the address in this view's avoid-set.
    bv.data.angr_avoid.add(addr)
def solve(bv):
    """Validate that a goal exists, then run the solver in the background."""
    if not bv.data.angr_find:
        show_message_box("Angr Solve", "You have not specified a goal instruction.\n\n" +
            "Please right click on the goal instruction and select \"Find Path to This Instruction\" to " +
            "continue.", OKButtonSet, ErrorIcon)
        return
    # Start a solver thread for the path associated with the view
    Solver(bv.data.angr_find, bv.data.angr_avoid, bv).start()
# Register commands for the user to interact with the plugin.
# The register_for_address handlers receive (view, address); the plain
# register handler receives only the view.
PluginCommand.register_for_address("Find Path to This Instruction",
    "When solving, find a path that gets to this instruction", find_instr)
PluginCommand.register_for_address("Avoid This Instruction",
    "When solving, avoid paths that reach this instruction", avoid_instr)
PluginCommand.register("Solve With Angr", "Attempt to solve for a path that satisfies the constraints given", solve)
| mit | Python | |
2c651b7083ec368ebf226364a1a1aba5f5ec147e | Add migration for datetime change. | hoosteeno/fjord,DESHRAJ/fjord,staranjeet/fjord,hoosteeno/fjord,lgp171188/fjord,DESHRAJ/fjord,hoosteeno/fjord,Ritsyy/fjord,staranjeet/fjord,lgp171188/fjord,rlr/fjord,rlr/fjord,staranjeet/fjord,hoosteeno/fjord,lgp171188/fjord,staranjeet/fjord,rlr/fjord,mozilla/fjord,mozilla/fjord,lgp171188/fjord,Ritsyy/fjord,Ritsyy/fjord,rlr/fjord,DESHRAJ/fjord,mozilla/fjord,mozilla/fjord,Ritsyy/fjord | fjord/feedback/migrations/0003_auto__chg_field_simple_created.py | fjord/feedback/migrations/0003_auto__chg_field_simple_created.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make Simple.created a plain DateTimeField.

    forwards() drops the auto_now_add behavior; backwards() restores it.
    The model-level default (datetime.datetime.now) is recorded in the
    frozen `models` dict below.
    """
    def forwards(self, orm):
        # Changing field 'Simple.created'
        db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
    def backwards(self, orm):
        # Changing field 'Simple.created'
        db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
    # Frozen model state generated by South; do not edit by hand.
    models = {
        'feedback.simple': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
            'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        }
    }
    complete_apps = ['feedback'] | bsd-3-clause | Python |
b4fe0a903e5a7bff2dca0c6c7c66613d27d14ef9 | add defalt tests | shtalinberg/django-actions-logger,shtalinberg/django-actions-logger | actionslog/tests/__init__.py | actionslog/tests/__init__.py | """Test model definitions."""
from __future__ import unicode_literals
from django.core.management import call_command
call_command('makemigrations', verbosity=0)
call_command('migrate', verbosity=0) | mit | Python | |
6e77dbbad4cde687ab112c7c0a5da2a958a3ea42 | Add synthtool scripts (#3765) | googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java | java-websecurityscanner/google-cloud-websecurityscanner/synth.py | java-websecurityscanner/google-cloud-websecurityscanner/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
library = gapic.java_library(
service='websecurityscanner',
version='v1alpha',
config_path='/google/cloud/websecurityscanner/artman_websecurityscanner_v1alpha.yaml',
artman_output_name='')
s.copy(library / 'gapic-google-cloud-websecurityscanner-v1alpha/src', 'src')
s.copy(library / 'grpc-google-cloud-websecurityscanner-v1alpha/src', '../../google-api-grpc/grpc-google-cloud-websecurityscanner-v1alpha/src')
s.copy(library / 'proto-google-cloud-websecurityscanner-v1alpha/src', '../../google-api-grpc/proto-google-cloud-websecurityscanner-v1alpha/src')
| apache-2.0 | Python | |
4677085404e41dac88b92418bbc46c6a1dea30df | Add snapshot function. | fuziontech/pgshovel,fuziontech/pgshovel,fuziontech/pgshovel,disqus/pgshovel,disqus/pgshovel | src/main/python/pgshovel/snapshot.py | src/main/python/pgshovel/snapshot.py | import collections
from contextlib import contextmanager
from psycopg2.extensions import ISOLATION_LEVEL_REPEATABLE_READ
@contextmanager
def prepare(connection, table):
"""
Prepares a function that can be used to take a consistent snapshot of a set
of capture groups by their primary keys.
"""
connection.set_session(
isolation_level=ISOLATION_LEVEL_REPEATABLE_READ, # READ_COMMITTED might work here too, but being safe.
readonly=True,
)
# TODO: This doesn't play well with self joins right now -- duplicate
# tables will need to be aliased (this should be a straightforward change.)
# TODO: This also doesn't handle 1:N joins right now.
joins = []
columns = []
Transaction = collections.namedtuple('Transaction', 'id timestamp')
Snapshot = collections.namedtuple('Snapshot', 'key state transaction')
def collect(table):
for column in sorted(set(tuple(table.columns) + (table.primary_key,))):
columns.append((table.name, column))
for join in table.joins:
joins.append('LEFT OUTER JOIN {join.table.name} ON {table.name}.{table.primary_key} = {join.table.name}.{join.foreign_key}'.format(join=join, table=table))
collect(join.table)
collect(table)
statement = 'SELECT {table.name}.{table.primary_key} as key, {columns} FROM {table.name}'.format(
columns=', '.join('{0}.{1} as {0}__{1}'.format(*i) for i in columns),
table=table,
)
if joins:
statement = ' '.join((statement, ' '.join(joins)))
statement = statement + ' WHERE {table.name}.{table.primary_key} IN %s'.format(table=table)
with connection.cursor() as cursor:
def snapshot(keys):
# Fetch the transaction metadata.
cursor.execute('SELECT txid_current(), extract(epoch from NOW())')
(result,) = cursor.fetchall()
transaction = Transaction(*result)
seen = set()
# Fetch all of the rows that exist.
# TODO: Actually make this a real prepared statement.
cursor.execute(statement, (tuple(keys),))
for row in cursor:
result = collections.defaultdict(dict)
for (table, name), value in zip(columns, row[1:]):
result[table][name] = value
key = row[0]
seen.add(key)
yield Snapshot(key, dict(result), transaction)
# Also return records for any missing rows.
missing = set(keys) - seen
for key in missing:
yield Snapshot(key, None, transaction)
# Ensure that long lived connections are not stuck idle in transaction.
connection.commit()
yield snapshot
if __name__ == '__main__':
import psycopg2
import sys
from pgshovel.interfaces.groups_pb2 import TableConfiguration
from pgshovel.utilities.protobuf import TextCodec
table = TextCodec(TableConfiguration).decode(open(sys.argv[2]).read())
with prepare(psycopg2.connect(sys.argv[1]), table) as snapshot:
for result in snapshot(map(int, sys.argv[3:])):
print result
| apache-2.0 | Python | |
3e5abf8d7a265e5b873336db04945fca591afb80 | add Read_xls.py file | wyblzu/Python_GDAL | Read_xls.py | Read_xls.py | __author__ = 'wyb'
# -*- coding:utf-8 -*-
# testing gdal-lib
import gdal
import numpy
import struct
from gdalconst import *
dataset = gdal.Open('/home/wyb/data/SPOT.img', GA_ReadOnly)
if dataset is None:
print 'Nodata'
else:
print 'Driver:', dataset.GetDriver().ShortName, '/', \
dataset.GetDriver().LongName
print 'Size is', dataset.RasterXSize, 'x', dataset.RasterYSize,\
'x', dataset.RasterCount
print 'Projection is', dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
if not geotransform is None:
print 'Origin = (', geotransform[0], ',', geotransform[3], ')'
print 'Pixel Size = (', geotransform[1], ',', geotransform[5], ')'
print 'Angle = (', geotransform[2], ',', geotransform[4], ')'
band = dataset.GetRasterBand(1)
print 'Band Type = ', gdal.GetDataTypeName(band.DataType)
min = band.GetMinimum()
max = band.GetMaximum()
if min is None or max is None:
(min, max) = band.ComputeRasterMinMax(1)
print 'min = %.3f, max = %.3f' % (min, max)
if band.GetOverviewCount() > 0:
print 'Band has ', band.GetOverviewCount(), ' overviews.'
if not band.GetRasterColorTable() is None:
print 'Band has a color table with ', band.GetRasterColorTable().GetCount(), ' entries.'
scanline = band.ReadRaster(0, 0, band.XSize, 1, band.XSize, 1, GDT_Float32)
tuple_of_floats = struct.unpack('f' * band.XSize, scanline)
print tuple_of_floats
# qwer = dataset.G
# width = dataset.RasterXSize
# height = dataset.RasterYSize
# bw = 0.1
# bh = 0.1
# x = int(width*bw)
# y = int(height*bh)
# datas = []
# for i in range(3):
# band = dataset.GetRasterBand(i + 1)
# data = band.ReadAsArray(0, 0, width, height, x, y)
# datas.append(numpy.reshape(data, (1, -1)))
# datas = numpy.concatenate(datas)
# driver = gdal.GetDriverByName("GTiff")
# tods = driver.Create('/home/wyb/pycharm/data/tpix2.tiff', x, y, 3, options=["INTERLEAVE=PIXEL"])
# tods.WriteRaster(0, 0, x, y, datas.tostring(), x, y, band_list=[1, 2, 3])
# import gdal
# import numpy
# from gdalconst import *
# dataset = gdal.Open("aster//earth.img")
# width = dataset.RasterXSize
# height = dataset.RasterYSize
# bw = 0.1
# bh = 0.1
# x = int(width*bw)
# y = int(height*bh)
# datas = []
# for i in range(3):
# band = dataset.GetRasterBand(i+1)
# data = band.ReadAsArray(0,0,width,height,x,y)
# datas.append(numpy.reshape(data,(1,-1)))
# datas = numpy.concatenate(datas)
# driver = gdal.GetDriverByName("GTiff")
# tods = driver.Create("Raster//tpix1.tif",x,y,3,options=["INTERLEAVE=PIXEL"])
# tods.WriteRaster(0,0,x,y,datas.tostring(),x,y,band_list=[1,2,3])
| mit | Python | |
e80b1f81152313947b1a94efe78279b575186731 | Create autoexec.py | matbor/mqtt2xbmc-notifications | autoexec.py | autoexec.py | #!/usr/bin/python
#
# XBMC MQTT Subscriber and custom popup
# october 2013 v1.0
#
# by Matthew Bordignon @bordignon
#
# Format mqtt message using json
# {"lvl":"1","sub":"xxxxxx","txt":"xxxxxx","img":"xxx","del":"10000"}
# {"lvl":"level either 1 or 2","sub":"subject text","txt":"main text 150 characters max","img":"icon location","del":"display msg this long"}
#
# todo;
# - need to change to a addon with config settings, currently just using the autoexec.py and modifying settings below
# - deal with the odd ascii characters, typical only has issues on the internal xbmc notification system
# - handle the exiting of XBMC, by killing this script nicely... will this effect will_set >? atm xbmc forces a kill
# -
import mosquitto #you need to have this file
import xbmc
import json
import xbmcgui
import datetime
import socket
xbmc_name = socket.gethostname() #using the hostname for the mqtt will_set
#settings BEGIN
broker = "mqtt.localdomain" #mqtt broker location
broker_port = 1883 #mqtt broker port
topic_xbmcmsgs = "/house/xbmc/all/messages"
topic_xbmcstatus = "/house/xbmc/"+xbmc_name+"/status" #the topic location for the mqtt online/offline last will and testament (will_set), using the xbmc hostname as well here
background = 'special://masterprofile/Thumbnails/background.png' #location of the background image for the popup box
mqtt_logo = 'special://masterprofile/Thumbnails/mqtt.png'
#settings END
print('XBMC MQTT -- Subscriber and custom popup starting on: ' + xbmc_name)
now = datetime.datetime.now()
class PopupWindow(xbmcgui.WindowDialog):
def __init__(self, image, subject, text):
#setup the text layout
line1text = now.strftime("%d %B %Y at %I:%M%p")
line2text = 'Message : ' + subject
line3text = '-----------------------------'
line4text = text[:49]
line5text = text[49:99]
line6text = text[99:150]
#define the XBMCgui control
self.addControl(xbmcgui.ControlImage(10,10,700,180, background))
self.addControl(xbmcgui.ControlImage(x=25, y=30, width=150, height=150, filename=image))
self.addControl(xbmcgui.ControlLabel(x=190, y=25, width=900, height=25, label=line1text))
self.addControl(xbmcgui.ControlLabel(x=190, y=50, width=900, height=25, label=line2text))
self.addControl(xbmcgui.ControlLabel(x=190, y=75, width=900, height=25, label=line3text))
self.addControl(xbmcgui.ControlLabel(x=190, y=100, width=900, height=25, label=line4text))
self.addControl(xbmcgui.ControlLabel(x=190, y=125, width=900, height=25, label=line5text))
self.addControl(xbmcgui.ControlLabel(x=190, y=150, width=900, height=25, label=line6text))
def on_connect(mosq, obj, rc):
print("XBMC MQTT -- rc: "+str(rc))
xbmc.executebuiltin('Notification(MQTT Connected,waiting for message,5000,'+mqtt_logo+')\'')
mqttc.publish(topic_xbmcstatus, payload="online", qos=0, retain=True)
def on_message(mosq, obj, msg):
print('XBMC MQTT -- message receieved')
print("XBMC MQTT -- "+msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
#check to see if valid json string has been found, normally we just check the level.
list = []
try :
list = json.loads(str(msg.payload))
#if level-1 msg, use my popup window
if list['lvl'] == "1":
print("XBMC MQTT -- level 1 priority message")
window = PopupWindow(list['img'],list['sub'],list['txt'])
window.show()
xbmc.sleep(int(list['del'])) #how long to show the window for
window.close()
#if level-2 msg, use xbmc builtin notification
if list['lvl'] == "2":
print("XBMC MQTT -- level 2 priority message")
xbmc.executebuiltin('Notification('+list['sub']+','+list['txt']+','+list['del']+','+list['img']+')\'')
except: #deal with the errors
print('XBMC MQTT -- error, not sure why, sometimes it is because it might not valid JSON string revieved!')
xbmc.executebuiltin('Notification(Error, not a valid message pls chk,10000,'+mqtt_logo+')\'')
def on_publish(mosq, obj, mid):
print("XBMC MQTT -- pulish mid: "+str(mid))
def on_subscribe(mosq, obj, mid, granted_qos):
print("XBMC MQTT -- Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mosq, obj, level, string):
print("XBMC MQTT -- " + string)
# If you want to use a specific client id, use
# mqttc = mosquitto.Mosquitto("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mosquitto.Mosquitto()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.will_set(topic_xbmcstatus, payload="offline", qos=0, retain=True)
# Uncomment to enable debug messages
#mqttc.on_log = on_log
mqttc.connect(broker, broker_port, 60)
mqttc.subscribe(topic_xbmcmsgs, 0)
rc = 0
while rc == 0:
rc = mqttc.loop()
print("XBMC MQTT -- rc: "+str(rc))
| mit | Python | |
635ed78543b3e3e8fe7c52fa91ee8516d617249d | Add client sample for test - Connect to server - Receive data from server - Strip the data and print | peitaosu/motion-tools | data-travel/test_client.py | data-travel/test_client.py | from socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close() | apache-2.0 | Python | |
40c19953e55e11a8b7e48e9894d8dd2322d6bd11 | Add engine zoom functions | shivnshu/AMR-System,shivnshu/AMR-System,shivnshu/AMR-System,shivnshu/AMR-System | temp_volume.py | temp_volume.py | #!/usr/bin/env python
from __future__ import print_function
from OCC.STEPControl import STEPControl_Reader
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
from OCC.Display.SimpleGui import init_display
from OCC.gp import gp_Ax1, gp_Pnt, gp_Dir, gp_Trsf
from OCC.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.TopLoc import TopLoc_Location
import socket
import math
step_reader = STEPControl_Reader()
status = step_reader.ReadFile('./models/ice/Assembly1.stp')
if status == IFSelect_RetDone: # check status
failsonly = False
step_reader.PrintCheckLoad(failsonly, IFSelect_ItemsByEntity)
step_reader.PrintCheckTransfer(failsonly, IFSelect_ItemsByEntity)
ok = step_reader.TransferRoot(1)
_nbs = step_reader.NbShapes()
aResShape = step_reader.Shape(1)
else:
print("Error: can't read file.")
sys.exit(0)
def start_animation(event=None):
server_address = ("172.27.30.2", 5000)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
msg = 'c'
sock.sendto(msg, server_address)
c = 0
zoomIn_old = 0
zoomOut_old = 0
while True:
data, addr = sock.recvfrom(1024) # buffer size is 1024 byte
arr = data.split()
x = round(float(arr[0]), 2)
y = round(float(arr[1]), 2)
z = round(float(arr[2]), 2)
w = round(float(arr[3]), 2)
zoomIn = int(arr[4])
zoomOut = int(arr[5])
c = c + 1
if c==10:
# display.ZoomFactor(2)
if(zoomIn != zoomIn_old):
display.ZoomFactor(1.1)
zoomIn_old = zoomIn
if(zoomOut != zoomOut_old):
display.ZoomFactor(0.9)
zoomOut_old = zoomOut
ax1 = gp_Ax1(gp_Pnt(0., 0., 0.), gp_Dir(x, y, z))
aCubeTrsf = gp_Trsf()
angle = 2*math.acos(w)
aCubeTrsf.SetRotation(ax1, angle)
aCubeToploc = TopLoc_Location(aCubeTrsf)
display.Context.SetLocation(ais_boxshp, aCubeToploc)
display.Context.UpdateCurrentViewer()
c = 0
display, start_display, add_menu, add_function_to_menu = init_display()
add_menu('animation')
add_function_to_menu('animation', start_animation)
ais_boxshp = display.DisplayShape(aResShape, update=True)
start_display()
| mit | Python | |
e9979dc54b884c8a300cc3d663e44c793b7afd57 | Add script term-weight.py, computting term-weight for query/bidword. | SnakeHunt2012/word2vec,SnakeHunt2012/word2vec,SnakeHunt2012/word2vec,SnakeHunt2012/word2vec,SnakeHunt2012/word2vec | term-weight.py | term-weight.py | #!/usr/bin/env python
# -*- coding: utf-8
from codecs import open
from argparse import ArgumentParser
def load_entropy_dict(entropy_file):
entropy_dict = {}
with open(entropy_file, 'r') as fd:
for line in fd:
splited_line = line.strip().split("\t")
if len(splited_line) != 2:
continue
term, entropy = splited_line
if term not in entropy_dict:
entropy_dict[term] = float(entropy)
return entropy_dict
def main():
parser = ArgumentParser()
parser.add_argument("seg_file", help = "segment file with term weight one query/bidword per line in tsv format")
parser.add_argument("left_entropy_file", help = "word-entropy (left entropy) file in tsv format")
parser.add_argument("right_entropy_file", help = "word-entropy (right entropy) file in tsv format")
args = parser.parse_args()
seg_file = args.seg_file
left_entropy_file = args.left_entropy_file
right_entropy_file = args.right_entropy_file
left_entropy_dict = load_entropy_dict(left_entropy_file)
right_entropy_dict = load_entropy_dict(right_entropy_file)
with open(seg_file, 'r') as fd:
for line in fd:
splited_line = line.strip().split("\t")
if len(splited_line) != 2:
continue
phrase_str, phrase_seg = splited_line
word_entropy_list = [(token.split("")[0],
float(token.split("")[1]),
float(left_entropy_dict[token.split("")[0]]) if token.split("")[0] in left_entropy_dict else None,
float(right_entropy_dict[token.split("")[0]]) if token.split("")[0] in right_entropy_dict else None)
for token in phrase_seg.split("")]
if word_entropy_list is None or len(word_entropy_list) == 0:
continue
print " ".join(["%s/%s/%s/%s" % (word, weight, left_entropy, right_entropy) for word, weight, left_entropy, right_entropy in word_entropy_list])
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
769a255fbcaf1b657f1a76ff89d3ffa6998e4ada | Support unix socket connections in redis pool | BuildingLink/sentry,alexm92/sentry,daevaorn/sentry,fotinakis/sentry,daevaorn/sentry,gencer/sentry,ifduyue/sentry,jean/sentry,mvaled/sentry,ifduyue/sentry,zenefits/sentry,ifduyue/sentry,JamesMura/sentry,gencer/sentry,mvaled/sentry,JackDanger/sentry,zenefits/sentry,fotinakis/sentry,nicholasserra/sentry,zenefits/sentry,alexm92/sentry,nicholasserra/sentry,ifduyue/sentry,beeftornado/sentry,beeftornado/sentry,daevaorn/sentry,ifduyue/sentry,jean/sentry,mitsuhiko/sentry,BuildingLink/sentry,looker/sentry,JackDanger/sentry,fotinakis/sentry,nicholasserra/sentry,BuildingLink/sentry,looker/sentry,zenefits/sentry,daevaorn/sentry,mvaled/sentry,looker/sentry,mvaled/sentry,jean/sentry,JamesMura/sentry,gencer/sentry,looker/sentry,BuildingLink/sentry,JamesMura/sentry,jean/sentry,beeftornado/sentry,gencer/sentry,JamesMura/sentry,zenefits/sentry,looker/sentry,alexm92/sentry,BuildingLink/sentry,fotinakis/sentry,mvaled/sentry,mitsuhiko/sentry,JackDanger/sentry,mvaled/sentry,gencer/sentry,JamesMura/sentry,jean/sentry | src/sentry/utils/redis.py | src/sentry/utils/redis.py | from __future__ import absolute_import
from threading import Lock
import rb
from redis.connection import ConnectionPool
from sentry.exceptions import InvalidConfiguration
from sentry.utils.versioning import (
Version,
check_versions,
)
_pool_cache = {}
_pool_lock = Lock()
def _shared_pool(**opts):
if 'host' in opts:
key = '%s:%s/%s' % (
opts['host'],
opts['port'],
opts['db'],
)
else:
key = '%s/%s' % (
opts['path'],
opts['db']
)
pool = _pool_cache.get(key)
if pool is not None:
return pool
with _pool_lock:
pool = _pool_cache.get(key)
if pool is not None:
return pool
pool = ConnectionPool(**opts)
_pool_cache[key] = pool
return pool
def make_rb_cluster(hosts):
"""Returns a rb cluster that internally shares the pools more
intelligetly.
"""
return rb.Cluster(hosts, pool_cls=_shared_pool)
def check_cluster_versions(cluster, required, recommended=Version((3, 0, 4)), label=None):
try:
with cluster.all() as client:
results = client.info()
except Exception as e:
# Any connection issues should be caught here.
raise InvalidConfiguration(unicode(e))
versions = {}
for id, info in results.value.items():
host = cluster.hosts[id]
# NOTE: This assumes there is no routing magic going on here, and
# all requests to this host are being served by the same database.
key = '{host}:{port}'.format(host=host.host, port=host.port)
versions[key] = Version(map(int, info['redis_version'].split('.', 3)))
check_versions(
'Redis' if label is None else 'Redis (%s)' % (label,),
versions,
required,
recommended,
)
| from __future__ import absolute_import
from threading import Lock
import rb
from redis.connection import ConnectionPool
from sentry.exceptions import InvalidConfiguration
from sentry.utils.versioning import (
Version,
check_versions,
)
_pool_cache = {}
_pool_lock = Lock()
def _shared_pool(**opts):
key = '%s:%s/%s' % (
opts['host'],
opts['port'],
opts['db'],
)
pool = _pool_cache.get(key)
if pool is not None:
return pool
with _pool_lock:
pool = _pool_cache.get(key)
if pool is not None:
return pool
pool = ConnectionPool(**opts)
_pool_cache[key] = pool
return pool
def make_rb_cluster(hosts):
"""Returns a rb cluster that internally shares the pools more
intelligetly.
"""
return rb.Cluster(hosts, pool_cls=_shared_pool)
def check_cluster_versions(cluster, required, recommended=Version((3, 0, 4)), label=None):
try:
with cluster.all() as client:
results = client.info()
except Exception as e:
# Any connection issues should be caught here.
raise InvalidConfiguration(unicode(e))
versions = {}
for id, info in results.value.items():
host = cluster.hosts[id]
# NOTE: This assumes there is no routing magic going on here, and
# all requests to this host are being served by the same database.
key = '{host}:{port}'.format(host=host.host, port=host.port)
versions[key] = Version(map(int, info['redis_version'].split('.', 3)))
check_versions(
'Redis' if label is None else 'Redis (%s)' % (label,),
versions,
required,
recommended,
)
| bsd-3-clause | Python |
8dcda61421af49320aa7865d9a51847a94dc1904 | Create retrieve_lowest_fares_to_db.py | jebstone/datascience | retrieve_lowest_fares_to_db.py | retrieve_lowest_fares_to_db.py | # -*- coding: utf-8 -*-
"""
Retrieve airfare pricing data, for a list of routekeys, from an internal API. Load to a database.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import requests
import json
import datetime
import pyodbc as db
import datetime
import time
# Database Connection String Parameters connecting to a local database
#db_config = {
# 'Driver': '{MySQL ODBC 5.1 Driver}',
# 'Server': '',
# 'Database': 'advice_data',
# 'user': '',
# 'password': ''
#}
db_config = {
'Driver': '{SQL Server}',
'Server': '',
'Database': 'advice_data',
'user': '',
'password': ''
}
app_config = {
'minDepartDate': '20140222',
'maxDepartDate': '20140601'
}
routekeys = ['DFW.LAX.US',
'CHI.NYC.US',
'BOS.WAS.US',
'HOU.YCC.US',
'ORL.LAS.US',
'ATL.SEA.US']
def dt(intDate):
""" Accepts a date in integer format (20150101) and converts to a date """
strDate = str(intDate)
year = int(strDate[0:4])
month = int(strDate[4:6])
day = int(strDate[6:8])
s = datetime.datetime(year, month, day)
return s
def loop_getLowDepartPrices(routekey):
""" Accepts a list of routekeys. Calls the pricing API for each routekey, retrieves JSON results, and writes to a database """
db_connection = db.connect(**db_config)
print("Opening database connection...")
db_cursor = db_connection.cursor()
for routekey in routekeys:
params = routekey.split('.')
param_dict = {}
param_dict["origin"] = params[0]
param_dict["destination"] = params[1]
param_dict["pos"] = params[2]
param_dict["minDepartDate"] = app_config["minDepartDate"]
param_dict["maxDepartDate"] = app_config["maxDepartDate"]
advice_json = getLowDepartPrices(param_dict) # Who ya gonna call?
err_list = ""
for error in advice_json.get("errors"):
err_list = err_list + error
print("Loading results to database...")
for record in advice_json.get("prices"):
db_cursor.execute("insert into dbo.getLowDepartPrices2 values (?,?,?,?,?,?,?,?,?)", datetime.datetime.now(), param_dict["origin"], param_dict["destination"], param_dict["pos"], dt(param_dict["minDepartDate"]), dt(param_dict["maxDepartDate"]), err_list, dt(record.get("date")), record.get("price"))
db_connection.commit()
#print(advice_json)
print("Closing DB connection...")
db_connection.close()
def getLowDepartPrices(kwargs):
""" Request data from the pricing API for a single routekey and return JSON """
url = "http://.../getLowDepartPrices"
print("Requesting data via API...")
r = requests.get(url, params=kwargs)
jsondata = json.loads(r.content)
return jsondata
loop_getLowDepartPrices(routekeys)
#print(advice_getLowDepartPrices(origin='DFW', destination='LAX', minDepartDate='20140210', maxDepartDate="20140211", pos='US'))
| unlicense | Python | |
3222504bd076368c7a3069f9accf2627ad1dab86 | Add __init__ to repo root for subclassing | Dinh-Hung-Tu/rivescript-python,Dinh-Hung-Tu/rivescript-python,plasmashadow/rivescript-python,aichaos/rivescript-python,aichaos/rivescript-python,plasmashadow/rivescript-python,aichaos/rivescript-python,FujiMakoto/makoto-rivescript,FujiMakoto/makoto-rivescript,plasmashadow/rivescript-python,Dinh-Hung-Tu/rivescript-python,FujiMakoto/makoto-rivescript | __init__.py | __init__.py | from rivescript import RiveScript
| mit | Python | |
849adb1ea4c6ff46145e18dbe4fd949b5d5a4d7d | Create __init__.py | TryCatchHCF/DumpsterFire | __init__.py | __init__.py | mit | Python | ||
4d8aaca73a8dbe8895ca4a523828fbb7ecd3fc7f | Add __init__ file | snapbill/snapbill-pyapi | __init__.py | __init__.py | from snapbill import *
| mit | Python | |
b64e7af67161fecb7aec8797497e1d3a4f3e91fe | complete 35 circular primes | dawran6/project-euler | 35-circular-primes.py | 35-circular-primes.py | from utils import prime_gen
def rotate(l):
for x in range(len(l)):
yield l[-x:] + l[:-x]
def circular_primes():
p = prime_gen()
while True:
prime = next(p)
if prime > 1_000_000:
break
else:
s.add(str(prime))
for prime in s:
if all((''.join(p) in s)
for p in rotate(prime)):
yield int(prime)
if __name__ == '__main__':
s = set()
ans = len(list(circular_primes()))
| mit | Python | |
313e9cae068192fe11ad10ea0b5c05061b0e5c60 | Add unit test for _validate_key_file_permissions in ec2 module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/unit/cloud/clouds/ec2_test.py | tests/unit/cloud/clouds/ec2_test.py | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
| apache-2.0 | Python | |
22f57f6490826b99e35b80d85837c95921cd6c97 | add test to reproduce keyerror | zestyr/lbry,zestyr/lbry,lbryio/lbry,lbryio/lbry,zestyr/lbry,lbryio/lbry | tests/unit/dht/test_routingtable.py | tests/unit/dht/test_routingtable.py | import unittest
from lbrynet.dht import contact, routingtable, constants
class KeyErrorFixedTest(unittest.TestCase):
""" Basic tests case for boolean operators on the Contact class """
def setUp(self):
own_id = (2 ** constants.key_bits) - 1
# carefully chosen own_id. here's the logic
# we want a bunch of buckets (k+1, to be exact), and we want to make sure own_id
# is not in bucket 0. so we put own_id at the end so we can keep splitting by adding to the
# end
self.table = routingtable.OptimizedTreeRoutingTable(own_id)
def fill_bucket(self, bucket_min):
bucket_size = constants.k
for i in range(bucket_min, bucket_min + bucket_size):
self.table.addContact(contact.Contact(long(i), '127.0.0.1', 9999, None))
def overflow_bucket(self, bucket_min):
bucket_size = constants.k
self.fill_bucket(bucket_min)
self.table.addContact(
contact.Contact(long(bucket_min + bucket_size + 1), '127.0.0.1', 9999, None))
def testKeyError(self):
# find middle, so we know where bucket will split
bucket_middle = self.table._buckets[0].rangeMax / 2
# fill last bucket
self.fill_bucket(self.table._buckets[0].rangeMax - constants.k - 1)
# -1 in previous line because own_id is in last bucket
# fill/overflow 7 more buckets
bucket_start = 0
for i in range(0, constants.k):
self.overflow_bucket(bucket_start)
bucket_start += bucket_middle / (2 ** i)
# replacement cache now has k-1 entries.
# adding one more contact to bucket 0 used to cause a KeyError, but it should work
self.table.addContact(contact.Contact(long(constants.k + 2), '127.0.0.1', 9999, None))
# import math
# print ""
# for i, bucket in enumerate(self.table._buckets):
# print "Bucket " + str(i) + " (2 ** " + str(
# math.log(bucket.rangeMin, 2) if bucket.rangeMin > 0 else 0) + " <= x < 2 ** "+str(
# math.log(bucket.rangeMax, 2)) + ")"
# for c in bucket.getContacts():
# print " contact " + str(c.id)
# for key, bucket in self.table._replacementCache.iteritems():
# print "Replacement Cache for Bucket " + str(key)
# for c in bucket:
# print " contact " + str(c.id)
| mit | Python | |
0202f0bd4358d68917af19910bb4e1f0a3dda602 | Add usb bridge test(CDC & HID) | archangdcc/avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,Canaan-Creative/Avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,archangdcc/avalon-extras | scripts/avalon-usb2iic-test.py | scripts/avalon-usb2iic-test.py | #!/usr/bin/env python2.7
# This script aim to make a loopback test on cdc or hid.
# The statics is used for comparison, it is not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:0")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
| unlicense | Python | |
33028c8955e8d1b65416e52169a53b40d9a0dbe4 | add new package : rt-tests (#14174) | LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/rt-tests/package.py | var/spack/repos/builtin/packages/rt-tests/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RtTests(Package):
    """
    Suite of real-time tests - cyclictest, hwlatdetect, pip_stress,
    pi_stress, pmqtest, ptsematest, rt-migrate-test, sendme, signaltest,
    sigwaittest, svsematest.
    """

    homepage = "https://git.kernel.org"
    url = "https://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git/snapshot/rt-tests-1.2.tar.gz"

    version('1.2', sha256='7ccde036059c87681a4b00e7138678d9551b1232113441f6edda31ea45452426')

    def install(self, spec, prefix):
        # No build step: copy the unpacked source tree verbatim into the
        # install prefix.
        install_tree('.', prefix)
| lgpl-2.1 | Python | |
058380775fcfed77373512946c4a32b9df4aac87 | add benchmark for signal.lfilter. Follow-up of gh-4628. | niknow/scipy,e-q/scipy,aeklant/scipy,anielsen001/scipy,bkendzior/scipy,surhudm/scipy,bkendzior/scipy,grlee77/scipy,jjhelmus/scipy,e-q/scipy,jamestwebber/scipy,ilayn/scipy,Gillu13/scipy,josephcslater/scipy,Eric89GXL/scipy,gdooper/scipy,mikebenfield/scipy,mdhaber/scipy,anntzer/scipy,niknow/scipy,ilayn/scipy,befelix/scipy,jamestwebber/scipy,nonhermitian/scipy,anielsen001/scipy,jakevdp/scipy,vigna/scipy,pschella/scipy,woodscn/scipy,surhudm/scipy,Gillu13/scipy,lhilt/scipy,zerothi/scipy,apbard/scipy,sriki18/scipy,argriffing/scipy,Stefan-Endres/scipy,aarchiba/scipy,gdooper/scipy,haudren/scipy,pyramania/scipy,mdhaber/scipy,lhilt/scipy,jjhelmus/scipy,gfyoung/scipy,Gillu13/scipy,matthewalbani/scipy,nonhermitian/scipy,andyfaff/scipy,pschella/scipy,sriki18/scipy,gertingold/scipy,anntzer/scipy,lhilt/scipy,endolith/scipy,sriki18/scipy,dominicelse/scipy,Newman101/scipy,pyramania/scipy,e-q/scipy,larsmans/scipy,woodscn/scipy,person142/scipy,mdhaber/scipy,gertingold/scipy,rgommers/scipy,josephcslater/scipy,e-q/scipy,larsmans/scipy,Newman101/scipy,e-q/scipy,grlee77/scipy,surhudm/scipy,endolith/scipy,matthewalbani/scipy,vigna/scipy,gfyoung/scipy,tylerjereddy/scipy,aeklant/scipy,chatcannon/scipy,Eric89GXL/scipy,jor-/scipy,gertingold/scipy,pyramania/scipy,larsmans/scipy,pbrod/scipy,surhudm/scipy,argriffing/scipy,kleskjr/scipy,lhilt/scipy,larsmans/scipy,aeklant/scipy,pbrod/scipy,woodscn/scipy,anntzer/scipy,pbrod/scipy,maniteja123/scipy,matthewalbani/scipy,vigna/scipy,Eric89GXL/scipy,perimosocordiae/scipy,befelix/scipy,nonhermitian/scipy,jjhelmus/scipy,niknow/scipy,zerothi/scipy,pbrod/scipy,pschella/scipy,Stefan-Endres/scipy,argriffing/scipy,woodscn/scipy,nmayorov/scipy,anntzer/scipy,jjhelmus/scipy,aarchiba/scipy,scipy/scipy,pschella/scipy,behzadnouri/scipy,rgommers/scipy,lhilt/scipy,kleskjr/scipy,nonhermitian/scipy,gertingold/scipy,grlee77/scipy,ilayn/scipy,Stefan-Endres/scipy,bkendzior/sc
ipy,chatcannon/scipy,Gillu13/scipy,endolith/scipy,dominicelse/scipy,Eric89GXL/scipy,jakevdp/scipy,Newman101/scipy,zerothi/scipy,nmayorov/scipy,gdooper/scipy,aeklant/scipy,kalvdans/scipy,befelix/scipy,nmayorov/scipy,surhudm/scipy,WarrenWeckesser/scipy,rgommers/scipy,aarchiba/scipy,arokem/scipy,mdhaber/scipy,pschella/scipy,nmayorov/scipy,dominicelse/scipy,argriffing/scipy,Gillu13/scipy,argriffing/scipy,Stefan-Endres/scipy,chatcannon/scipy,behzadnouri/scipy,haudren/scipy,kleskjr/scipy,mdhaber/scipy,apbard/scipy,gfyoung/scipy,Eric89GXL/scipy,ilayn/scipy,jakevdp/scipy,anntzer/scipy,dominicelse/scipy,maniteja123/scipy,pyramania/scipy,kleskjr/scipy,andyfaff/scipy,jjhelmus/scipy,WarrenWeckesser/scipy,WarrenWeckesser/scipy,Stefan-Endres/scipy,matthew-brett/scipy,matthew-brett/scipy,arokem/scipy,zerothi/scipy,arokem/scipy,aarchiba/scipy,niknow/scipy,aeklant/scipy,haudren/scipy,jakevdp/scipy,endolith/scipy,anielsen001/scipy,dominicelse/scipy,apbard/scipy,gertingold/scipy,jakevdp/scipy,matthew-brett/scipy,mdhaber/scipy,pizzathief/scipy,larsmans/scipy,mikebenfield/scipy,vigna/scipy,gdooper/scipy,sriki18/scipy,niknow/scipy,tylerjereddy/scipy,chatcannon/scipy,apbard/scipy,mikebenfield/scipy,behzadnouri/scipy,maniteja123/scipy,rgommers/scipy,vigna/scipy,zerothi/scipy,chatcannon/scipy,josephcslater/scipy,niknow/scipy,arokem/scipy,haudren/scipy,nonhermitian/scipy,perimosocordiae/scipy,jor-/scipy,person142/scipy,josephcslater/scipy,zerothi/scipy,maniteja123/scipy,andyfaff/scipy,WarrenWeckesser/scipy,pbrod/scipy,andyfaff/scipy,tylerjereddy/scipy,jamestwebber/scipy,WarrenWeckesser/scipy,andyfaff/scipy,behzadnouri/scipy,mikebenfield/scipy,surhudm/scipy,jor-/scipy,ilayn/scipy,anielsen001/scipy,matthewalbani/scipy,jor-/scipy,kalvdans/scipy,larsmans/scipy,scipy/scipy,WarrenWeckesser/scipy,jamestwebber/scipy,perimosocordiae/scipy,scipy/scipy,nmayorov/scipy,perimosocordiae/scipy,pizzathief/scipy,perimosocordiae/scipy,endolith/scipy,kalvdans/scipy,jor-/scipy,pizzathief/scipy,gdooper/scipy,aarc
hiba/scipy,scipy/scipy,tylerjereddy/scipy,behzadnouri/scipy,pizzathief/scipy,woodscn/scipy,grlee77/scipy,Newman101/scipy,sriki18/scipy,Newman101/scipy,maniteja123/scipy,sriki18/scipy,ilayn/scipy,woodscn/scipy,perimosocordiae/scipy,behzadnouri/scipy,argriffing/scipy,josephcslater/scipy,maniteja123/scipy,haudren/scipy,befelix/scipy,scipy/scipy,person142/scipy,anielsen001/scipy,matthew-brett/scipy,chatcannon/scipy,anntzer/scipy,pizzathief/scipy,rgommers/scipy,bkendzior/scipy,scipy/scipy,endolith/scipy,Eric89GXL/scipy,gfyoung/scipy,Newman101/scipy,haudren/scipy,person142/scipy,kalvdans/scipy,matthewalbani/scipy,Stefan-Endres/scipy,Gillu13/scipy,kleskjr/scipy,anielsen001/scipy,befelix/scipy,pbrod/scipy,jamestwebber/scipy,tylerjereddy/scipy,gfyoung/scipy,kleskjr/scipy,pyramania/scipy,andyfaff/scipy,arokem/scipy,matthew-brett/scipy,person142/scipy,apbard/scipy,bkendzior/scipy,grlee77/scipy,kalvdans/scipy,mikebenfield/scipy | benchmarks/benchmarks/signal_filtering.py | benchmarks/benchmarks/signal_filtering.py | from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.signal import lfilter, firwin
except ImportError:
pass
from .common import Benchmark
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
| bsd-3-clause | Python | |
fd838bdb57d126464042e074a80148beb8a3cd01 | Create rob_client.py | carlfsmith/LunaBot,carlfsmith/LunaBot,carlfsmith/LunaBot,carlfsmith/LunaBot,carlfsmith/LunaBot | Control/rob_client.py | Control/rob_client.py | # -----------------------
# Author: Alex Anderson
# Purpose: Send commands to server on robot
# with IP address of self.HOST based
# on the activity of the hat/D-pad of
# a joystick/gamepad
# Date: 11/20/13
# ---------------------------
import socket
from gamepad_manager import * #gampad_manager must be in the same directory as this script
# Takes joystick/gamepad input and
#sends the appropriate signals to the
#robot with the Robot_Client class
class Robot_Commander:
def __init__(self, port=3612):
self.bot_link = Robot_Client(port)
self.bot_link.connect()
self.drive_direction = Drive_Signals()
self.gm = GamepadManager()
def drive(self, direction):
if self.drive_direction != direction:
self.drive_direction = direction
self.bot_link.send_command(str(direction))
def main(self):
num_sticks = self.gm.get_num_sticks()
if num_sticks == 0:
found = False
while found == False:
print "Now joysticks/gamepads could be found :( Look again? (y/n)? ",
if raw_input() == 'y':
num_sticks = self.gm.search()
if num_sticks > 0:
found = True
else:
print "Are you sure you want to exit the client? (y/n) ",
if raw_input() == 'y':
return
#Ask user to choose a gamepad/joystick
print "Select which Gamepad/Joystick to use."
i = 0
while i < num_sticks:
print "\t%d: %s" % (i, self.gm.get_stick_name(i))
i = i + 1
#Ensure that the id number is a valid number
stk_id = int(raw_input())
while stk_id < 0 or stk_id > (num_sticks-1):
print "That isn't a valid id number. Try again ",
stk_id = int(raw_input())
#main control loop
drive_dir = self.drive_direction
while True:
self.gm.update_events()
#decide which way to drive
hat_vect = self.gm.get_hat(0, stk_id)
if hat_vect == (0,0): #stop
drive_dir = Drive_Signals.STOP
elif hat_vect == (0,1): #forward
drive_dir = Drive_Signals.FORWARD
elif hat_vect == (0,-1): #backward
drive_dir = Drive_Signals.BACKWARD
elif hat_vect == (1,0): #right
drive_dir = Drive_Signals.RIGHT
elif hat_vect == (-1,0): #left
drive_dir = Drive_Signals.LEFT
if self.gm.get_button(6, stk_id) == True and self.gm.get_button(7, stk_id) == True:
break
self.drive(drive_dir)
def __del__(self):
self.bot_link.disconnect()
# Creates/Maintains connection with server on robot
class Robot_Client:
def __init__(self, port=3612):
self.HOST = 'localhost' #IP address of robot/server
self.PORT = port
self.SIZE = 1024
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.robot_connected = False
def connect(self):
#Creates connection with the robot
print "Connecting to robot...",
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.HOST, self.PORT))
self.robot_connected = True
print "\tConnected!"
except Exception as e:
print "\tConnection failed!"
print e
def send_command(self, cmd):
#if robot is connected send a command
if self.robot_connected:
try:
self.socket.send(cmd)
except:
self.disconnect_robot()
def disconnect(self):
#Closes the connection with the robot
print "Disconnecting...",
if self.robot_connected == True:
self.socket.send("Take the highway")
self.socket.close()
self.robot_connected = False
print "\tDisconnected!"
# Contains variables which hold the signals
#to be sent to the robot for drive direction
class Drive_Signals:
    """Single-character command codes sent to the robot server.

    The letters mirror the classic WASD keyboard layout, plus 'Z' for stop.
    """
    STOP = 'Z'       # halt all motion
    FORWARD = 'W'    # drive forwards
    BACKWARD = 'S'   # drive backwards
    LEFT = 'A'       # turn left
    RIGHT = 'D'      # turn right
# Entry point: when run as a script, connect to the robot on port 3612
# and hand control to the interactive gamepad loop.
if __name__ == "__main__":
    rc = Robot_Commander(3612)
    rc.main()
| apache-2.0 | Python | |
4c925c6134e705dd5f304a983c66ad60ce6bf901 | add diagnostic | akrherz/idep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/idep | scripts/plots/huc12_flowpath_counts.py | scripts/plots/huc12_flowpath_counts.py | """Diagnostic on HUC12 flowpath counts."""
import numpy as np
import cartopy.crs as ccrs
from matplotlib.patches import Polygon
import matplotlib.colors as mpcolors
from geopandas import read_postgis
from pyiem.util import get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.plot.geoplot import MapPlot
def main():
"""Go Main Go."""
pgconn = get_dbconn('idep')
df = read_postgis("""
select f.huc_12, count(*) as fps,
st_transform(h.simple_geom, 4326) as geo
from flowpaths f JOIN huc12 h on
(f.huc_12 = h.huc_12) WHERE f.scenario = 0 and h.scenario = 0
GROUP by f.huc_12, geo ORDER by fps ASC
""", pgconn, index_col=None, geom_col='geo')
bins = np.arange(1, 42, 2)
cmap = plt.get_cmap('copper')
cmap.set_over('white')
cmap.set_under('thistle')
norm = mpcolors.BoundaryNorm(bins, cmap.N)
mp = MapPlot(
continentalcolor='thistle', nologo=True,
sector='custom',
south=36.8, north=45.0, west=-99.2, east=-88.9,
subtitle='',
title=('DEP HUCs with <40 Flowpaths (%.0f/%.0f %.2f%%)' % (
len(df[df['fps'] < 40].index), len(df.index),
len(df[df['fps'] < 40].index) / len(df.index) * 100.
)))
for _i, row in df.iterrows():
c = cmap(norm([row['fps'], ]))[0]
arr = np.asarray(row['geo'].exterior)
points = mp.ax.projection.transform_points(
ccrs.Geodetic(), arr[:, 0], arr[:, 1])
p = Polygon(points[:, :2], fc=c, ec='None', zorder=2, lw=0.1)
mp.ax.add_patch(p)
mp.drawcounties()
mp.draw_colorbar(
bins, cmap, norm,
title='Count')
mp.postprocess(filename='/tmp/huc12_cnts.png')
# gdf = df.groupby('fps').count()
# gdf.columns = ['count', ]
# gdf['cumsum'] = gdf['count'].cumsum()
# gdf['percent'] = gdf['cumsum'] / gdf['count'].sum() * 100.
# gdf.to_csv('/tmp/huc12_flowpath_cnts.csv')
if __name__ == '__main__':
main()
| mit | Python | |
3a285bcb6bf06df1d0e8f60a8e754280a9f05dd9 | Copy arp_request.py to arp_fake.py. | bluhm/arp-regress | arp_fake.py | arp_fake.py | #!/usr/local/bin/python2.7
# send Address Resolution Protocol Request
# expect Address Resolution Protocol response and check all fields
# RFC 826 An Ethernet Address Resolution Protocol
# Packet Generation
import os
from addr import *
from scapy.all import *
# Build a broadcast who-has request for REMOTE_ADDR, sourced from our own
# MAC/IP (the address constants come from the local `addr` module).
arp=ARP(op='who-has', hwsrc=LOCAL_MAC, psrc=LOCAL_ADDR,
    hwdst="ff:ff:ff:ff:ff:ff", pdst=REMOTE_ADDR)
eth=Ether(src=LOCAL_MAC, dst="ff:ff:ff:ff:ff:ff")/arp
# Send once and wait up to 2 seconds for a single reply frame.
e=srp1(eth, iface=LOCAL_IF, timeout=2)
if e and e.type == ETH_P_ARP:
    a=e.payload
    # Validate every field of the ARP reply; exit(1) on first mismatch.
    if a.hwtype != ARPHDR_ETHER:
        print "HWTYPE=%#0.4x != ARPHDR_ETHER" % (a.hwtype)
        exit(1)
    if a.ptype != ETH_P_IP:
        print "PTYPE=%#0.4x != ETH_P_IP" % (a.ptype)
        exit(1)
    if a.hwlen != 6:
        print "HWLEN=%d != 6" % (a.hwlen)
        exit(1)
    if a.plen != 4:
        print "PLEN=%d != 4" % (a.plen)
        exit(1)
    if a.op != 2:
        print "OP=%s != is-at" % (a.op)
        exit(1)
    if a.hwsrc != REMOTE_MAC:
        print "HWLOCAL=%s != REMOTE_MAC" % (a.hwsrc)
        exit(1)
    if a.psrc != REMOTE_ADDR:
        print "PLOCAL=%s != REMOTE_ADDR" % (a.psrc)
        exit(1)
    if a.hwdst != LOCAL_MAC:
        print "HWREMOTE=%s != LOCAL_MAC" % (a.hwdst)
        exit(1)
    if a.pdst != LOCAL_ADDR:
        print "PREMOTE=%s != LOCAL_ADDR" % (a.pdst)
        exit(1)
    # All fields matched: success.
    print "arp reply"
    exit(0)
print "NO ARP REPLY"
exit(2)
| isc | Python | |
05635750a53a5a6f72a90eecea74a08e11a11ca8 | Add 37signals products | foauth/foauth.org,foauth/foauth.org,foauth/foauth.org,foauth/oauth-proxy | services/thirtysevensignals.py | services/thirtysevensignals.py | from werkzeug.urls import url_decode
import foauth.providers
class ThirtySevenSignals(foauth.providers.OAuth2):
# General info about the provider
alias = '37signals'
name = '37signals'
provider_url = 'https://37signals.com/'
docs_url = 'https://github.com/37signals/api'
category = 'Productivity'
# URLs to interact with the API
authorize_url = 'https://launchpad.37signals.com/authorization/new?type=web_server'
access_token_url = 'https://launchpad.37signals.com/authorization/token?type=web_server'
api_domains = [
'launchpad.37signals.com',
'basecamp.com',
'campfire.com',
'highrisehq.com',
]
available_permissions = [
(None, 'access your information'),
]
def get_user_id(self, key):
r = self.api(key, self.api_domains[0], u'/authorization.json')
return unicode(r.json[u'identity'][u'id'])
| bsd-3-clause | Python | |
470085c992a393522a13a5b6d8243f30fff57a80 | add activate_this.py | N4NU/mn-darts,N4NU/mn-darts | env/Scripts/activate_this.py | env/Scripts/activate_this.py | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
# __file__ is required to locate the virtualenv; it is only defined when
# this file is exec'd with it supplied, as the module docstring describes.
try:
    __file__
except NameError:
    raise AssertionError(
        "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os

# Put this environment's scripts directory at the front of PATH.
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# site-packages lives under Lib/ on Windows, lib/pythonX.Y/ elsewhere.
if sys.platform == 'win32':
    site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
    site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
# addsitedir appends the directory and processes any .pth files in it.
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
    if item not in prev_sys_path:
        new_sys_path.append(item)
        sys.path.remove(item)
sys.path[:0] = new_sys_path
| mit | Python | |
7e3fb49043503dc6aa5375c4e27ea770052e615c | Add audiofile_source example | kivy/audiostream,kivy/audiostream,kivy/audiostream | examples/audiofile_source.py | examples/audiofile_source.py | import sys
from time import sleep
import librosa
import numpy as np
from audiostream import get_output
from audiostream.sources.thread import ThreadSource
class MonoAmplitudeSource(ThreadSource):
"""A data source for float32 mono binary data, as loaded by libROSA/soundfile."""
def __init__(self, stream, data, *args, **kwargs):
super().__init__(stream, *args, **kwargs)
self.chunksize = kwargs.get('chunksize', 64)
self.data = data
self.cursor = 0
def get_bytes(self):
chunk = self.data[self.cursor:self.cursor+self.chunksize]
self.cursor += self.chunksize
if not isinstance(chunk, np.ndarray):
chunk = np.array(chunk)
assert len(chunk.shape) == 1 and chunk.dtype == np.dtype('float32')
# Convert to 16 bit format.
return (chunk * 2**15).astype('int16').tobytes()
# For example purposes, load first 30 seconds only.
# Data must be mono.
data, sr = librosa.core.load(sys.argv[1], mono=True, sr=None, duration=30)
stream = get_output(channels=1, rate=sr, buffersize=1024)
source = MonoAmplitudeSource(stream, data)
source.start()
# Wait until playback has finished.
while source.cursor < len(data):
sleep(.5)
source.stop()
| mit | Python | |
9dbd9d7b409afe18677a69d72339d040043a9087 | add urls to the audio appl | armstrong/armstrong.apps.audio | armstrong/apps/audio/urls.py | armstrong/apps/audio/urls.py | from django.conf.urls.defaults import *
from armstrong.apps.audio import views as AudioViews
urlpatterns = patterns('',
url(r'^$', AudioViews.AudioPublicationList.as_view(),
name='audio_list'),
url(r'^upload/$', AudioViews.AudioPublicationCreateView.as_view(),
name='audio_upload'),
url(r'^(?P<slug>[-\w]+)/$', AudioViews.AudioPublicationDetail.as_view(),
name='audio_detail'),
)
| apache-2.0 | Python | |
914f06c999f5c540c2feb6ab8825c1ced4246ed2 | Support sessions | drgarcia1986/muffin,klen/muffin | muffin/plugins/session.py | muffin/plugins/session.py | import base64
import hashlib
import hmac
import time
import asyncio
import ujson as json
from . import BasePlugin
class SessionPlugin(BasePlugin):
""" Support sessions. """
name = 'session'
defaults = {
'secret': 'InsecureSecret',
}
def setup(self, app):
""" Initialize the application. """
super().setup(app)
if self.options['secret'] == 'InsecureSecret':
app.logger.warn('Use insecure secret key. Change AUTH_SECRET option in configuration.')
@asyncio.coroutine
def middleware_factory(self, app, handler):
@asyncio.coroutine
def middleware(request):
request.session = Session(self.options['secret'])
request.session.load(request.cookies)
app.logger.debug('Started: %s', request.session)
response = yield from handler(request)
app.logger.debug('Ended: %s', request.session)
request.session.save(response.set_cookie)
return response
return middleware
class Session(dict):
encoding = 'UTF-8'
def __init__(self, secret, key='session.id', **params):
self.secret = secret
self.key = key
self.params = params
self.store = {}
def save(self, set_cookie):
if set(self.store.items()) ^ set(self.items()):
value = dict(self.items())
value = json.dumps(value)
value = self.encrypt(value)
if not isinstance(value, str):
value = value.encode(self.encoding)
set_cookie(self.key, value, **self.params)
def load(self, cookies, **kwargs):
value = cookies.get(self.key, None)
if value is None:
return False
data = json.loads(self.decrypt(value))
if not isinstance(data, dict):
return False
self.store = data
self.update(self.store)
def create_signature(self, value, timestamp):
h = hmac.new(self.secret.encode(), digestmod=hashlib.sha1)
h.update(timestamp)
h.update(value)
return h.hexdigest()
def encrypt(self, value):
timestamp = str(int(time.time())).encode()
value = base64.b64encode(value.encode(self.encoding))
signature = self.create_signature(value, timestamp)
return "|".join([value.decode(self.encoding), timestamp.decode(self.encoding), signature])
def decrypt(self, value):
value, timestamp, signature = value.split("|")
check = self.create_signature(value.encode(self.encoding), timestamp.encode())
if check != signature:
return None
return base64.b64decode(value).decode(self.encoding)
| mit | Python | |
977ec87292c10e21b22f9fe77b248ee83a87147d | Add basic tests | ijks/textinator | tests/tests.py | tests/tests.py | import unittest
from textinator import calculate_size
class CalculateSizeTestCase(unittest.TestCase):
"""Tests for calculate_size()"""
def test_width_no_height(self):
self.assertEqual(calculate_size((1920, 1080), (20, None)),
(20, 11))
self.assertEqual(calculate_size((500, 1240), (200, None)),
(200, 496))
def test_height_no_width(self):
self.assertEqual(calculate_size((1024, 768), (None, 413)),
(551, 413))
self.assertEqual(calculate_size((10, 670), (None, 800)),
(12, 800))
def test_height_and_width(self):
self.assertEqual(calculate_size((500, 600), (42, 612)),
(42, 612))
if __name__ == '__main__':
unittest.main()
| mit | Python | |
a36606beffbd65dc5e09e89aea312e6be2552be0 | Create sort_date.py | jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi | apps/camera/file_sweeper/sort_date.py | apps/camera/file_sweeper/sort_date.py |
#-*-coding:utf8-*-
#!/usr/bin/python
# Author : Jeonghoonkang, github.com/jeonghoonkang
| bsd-2-clause | Python | |
2fa7048351f249b3731d15e04cfb917083074eca | add arabic morphological analysis demo | motazsaad/comparable-text-miner | arabic-morphological-analysis-demo.py | arabic-morphological-analysis-demo.py | # coding: utf-8
import sys
##################################################################
def usage():
print 'Usage: ', sys.argv[0], '<inputfile> <outputfile>'
##################################################################
if len(sys.argv) < 3: usage(); sys.exit(2)
'''
Demo of Arabic morphological analysis tools, which are as follows:
- Text processing : remove Arabic diacritics and punctcutions, and tokenizing
- Rooting (ISRI Arabic Stemmer)
- light stemming
ISRI Arabic Stemmer is described in:
Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary. Information Science Research Institute. University of Nevada, Las Vegas, USA.
The difference between ISRI Arabic Stemmer and The Khoja stemmer is that ISRI stemmer does not use root dictionary. Also, if a root is not found, ISRI stemmer returned normalized form, rather than returning the original unmodified word.
Light stemming for Arabic words is to remove common affix (prefix and suffix) from words, but it does not convert words into their root form.
Example Usage:
python arabic-morphological-analysis-demo.py test-text-files/test-in.ar.txt test-text-files/test-out.ar.txt
'''
import imp
tp = imp.load_source('textpro', 'textpro.py')
def main(argv):
inputfile = sys.argv[1]
outputfile = sys.argv[2]
text = open(inputfile).read().decode('utf-8')
word_list = tp.process_text(text) # remove diacritics and punctcutions, stopwords, and tokenize text
roots = tp.getRootAr(word_list) # apply rooting algorithm
lightStems = tp.lightStemAr(word_list) # apply light stemming
output_text = 'apply rooting\n' + roots + '\n\n=================\napply light stemming\n' + lightStems
output = open(outputfile, 'w')
print>>output, output_text.encode('utf-8')
output.close()
##################################################################
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | Python | |
46200af23e98aefbde2140cd63d850d7ae5c5632 | Create EuclideanAlgorithm.py | Chuck8521/LunchtimeBoredom,Chuck8521/LunchtimeBoredom,Chuck8521/LunchtimeBoredom | EuclideanAlgorithm.py | EuclideanAlgorithm.py | trueA = input('Larger number: ')
trueB = input('Smaller number: ')
a = trueA
b = trueB
while a % b != 0:
oldA = a
equation = str(a) + ' = ' + str(a/b) + ' * ' + str(b) + ' + ' + str(a%b)
print(equation)
a = b
b = oldA % b
gcf = b
print('The GCF is: ' + str(gcf))
| mit | Python | |
e9ed96d606e2fc8db1e9e36978b48d725f925bb0 | Add octogit.__main__ for testing | myusuf3/octogit | octogit/__main__.py | octogit/__main__.py | from . import cli
cli.begin()
| mit | Python | |
c10d60911d870910d389668f6c99df694e9d914c | add registry test | dannygoldstein/sncosmo,sncosmo/sncosmo,dannygoldstein/sncosmo,kbarbary/sncosmo,sncosmo/sncosmo,kbarbary/sncosmo,sncosmo/sncosmo,dannygoldstein/sncosmo,kbarbary/sncosmo | sncosmo/tests/test_registry.py | sncosmo/tests/test_registry.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test registry functions."""
import numpy as np
import sncosmo
def test_register():
disp = np.array([4000., 4200., 4400., 4600., 4800., 5000.])
trans = np.array([0., 1., 1., 1., 1., 0.])
band = sncosmo.Bandpass(disp, trans, name='tophatg')
sncosmo.registry.register(band)
band2 = sncosmo.get_bandpass('tophatg')
# make sure we can get back the band we put it.
assert band2 is band
| bsd-3-clause | Python | |
872dcfd02b9c136d781abf15df38c71a234f82c0 | remove unneeded sleep | Hutspace/odekro,mysociety/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,mysociety/pombola,Hutspace/odekro,hzj123/56th,Hutspace/odekro,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,ken-muturi/pombola,Hutspace/odekro,Hutspace/odekro,mysociety/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola | mzalendo/hansard/tests.py | mzalendo/hansard/tests.py | import os
import datetime
import time
from django.test import TestCase
from hansard.models import Source
class HansardTest(TestCase):
def setUp(self):
source = Source(
name = 'Test Source',
url = 'http://www.mysociety.org/robots.txt',
date = datetime.date( 2001, 11, 14),
)
source.save()
self.source = source
def test_source_file(self):
source = self.source
self.assertEqual( source.name, 'Test Source')
# get where the file will be stored
cache_file_path = source.cache_file_path()
# check that the file does not exist
self.assertFalse( os.path.exists( cache_file_path ))
# retrieve and check it does
self.assertTrue( len( source.file().read() ) )
self.assertTrue( os.path.exists( cache_file_path ))
# change file, retrieve again and check we get cached version
self.assertTrue( os.path.exists( cache_file_path ))
added_text = "some random testing nonsense"
self.assertFalse( added_text in source.file().read() )
with open(cache_file_path, "a") as append_to_me:
append_to_me.write("\n\n" + added_text + "\n\n")
self.assertTrue( added_text in source.file().read() )
# delete the object - check cache file gone to
source.delete()
self.assertFalse( os.path.exists( cache_file_path ))
# check that the object is gone
self.assertFalse( Source.objects.filter(name="Test Source").count() )
def test_source_file_404(self):
source = self.source
source.url = source.url + 'xxx'
self.assertEqual( source.file(), None )
self.assertFalse( os.path.exists( source.cache_file_path() ))
| import os
import datetime
import time
from django.test import TestCase
from hansard.models import Source
class HansardTest(TestCase):
def setUp(self):
source = Source(
name = 'Test Source',
url = 'http://www.mysociety.org/robots.txt',
date = datetime.date( 2001, 11, 14),
)
source.save()
self.source = source
def test_source_file(self):
source = self.source
self.assertEqual( source.name, 'Test Source')
# get where the file will be stored
cache_file_path = source.cache_file_path()
# check that the file does not exist
self.assertFalse( os.path.exists( cache_file_path ))
# retrieve and check it does
self.assertTrue( len( source.file().read() ) )
self.assertTrue( os.path.exists( cache_file_path ))
# change file, retrieve again and check we get cached version
self.assertTrue( os.path.exists( cache_file_path ))
added_text = "some random testing nonsense"
self.assertFalse( added_text in source.file().read() )
with open(cache_file_path, "a") as append_to_me:
append_to_me.write("\n\n" + added_text + "\n\n")
self.assertTrue( added_text in source.file().read() )
# delete the object - check cache file gone to
source.delete()
time.sleep(2) # let things get synced
self.assertFalse( os.path.exists( cache_file_path ))
# check that the object is gone
self.assertFalse( Source.objects.filter(name="Test Source").count() )
def test_source_file_404(self):
source = self.source
source.url = source.url + 'xxx'
self.assertEqual( source.file(), None )
self.assertFalse( os.path.exists( source.cache_file_path() ))
| agpl-3.0 | Python |
910358e7cf3d8b996593c5d8eedf5c97bd0c6b67 | Test template for Monitor | Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,AutorestCI/azure-sdk-for-python,rjschwei/azure-sdk-for-python,SUSE/azure-sdk-for-python,v-iam/azure-sdk-for-python,Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,lmazuel/azure-sdk-for-python | azure-mgmt/tests/test_mgmt_monitor.py | azure-mgmt/tests/test_mgmt_monitor.py | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.monitor
import azure.monitor
from msrest.version import msrest_version
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
class MgmtMonitorTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtMonitorTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.monitor.InsightsManagementClient
)
self.data_client = self.create_mgmt_client(
azure.monitor.InsightsClient
)
if not self.is_playback():
self.create_resource_group()
@record
def test_basic(self):
account_name = self.get_resource_name('pymonitor')
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit | Python | |
cf4829ae46ef994c9f558dcc2f08743833b8a6fb | Add mineTweets.py | traxex33/Twitter-Analysis | mineTweets.py | mineTweets.py | import json
import tweepy
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
#App credentials
consumer_key = ""
consumer_secret = ""
access_token = ""
access_secret = ""
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
#Listening to Live Tweets
class MyListener(StreamListener):
def on_data(self, data):
try:
with open('live_stream.json', 'a') as f:
f.write(data)
return True
except BaseException as e:
print "Error"
return True
def on_error(self, status_code):
print status_code
if status_code == 420:
print "Connection not established"
return False
#Tracking live tweets with keyword "tracking"
''''
twitter_stream = Stream(auth, MyListener())
twitter_stream.filter(track = ['tracking'])
'''
#Gathering tweets from a user with screen_name = "screen_name"
for status in tweepy.Cursor(api.user_timeline, screen_name = "screen_name").items(200):
with open("screen_name_tweets.json", "a") as f:
json.dump(dict(status._json), f)
f.write('\n')
| mit | Python | |
0d56fb071628dc3f33c90d7f806371c076303551 | add autoGonk -- hopefully our self-determining "brain" to automate this | sodaphish/break,sodaphish/break | autoGonk.py | autoGonk.py | """
autoGonk -- testing out auto-determining which interfaces serve which function
"""
class ArpTable():
arpTable = {}
pass
class RouteTable():
routeTable = {}
pass
class IP():
pass | mit | Python | |
9d7d043a36f6e5a2fc599287a087eb806a58d73a | test to_xml survey | nukru/Swarm-Surveys,nukru/projectQ,nukru/projectQ,nukru/projectQ,nukru/Swarm-Surveys,nukru/Swarm-Surveys,nukru/projectQ | testXml.py | testXml.py | from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
import xml.etree.cElementTree as ET
from app import db, models
from app.models import Section, Survey, Consent, Question, QuestionText, QuestionLikertScale
def surveyXml(surveyData):
    """Serialize a Survey model into an ElementTree.

    Emits, in order: title, description, startDate, endDate,
    maxNumberRespondents, a <consents> container, then one <section>
    subtree per top-level section.
    """
    root = Element('survey')
    for tag, value in (
            ('title', surveyData.title),
            ('description', surveyData.description),
            ('startDate', str(surveyData.startDate)),
            ('endDate', str(surveyData.endDate)),
            ('maxNumberRespondents', str(surveyData.maxNumberRespondents))):
        SubElement(root, tag).text = value
    consents = Element('consents')
    for consent in surveyData.consents:
        consents.append(consentXml(consent))
    root.append(consents)
    for section in surveyData.sections:
        root.append(sectionXml(section))
    return ET.ElementTree(root)
def consentXml(consentData):
    """Serialize a Consent model into a single <consent> element."""
    node = Element('consent')
    node.text = consentData.text
    return node
def sectionXml(sectionData):
    """Serialize a Section model, its questions and subsections, into XML."""
    node = Element('section')
    for tag, value in (
            ('title', sectionData.title),
            ('description', sectionData.description),
            ('sequence', str(sectionData.sequence)),
            ('percent', str(sectionData.percent))):
        SubElement(node, tag).text = value
    for question in sectionData.questions:
        node.append(questionXml(question))
    # Subsections recurse through the same serializer.
    for child in sectionData.children:
        node.append(sectionXml(child))
    return node
def questionXml(questionData):
    # Serialize a Question model into XML; subclass-specific fields are
    # appended for QuestionText and QuestionLikertScale instances.
    question = Element('question')
    type = SubElement(question,'type')
    type.text = questionData.type
    text = SubElement(question,'text')
    text.text = questionData.text
    required = SubElement(question,'required')
    required.text = str(questionData.required)
    if questionData.choices !=None:
        for choice in questionData.choices:
            c = SubElement(question,'choice')
            c.text = choice
    expectedAnswer = SubElement(question,'expectedAnswer')
    expectedAnswer.text = questionData.expectedAnswer
    maxNumberAttempt = SubElement(question,'maxNumberAttempt')
    maxNumberAttempt.text = questionData.maxNumberAttempt
    if isinstance (questionData, QuestionText):
        isNumber = SubElement(question,'isNumber')
        isNumber.text = str(questionData.isNumber)
        regularExpression = SubElement(question,'regularExpression')
        # NOTE(review): self-assignment below -- the right-hand side was
        # almost certainly meant to be questionData.regularExpression.
        regularExpression.text = regularExpression.text
        errorMessage = SubElement(question,'errorMessage')
        errorMessage.text = questionData.errorMessage
    if isinstance (questionData, QuestionLikertScale):
        minLikert = SubElement(question,'minLikert')
        minLikert.text = str(questionData.minLikert)
        maxLikert = SubElement(question,'maxLikert')
        maxLikert.text = str(questionData.maxLikert)
        labelMin = SubElement(question,'labelMin')
        labelMin.text = questionData.labelMin
        labelMax = SubElement(question,'labelMax')
        labelMax.text = questionData.labelMax
    return question | apache-2.0 | Python |
3d35a84d92123fcf530cb366d60de0f2c45d1c18 | test python interpreter | shujunqiao/cocos2d-python,shujunqiao/cocos2d-python,dangillet/cocos,shujunqiao/cocos2d-python,vyscond/cocos | test/test_interpreter_layer.py | test/test_interpreter_layer.py | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
import pyglet
if __name__ == "__main__":
director.init()
interpreter_layer = cocos.layer.InterpreterLayer()
main_scene = cocos.scene.Scene(interpreter_layer)
director.run(main_scene)
| bsd-3-clause | Python | |
1d089833b47fe740d6dfaea89f94b1c9d1946c1e | enable evented based on socket | laslabs/odoo,Elico-Corp/odoo_OCB,hip-odoo/odoo,ygol/odoo,laslabs/odoo,dfang/odoo,laslabs/odoo,bplancher/odoo,dfang/odoo,hip-odoo/odoo,Elico-Corp/odoo_OCB,laslabs/odoo,Elico-Corp/odoo_OCB,ygol/odoo,hip-odoo/odoo,laslabs/odoo,bplancher/odoo,bplancher/odoo,hip-odoo/odoo,ygol/odoo,ygol/odoo,Elico-Corp/odoo_OCB,dfang/odoo,bplancher/odoo,dfang/odoo,hip-odoo/odoo,bplancher/odoo,dfang/odoo,laslabs/odoo,ygol/odoo,dfang/odoo,ygol/odoo,ygol/odoo,bplancher/odoo,hip-odoo/odoo,Elico-Corp/odoo_OCB,Elico-Corp/odoo_OCB | openerp/__init__.py | openerp/__init__.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
def is_server_running_with_gevent():
    """Return True when gevent is loaded and has monkey-patched sockets."""
    import sys
    if sys.modules.get("gevent") is None:
        return False
    import socket
    import gevent.socket
    # gevent counts as "active" only once its socket implementation has
    # replaced the stdlib one.
    return socket.socket is gevent.socket.socket
evented = is_server_running_with_gevent()
# Is the server running in pefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
    """
    Return the model registry for the given database, or the database mentioned
    on the current thread. If the registry does not exist yet, it is created on
    the fly.
    """
    if database_name is None:
        # Worker threads carry the current database name as `dbname`;
        # raises AttributeError if the thread has no database bound.
        import threading
        database_name = threading.currentThread().dbname
    return modules.registry.RegistryManager.get(database_name)
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in pefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.RegistryManager.get(database_name)
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
| agpl-3.0 | Python |
74a22516a368d98c8c71818c39d84208c9ecca66 | add buildbot.py | steinwurf/hex,steinwurf/hex | buildbot.py | buildbot.py | #!/usr/bin/env python
# encodingsak: utf-8
import os
import sys
import json
import subprocess
project_name = 'hex'
def run_command(args):
    """Echo and run *args*; raises CalledProcessError on non-zero exit."""
    print("Running: {}".format(args))
    # Flush so the echoed command appears before the subprocess's output
    # in the buildbot log.
    sys.stdout.flush()
    subprocess.check_call(args)
def get_tool_options(properties):
    """Translate properties['tool_options'] into ``--key[=value]`` flags.

    A ``None`` value yields a bare ``--key`` switch; anything else yields
    ``--key=value``. Returns an empty list when no tool options exist.
    """
    options = []
    if 'tool_options' not in properties:
        return options
    # One flag per option keeps the values correctly separated.
    for key, value in properties['tool_options'].items():
        options.append('--{0}'.format(key) if value is None
                       else '--{0}={1}'.format(key, value))
    return options
def configure(properties):
    """Run `waf configure`, translating buildbot properties into flags."""
    command = [sys.executable, 'waf']
    # Optionally wipe the previous build first.
    if properties.get('build_distclean'):
        command += ['distclean']
    command += ['configure', '--git-protocol=git@']
    if 'waf_bundle_path' in properties:
        command += ['--bundle-path=' + properties['waf_bundle_path']]
    # Pin one dependency project to a specific checkout when requested.
    if 'dependency_project' in properties:
        command += ['--{0}-use-checkout={1}'.format(
            properties['dependency_project'],
            properties['dependency_checkout'])]
    command += ["--cxx_mkspec={}".format(properties['cxx_mkspec'])]
    command += get_tool_options(properties)
    run_command(command)
def build(properties):
    """Run the (verbose) waf build step."""
    run_command([sys.executable, 'waf', 'build', '-v'])
def run_tests(properties):
    """Run the waf test step, optionally wrapping each test in valgrind."""
    command = [sys.executable, 'waf', '-v', '--run_tests']
    run_cmd = None
    if properties.get('valgrind_run'):
        # %s is substituted with the test binary by waf's run_cmd handling.
        run_cmd = 'valgrind --error-exitcode=1 %s'
    if run_cmd:
        command += ["--run_cmd={}".format(run_cmd)]
    command += get_tool_options(properties)
    run_command(command)
def install(properties):
    """Run `waf install`, honoring install_path/install_relative properties."""
    command = [sys.executable, 'waf', '-v', 'install']
    if 'install_path' in properties:
        command += ['--install_path={0}'.format(properties['install_path'])]
    if properties.get('install_relative'):
        command += ['--install_relative']
    run_command(command)
def coverage_settings(options):
    """Require 100% line coverage when the buildbot collects coverage."""
    options.update(required_line_coverage=100.0)
def main():
    """Dispatch one buildbot step: buildbot.py <command> <json-properties>."""
    argv = sys.argv
    if len(argv) != 3:
        print("Usage: {} <command> <properties>".format(argv[0]))
        sys.exit(0)
    cmd = argv[1]
    # Properties arrive as a single JSON object on the command line.
    properties = json.loads(argv[2])
    if cmd == 'configure':
        configure(properties)
    elif cmd == 'build':
        build(properties)
    elif cmd == 'run_tests':
        run_tests(properties)
    elif cmd == 'install':
        install(properties)
    else:
        print("Unknown command: {}".format(cmd))
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
0bc48dc1dd66178bec8d9bb3dc86a26baad240ee | use module-level function instead of a factory class | tumluliu/rap | rap/servicefactory.py | rap/servicefactory.py | """ Factory of RoutingService classes """
import json
from .mb import MapboxRouter
from .graphhopper import GraphHopperRouter
# from . import mapzen
# from . import google
# from . import here
# from . import tomtom
""" Factory method of creating concrete routing service instances
"""
VALID_ROUTING_SERVICES = [
'mapbox', 'graphhopper', 'mapzen', 'google', 'here', 'tomtom'
]
def RoutingServiceFactory(service_name):
    # Return a router instance for `service_name`, or None when the name is
    # unknown. NOTE(review): 'mapzen'/'google'/'here'/'tomtom' pass the
    # validation below but are unimplemented, so they also fall through and
    # implicitly return None.
    if service_name not in VALID_ROUTING_SERVICES:
        # TODO(lliu): Here should throw an exception or deal with it in an
        # identical way
        return None
    # NOTE(review): relative path assumes the process CWD sits one level
    # below the repo root -- confirm against how this module is launched.
    with open('../apikeys.json', 'r') as f:
        service_provider_conf = json.load(f)
    # TODO(lliu): The following stuff of course should be prettified
    # ATTENTION!! A pile of ugly things are coming...
    if service_name == 'mapbox':
        return MapboxRouter(service_provider_conf['mapbox']['key'])
    elif service_name == 'graphhopper':
        return GraphHopperRouter(service_provider_conf['graphhopper']['key'])
| mit | Python | |
162e7dd6595b0d9303ecb1da66893ee353ba413b | Add a first small file watcher that counts words | Kadrian/paper-gamification | tracker.py | tracker.py | import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
    """Watchdog handler that recounts the words in `filename` on each save."""
    def __init__(self, filename):
        FileSystemEventHandler.__init__(self)
        self.filename = filename
    def on_modified(self, event):
        # React only to modifications of the watched file itself -- the
        # observer reports events for the whole directory tree.
        if type(event) == FileModifiedEvent:
            if os.path.abspath(self.filename) == event.src_path:
                self.do_gamification()
    def do_gamification(self):
        """Count purely alphanumeric words in the file and log the total."""
        num_words = 0
        # Fix: the file handle was previously opened with plain open() and
        # never closed; repeated events leaked file descriptors. Use a
        # context manager so the handle is always released.
        with open(self.filename) as f:
            for line in f.readlines():
                words = line.split(" ")
                for w in words:
                    # NOTE(review): isalnum() skips punctuated words such as
                    # "word." -- kept as-is to preserve the original count.
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
    # Timestamped log lines; do_gamification reports through logging.info.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    if len(sys.argv) != 2:
        print "Please supply a file to be watched"
        sys.exit()
    filename = sys.argv[1]
    # Watch the file's containing directory; the handler filters events
    # down to the single file of interest.
    path = os.path.dirname(os.path.abspath(filename))
    # Observer setup + start
    event_handler = GamificationHandler(filename)
    observer = Observer()
    observer.schedule(event_handler, path=path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
6c10f4d98443b23424a01659afd5194c3edff141 | Add conflict inspector script | danmichaelo/wikidata_labels_nb_no,danmichaelo/wikidata_labels_nb_no | inspect_conflicts.py | inspect_conflicts.py | import oursql
import sys
import simplejson as json
from wikidataeditor import Site
# Load bot credentials and connect to the Wikidata replica database.
config = json.load(open('config.json', 'r'))
db = oursql.connect(host="wikidatawiki.labsdb", db="wikidatawiki_p", read_default_file="~/replica.my.cnf")
cur = db.cursor()
wd = Site('DanmicholoBot (+http://tools.wmflabs.org/danmicholobot/)')
wd.login(config['user'], config['pass'])
# Find items whose "nb" (Bokmal) and legacy "no" labels disagree.
cur.execute('select de.term_entity_id from wb_terms as de join wb_terms as def on de.term_entity_id = def.term_entity_id and de.term_type = def.term_type where de.term_entity_type = "item" and def.term_entity_type = "item" and de.term_language = "nb" and def.term_language = "no" and de.term_type != "alias" and de.term_text != def.term_text')
rows = [row[0] for row in cur.fetchall()]
for entity_id in rows:
    # Collect label/description per language for the conflicting item.
    cur.execute('SELECT term_language, term_type, term_text FROM wb_terms WHERE term_entity_id=? AND term_type != "alias" AND term_language IN ("nb", "no", "en")', [entity_id])
    labels = {'nb': {}, 'no': {}, 'en': {}}
    for row in cur.fetchall():
        labels[row[0]][row[1]] = row[2]
    # Look up the linked nowiki article title, if any.
    cur.execute('SELECT ips_site_page FROM wb_items_per_site WHERE ips_item_id=? AND ips_site_id="nowiki"', [entity_id])
    sl = ''
    for row in cur.fetchall():
        sl = row[0]
    print
    print ' en: {: <30} {}'.format(labels['en'].get('label'), labels['en'].get('description'))
    print ' [1] nb: {: <30} {}'.format(labels['nb'].get('label'), labels['nb'].get('description'))
    print ' [2] no: {: <30} {}'.format(labels['no'].get('label'), labels['no'].get('description'))
    print ' nowiki: %s' % sl
    item = wd.item(entity_id)
    # Interactive resolution: 1 keeps the nb label, 2 adopts the no label,
    # anything else is taken as a replacement nb label.
    choice = raw_input('1/2: ')
    if choice == '1':
        if labels['no'].get('label') is not None:
            item.remove_label('no', 'merged with "nb"-label')
    elif choice == '2':
        item.set_label('nb', labels['no'].get('label'), 'merged from "no"-label')
        item.remove_label('no', 'merged with "nb"-label')
    else:
        item.set_label('nb', choice, 'Updated "nb"-label')
        if labels['no'].get('label') is not None:
            item.remove_label('no', 'merged with "nb"-label')
    choice = raw_input('Update description? ')
    if choice != '':
        item.set_description('nb', choice, 'from manual inspection')
    print "Saved!"
    #sys.exit(1)
| unlicense | Python | |
c4c52c98f4c8596b4f19c88fb64e1b0af4f9c4cd | Add New Test Which Monitors Output | WoLpH/python-progressbar | tests/test_monitor_progress.py | tests/test_monitor_progress.py | pytest_plugins = "pytester"
def test_simple_example(testdir):
    """ Run the simple example code in a python subprocess and then compare its
        stderr to what we expect to see from it. We run it in a subprocess to
        best capture its stderr. We expect to see match_lines in order in the
        output. This test is just a sanity check to ensure that the progress
        bar progresses from 1 to 10, it does not make sure that the """
    v = testdir.makepyfile("""
        import time
        import progressbar
        bar = progressbar.ProgressBar()
        for i in bar(range(10)):
            time.sleep(0.1)
    """)
    result = testdir.runpython(v)
    # Each of the 10 steps must be reported, in order, up to 100%.
    result.stderr.re_match_lines([
        " 10% \(1 of 10\)",
        " 20% \(2 of 10\)",
        " 30% \(3 of 10\)",
        " 40% \(4 of 10\)",
        " 50% \(5 of 10\)",
        " 60% \(6 of 10\)",
        " 70% \(7 of 10\)",
        " 80% \(8 of 10\)",
        " 90% \(9 of 10\)",
        "100% \(10 of 10\)"
    ])
def test_rapid_updates(testdir):
    """ Run some example code that updates 10 times, then sleeps .1 seconds,
        this is meant to test that the progressbar progresses normally with
        this sample code, since there were issues with it in the past """
    v = testdir.makepyfile("""
        import time
        import progressbar
        bar = progressbar.ProgressBar()
        for i in bar(range(100)):
            if i % 10 == 0:
                time.sleep(0.1)
    """)
    result = testdir.runpython(v)
    # Only every 10th update sleeps; the bar must still show steady progress.
    result.stderr.re_match_lines([
        "  1% \(1 of 100\)",
        " 11% \(11 of 100\)",
        " 21% \(21 of 100\)",
        " 31% \(31 of 100\)",
        " 41% \(41 of 100\)",
        " 51% \(51 of 100\)",
        " 61% \(61 of 100\)",
        " 71% \(71 of 100\)",
        " 81% \(81 of 100\)",
        " 91% \(91 of 100\)",
        "100% \(100 of 100\)"
    ])
| bsd-3-clause | Python | |
2a47a563d5cc16f14edc0fb56a1af2848eccde57 | Add simple wrapper for a heap. | jorik041/txrudp,OpenBazaar/txrudp,Renelvon/txrudp | txrudp/heap.py | txrudp/heap.py | """Simple heap used as reorder buffer for received messages."""
import collections
import collections.abc
import heapq
class EmptyHeap(Exception):
    """Raised when popping from an empty heap (see Heap.pop_min)."""
class Heap(collections.abc.Sequence):
    """
    A min-heap for objects implementing total ordering.

    The object with the minimum order number is always at the front
    of the sequence, i.e. at index 0.

    Note: inherits from collections.abc.Sequence (the bare
    collections.Sequence alias was removed in Python 3.10), which also
    supplies iteration and membership tests via __getitem__/__len__.
    """
    def __init__(self):
        """Create a new (empty) Heap."""
        self._heap = []
    def __getitem__(self, index):
        """
        Get object at given index.

        Only index 0 is guaranteed a specific value: the minimum object
        in the heap (assuming a non-empty heap). Other slots follow the
        internal heap layout.

        Raises:
            IndexError: No element resides in given index.
        """
        return self._heap[index]
    def __len__(self):
        """Return number of objects inside heap."""
        return len(self._heap)
    def push(self, obj):
        """
        Push a new object in the heap.

        Args:
            obj: An object supporting total ordering.
        """
        heapq.heappush(self._heap, obj)
    def pop_min(self):
        """
        Pop and return the object at the top of the heap.

        Raises:
            EmptyHeap: The heap is empty.
        """
        try:
            return heapq.heappop(self._heap)
        except IndexError:
            raise EmptyHeap('Cannot pop from empty heap.')
| mit | Python | |
06c3086401e8cb221a1a665598fe75a1886d7f37 | test python script for test-NN added. | ryokbys/nap,ryokbys/nap,ryokbys/nap,ryokbys/nap | example/test-NN/test_nn.py | example/test-NN/test_nn.py | #!/usr/bin/env python
"""
Test pmd run about NN potential.
Usage:
test_nn.py [options]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
import os
from docopt import docopt
import unittest
__author__ = "RYO KOBAYASHI"
__version__ = "170122"
def get_init_epot(fname='out.pmd'):
    """Return the first potential energy reported in a pmd output file.

    Scans *fname* for the first line containing 'potential energy' and
    returns its third whitespace-separated token as a float.

    Raises:
        ValueError: no 'potential energy' line exists in the file.
    """
    with open(fname, 'r') as f:
        for line in f:
            if 'potential energy' in line:
                # e.g. "  potential energy  -1.234e+01  eV" -> token [2]
                return float(line.split()[2])
    # Previously this fell through to `return epot` with `epot` unbound,
    # raising an opaque NameError; fail with a clear message instead.
    raise ValueError('no "potential energy" line found in ' + fname)
class TestNN(unittest.TestCase):
    # Builds pmd once, runs it, and compares the initial potential energy
    # against a stored reference output (out.pmd.REF).
    @classmethod
    def setUpClass(self):
        self._dir = os.path.dirname(os.path.abspath(__file__))
        os.chdir(self._dir)
        print('cd ../../pmd && make pmd')
        os.chdir('../../pmd')
        # NOTE(review): os.system return codes are ignored; a failed build
        # only surfaces later as a missing or stale out.pmd.
        os.system('make pmd')
        os.chdir(self._dir)
        cmd = '../../pmd/pmd > out.pmd 2>&1'
        print(cmd)
        os.system(cmd)
    def test_nn(self):
        os.chdir(self._dir)
        # Reference vs freshly computed initial potential energy.
        eref = get_init_epot(fname='out.pmd.REF')
        epot = get_init_epot(fname='out.pmd')
        print('eref = {0:24.14e}'.format(eref))
        print('epot = {0:24.14e}'.format(epot))
        ediff = abs(eref-epot)
        print('ediff= {0:24.14e}'.format(ediff))
        self.assertLess(ediff,1.0e-10)
if __name__ == "__main__":
    # NOTE(review): `args` is never used, and docopt parses argv before
    # unittest.main() parses it again -- passing CLI options may misbehave.
    args = docopt(__doc__)
    unittest.main()
| mit | Python | |
bc356875eaf52bca2b1c04548a51e0372e7948aa | Create tcpServer_SocketServer.py | thedarkcoder/SPSE,ahhh/SPSE | tcpServer_SocketServer.py | tcpServer_SocketServer.py | #/bin/python
import SocketServer
import socket
SERVER_ADDRESS = ("0.0.0.0", 8888)
# Echo server handler: send every received chunk straight back to the peer.
class EchoHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        print "Received a connection from: ", self.client_address
        # Non-empty sentinel so the loop body runs at least once; recv
        # returning '' (peer closed) terminates the loop.
        data = "start"
        while len(data):
            data = self.request.recv(1024)
            self.request.send(data)
            print "%s said: "%str(self.client_address), data
        print "%s disconnected"%str(self.client_address)
if __name__ == '__main__':
    print "Listening on %s"%str(SERVER_ADDRESS)
    # Single-threaded TCPServer: clients are served one at a time.
    server = SocketServer.TCPServer(SERVER_ADDRESS, EchoHandler)
    server.serve_forever()
| mit | Python | |
d680d6a20890d3bbce96792fa1e86df28956a859 | Add a thread helper module | finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is | helpers/threading.py | helpers/threading.py | from threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
    """Decorator factory: run the decorated function on a new thread.

    ``app`` must expose a Flask-style ``logger``. Each call logs the wrapped
    function's name, starts it on a fresh Thread, and returns the Thread so
    the caller may join() it.
    """
    import functools

    def wrapper(fn):
        # Fix: preserve the wrapped function's __name__/__doc__ -- the bare
        # inner closure previously masked them as "run".
        @functools.wraps(fn)
        def run(*args, **kwargs):
            app.logger.info('Starting thread: {}'.format(fn.__name__))
            t = Thread(target=fn,
                       args=args,
                       kwargs=kwargs)
            t.start()
            return t
        return run
    return wrapper
| mit | Python | |
0d22cad3e34e7834e96e37b0268f624b45e7296f | add tests for outdoor shops | mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource | test/674-outdoor-shops.py | test/674-outdoor-shops.py | #http://www.openstreetmap.org/node/3056897308
assert_has_feature(
16, 11111, 25360, 'pois',
{ 'kind': 'fishing', 'min_zoom': 16 })
#http://www.openstreetmap.org/node/1467729495
assert_has_feature(
16, 10165, 24618, 'pois',
{ 'kind': 'hunting', 'min_zoom': 16 })
#http://www.openstreetmap.org/node/766201791
assert_has_feature(
16, 10179, 24602, 'pois',
{ 'kind': 'outdoor', 'min_zoom': 16 })
#http://www.openstreetmap.org/way/35343322
# Smaller Sports Basement store in SF
# This should really be in 16, 10483, 25332, but is in zoom 15 now
assert_has_feature(
15, 5241, 12666, 'pois',
{ 'kind': 'outdoor', 'id': 35343322 })
#http://www.openstreetmap.org/way/377630800
# Large Bass Pro building that should appear earlier
assert_has_feature(
15, 6842, 12520, 'pois',
{ 'kind': 'outdoor', 'id': 377630800 })
#http://www.openstreetmap.org/way/290195878
# Large REI building that should appear earlier
assert_has_feature(
15, 6207, 12321, 'pois',
{ 'kind': 'outdoor', 'id': 290195878 })
#http://www.openstreetmap.org/node/3931687122
assert_has_feature(
16, 10467, 25309, 'pois',
{ 'kind': 'scuba_diving', 'min_zoom': 17 })
#http://www.openstreetmap.org/node/2135237099
assert_has_feature(
16, 10558, 25443, 'pois',
{ 'kind': 'gas', 'min_zoom': 18 })
#https://www.openstreetmap.org/node/3799971066
assert_has_feature(
16, 10483, 25331, 'pois',
{ 'kind': 'motorcycle', 'min_zoom': 17 }) | mit | Python | |
3a9960849d1c9c7b519c9eb88aa3dc58c60b1eb3 | Create 3-blink.py | CamJam-EduKit/EduKit1 | Code/3-blink.py | Code/3-blink.py | #Import Libraries
import time #A collection of time related commands
import RPi.GPIO as GPIO #The GPIO commands
#Set the GPIO pin naming mode
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#Set pins 18, 23 and 24 to be output
GPIO.setup(18,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
GPIO.setup(24,GPIO.OUT)
#Blink all three LEDs twice: on 1s, off 1s, on 1s, then off and clean up.
#Turn LEDs on
GPIO.output(18,GPIO.HIGH)
GPIO.output(23,GPIO.HIGH)
GPIO.output(24,GPIO.HIGH)
time.sleep(1) #Pause for 1 second
#Turn LEDs off
GPIO.output(18,GPIO.LOW)
GPIO.output(23,GPIO.LOW)
GPIO.output(24,GPIO.LOW)
time.sleep(1) #Pause for 1 second
#Turn LEDs on
GPIO.output(18,GPIO.HIGH)
GPIO.output(23,GPIO.HIGH)
GPIO.output(24,GPIO.HIGH)
time.sleep(1) #Pause for 1 second
#Turn LEDs off
GPIO.output(18,GPIO.LOW)
GPIO.output(23,GPIO.LOW)
GPIO.output(24,GPIO.LOW)
#Clean up the GPIO pins
GPIO.cleanup()
| mit | Python | |
0ec1a3f3760b5977c036bc092b8b88647b3d4674 | Allow LogDevice to build without Submodules (#71) | phoad/rsocket-cpp,ReactiveSocket/reactivesocket-cpp,rsocket/rsocket-cpp,rsocket/rsocket-cpp,phoad/rsocket-cpp,rsocket/rsocket-cpp,ReactiveSocket/reactivesocket-cpp,rsocket/rsocket-cpp,phoad/rsocket-cpp,ReactiveSocket/reactivesocket-cpp,phoad/rsocket-cpp,phoad/rsocket-cpp | build/fbcode_builder/specs/rocksdb.py | build/fbcode_builder/specs/rocksdb.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def fbcode_builder_spec(builder):
    # Pass the USE_RTTI/PORTABLE defines to RocksDB's CMake configure step,
    # then install via the shared fb_github helper.
    builder.add_option("rocksdb/_build:cmake_defines", {
        "USE_RTTI": "1",
        "PORTABLE": "ON",
    })
    return {
        "steps": [
            builder.fb_github_cmake_install("rocksdb/_build"),
        ],
    }
| unknown | Python | |
26fa3f083468acb2155d811ce65d4d4d6aafe53b | Integrate LLVM at llvm/llvm-project@4b33ea052ab7 | tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tenso
rflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM."""
    # Pinned llvm-project commit and the sha256 of its source tarball;
    # always update the two together.
    LLVM_COMMIT = "4b33ea052ab7fbceed4c62debf1145f80d66b0d7"
    LLVM_SHA256 = "b6c61a6c81b1910cc34ccd9800fd18afc2dc1b9d76c640dd767f9e550f94c8d6"
    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        # The TensorFlow mirror is tried before GitHub.
        urls = [
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
            "//third_party/llvm:build.patch",
            "//third_party/llvm:toolchains.patch",
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "58a47508f03546b4fce668fad751102b94feacfd"
LLVM_SHA256 = "b3106ecc8ad56ecf9cd72c8a47117342e7641ef5889acb1d9093bcd0f2920d59"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| apache-2.0 | Python |
0cf2ce2331120c20de0cab384c5fdec763c25c68 | Add a simple markov chain to compare output with RNN | eliben/deep-learning-samples,eliben/deep-learning-samples | min-char-rnn/markov-model.py | min-char-rnn/markov-model.py | # Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
    """Draw a random element from Counter *c*, weighted by its count."""
    remaining = random.randrange(sum(c.values()))
    for elem, count in c.most_common():
        if remaining < count:
            return elem
        remaining -= count
def main():
    # Usage: markov-model.py <corpus-file>
    filename = sys.argv[1]
    with open(filename, 'r') as f:
        data = f.read()
    # states maps a STATE_LEN-character context to a Counter of next chars.
    states = defaultdict(Counter)
    print('Learning model...')
    for i in range(len(data) - STATE_LEN - 1):
        state = data[i:i + STATE_LEN]
        # (local `next` shadows the builtin; harmless in this scope)
        next = data[i + STATE_LEN]
        states[state][next] += 1
    print('Model has {0} states'.format(len(states)))
    # Debug aid: dump roughly the first 10 states of the model.
    j = 0
    for k, v in states.items():
        print(k, v)
        if j > 9:
            break
        j += 1
    print('Sampling...')
    # Start from a random context and emit 200 characters.
    state = random.choice(list(states))
    sys.stdout.write(state)
    for i in range(200):
        nextc = weighted_from_counter(states[state])
        sys.stdout.write(nextc)
        state = state[1:] + nextc
    print()
if __name__ == '__main__':
main()
| unlicense | Python | |
80c749e8b8395f20305c04c480fbf39400f1b5a4 | Add failing test for index route | wkevina/feature-requests-app,wkevina/feature-requests-app,wkevina/feature-requests-app | features/tests/test_index.py | features/tests/test_index.py | from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
| mit | Python | |
3f7b54496826f496863de545a601c23c2c06427a | Add first-party JavaScript build rule library | clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage | shipyard2/shipyard2/rules/javascripts.py | shipyard2/shipyard2/rules/javascripts.py | """Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
| mit | Python | |
18b98aff76c0b69748b5dd4b8ca27bd8d8b8aec8 | Add PassiveAggressiveClassifier to benchmark | rhiever/sklearn-benchmarks | model_code/PassiveAggressiveClassifier.py | model_code/PassiveAggressiveClassifier.py | import sys
import pandas as pd
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import itertools
# Usage: PassiveAggressiveClassifier.py <dataset.tsv.gz>
dataset = sys.argv[1]
# Read the data set into memory
input_data = pd.read_csv(dataset, compression='gzip', sep='\t')
# Grid-search over the classifier's hyper-parameters; each combination is
# evaluated on 30 different stratified train/test splits.
for (C, loss, fit_intercept) in itertools.product([0.01, 0.1, 0.5, 1.0, 10.0, 50.0, 100.0],
                                                  ['hinge', 'squared_hinge'],
                                                  [True, False]):
    for dataset_repeat in range(1, 31):
        # Divide the data set into a training and testing sets, each time with a different RNG seed
        training_indices, testing_indices = next(iter(StratifiedShuffleSplit(input_data['class'].values,
                                                                             n_iter=1,
                                                                             train_size=0.75,
                                                                             test_size=0.25,
                                                                             random_state=dataset_repeat)))
        training_features = input_data.loc[training_indices].drop('class', axis=1).values
        training_classes = input_data.loc[training_indices, 'class'].values
        testing_features = input_data.loc[testing_indices].drop('class', axis=1).values
        testing_classes = input_data.loc[testing_indices, 'class'].values
        # Standardize using statistics from the training split only, so no
        # information leaks from the test split.
        ss = StandardScaler()
        training_features = ss.fit_transform(training_features.astype(float))
        testing_features = ss.transform(testing_features.astype(float))
        # Create and fit the model on the training data
        try:
            # BUG FIX: fit_intercept was passed positionally after keyword
            # arguments (a SyntaxError); pass it by keyword instead.
            clf = PassiveAggressiveClassifier(C=C, loss=loss, fit_intercept=fit_intercept)
            clf.fit(training_features, training_classes)
            testing_score = clf.score(testing_features, testing_classes)
        except KeyboardInterrupt:
            sys.exit(1)
        except:
            # Deliberate best-effort: skip hyper-parameter combinations that
            # fail on this split and keep the sweep going.
            continue
        param_string = ''
        param_string += 'C={},'.format(C)
        param_string += 'loss={},'.format(loss)
        param_string += 'fit_intercept={}'.format(fit_intercept)
        # One TSV row per evaluation: dataset name, model, params, score.
        out_text = '\t'.join([dataset.split('/')[-1][:-7],
                              'PassiveAggressiveClassifier',
                              param_string,
                              str(testing_score)])
        print(out_text)
| mit | Python | |
6a74915c3f197ef197a34514c7ff313ac0a68d2f | Add migration to delete existing cache values | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/fixtures/migrations/0002_rm_blobdb_domain_fixtures.py | corehq/apps/fixtures/migrations/0002_rm_blobdb_domain_fixtures.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
| bsd-3-clause | Python | |
e184b806c6170aad2bdee87c051ea6400e1d954e | Add unit tests for the Document class | samueldg/clippings | tests/parser_test.py | tests/parser_test.py | import unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
| mit | Python | |
98006f7b27195153c1eb5d19b5902b60b0ffd963 | add unit tests for events.py | jait/tupelo | tests/test_events.py | tests/test_events.py | #!/usr/bin/env python
# vim: set sts=4 sw=4 et:
import unittest
import events
import rpc
class TestEventsRPC(unittest.TestCase):
def testEvent(self, cls=None):
cls = events.Event
e = cls()
e2 = rpc.rpc_decode(cls, rpc.rpc_encode(e))
self.assert_(isinstance(e2, cls))
self.assertEqual(e.type, e2.type)
def testCardPlayedEmpty(self):
cls = events.CardPlayedEvent
e = cls()
e2 = rpc.rpc_decode(cls, rpc.rpc_encode(e))
self.assert_(isinstance(e2, cls))
self.assertEqual(e.type, e2.type)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python | |
e9cfb095ac4261c8bf959d1c9b904256c267178f | Add basic unit test about /variables endpoint | openfisca/openfisca-web-api,openfisca/openfisca-web-api | openfisca_web_api/tests/test_variables.py | openfisca_web_api/tests/test_variables.py | # -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
| agpl-3.0 | Python | |
bcfccc8d7b19895f793e63289dcbbf9bf1ed834b | add unittest for home view | DimaWittmann/commute-together,DimaWittmann/commute-together,DimaWittmann/commute-together | commute_together/commute_together/tests/test_views.py | commute_together/commute_together/tests/test_views.py | from datetime import timedelta, datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from commute_together.models import MeetingModel, StationModel
class HomePageTest(TestCase):
    # Preload station data for these tests (presumably required by the
    # home view -- TODO confirm).
    fixtures = ['StationModel.json']
    def test_home_page_renders_template(self):
        """GET on the 'home' URL renders via the 'home.html' template."""
        response = self.client.get(reverse('home'))
        self.assertTemplateUsed(response, 'home.html')
ff673561c87bd16938a7c3f4d7609f91afbe9078 | add a script to perform the event averaging for the HBT correlation function | chunshen1987/HBT_correlation,chunshen1987/HBT_correlation,chunshen1987/HBT_correlation | ebe_scripts/average_event_HBT_correlation_function.py | ebe_scripts/average_event_HBT_correlation_function.py | #! /usr/bin/env python
"""
This script performs event averaging for the HBT correlation function
calculated from event-by-event simulations
"""
from sys import argv, exit
from os import path
from glob import glob
from numpy import *
# define colors (ANSI escape codes; defined for console output but unused
# in the code below)
purple = "\033[95m"
green = "\033[92m"
blue = "\033[94m"
yellow = "\033[93m"
red = "\033[91m"
normal = "\033[0m"
try:
    working_folder = path.abspath(argv[1])
except IndexError:
    # BUG FIX: a missing command-line argument raises IndexError, not
    # IOError, so the usage message was previously unreachable.
    print("Usage: average_event_HBT_correlation_function.py working_folder")
    exit(1)
# Each sub-folder of the working folder is one event's results.
file_folder_list = glob(path.join(working_folder, '*'))
results_folder_name = 'HBT_results'
KT_values = ['0', '0.2', '0.4', '0.6', '0.8', '1.0']
nev = len(file_folder_list)
for iKT in range(len(KT_values)):
    file_name = 'correlfunct_Pion(+)_kt_%s.dat' % KT_values[iKT]
    # Use the first event's kt_0 file only to obtain the array shape;
    # the contents are zeroed out by the *0.0.
    event_avg_data = loadtxt(path.join(file_folder_list[0], results_folder_name,
                             'correlfunct_Pion(+)_kt_0.dat'))*0.0
    num_err = zeros(len(event_avg_data[:, 0]))
    denorm_err = zeros(len(event_avg_data[:, 0]))
    # Accumulate the sum and sum-of-squares of the numerator (col 3) and
    # denominator (col 4) over all events.
    for ifolder in range(nev):
        results_folder = path.join(file_folder_list[ifolder],
                                   results_folder_name)
        temp_data = loadtxt(path.join(results_folder, file_name))
        event_avg_data += temp_data
        num_err += temp_data[:, 3]**2.
        denorm_err += temp_data[:, 4]**2.
    event_avg_data = event_avg_data/nev
    # NOTE(review): these look like standard errors, but the usual formula
    # is sqrt((E[x^2] - E[x]^2)/N); here the variance is divided by
    # sqrt(N) without taking its square root -- confirm the intent.
    num_err = (num_err/nev - event_avg_data[:, 3]**2)/sqrt(nev)
    denorm_err = (denorm_err/nev - event_avg_data[:, 4]**2)/sqrt(nev)
    # Correlation function = numerator/denominator; propagate the two
    # errors in quadrature into column 6.
    event_avg_data[:, 5] = event_avg_data[:, 3]/event_avg_data[:, 4]
    event_avg_data[:, 6] = sqrt(
        (num_err/event_avg_data[:, 4])**2
        + (event_avg_data[:, 3]*denorm_err/(event_avg_data[:, 4]**2))**2)
    savetxt(file_name, event_avg_data, fmt='%.10e', delimiter=' ')
print("Analysis is done.")
| mit | Python | |
c23d2711757a68f1348f8353aba7b3ec2d17a1d6 | Structure prediction boilerplate | WMD-group/SMACT | smact/structure_prediction/prediction.py | smact/structure_prediction/prediction.py | """Structure prediction implementation."""
from typing import Generator, List, Tuple, Optional
from .database import StructureDB
from .mutation import CationMutator
from .structure import SmactStructure
class StructurePredictor:
    """Provides structure prediction functionality.
    Implements a statistically-based model for determining
    likely structures of a given composition, based on a
    database of known compositions and a lambda table of
    weights.
    Based on the algorithm presented in:
    Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011)
    Data Mined Ionic Substitutions for the Discovery of New Compounds.
    Inorganic Chemistry, 50(2), 656-663.
    `doi:10.1021/ic102031h <https://pubs.acs.org/doi/10.1021/ic102031h>`_
    """
    def __init__(self, mutator: CationMutator, struct_db: StructureDB):
        """Initialize class."""
        # Cation mutator carrying the substitution-probability (lambda) table.
        self.cm = mutator
        # Database of known structures to draw substitution candidates from.
        self.db = struct_db
    def predict_structs(
        self,
        species: List[Tuple[str, int]],
        thresh: Optional[float] = 1e-3, ) -> Generator[Tuple[SmactStructure, float], None, None]:
        """Predict structures for a combination of species.
        Args:
            species: A list of (element, charge). The constituent species
                of the target compound.
            thresh: The probability threshold, below which to discard
                predictions.
        Yields:
            Potential structures, as tuples of (structure, probability).
        """
        # Placeholder: the prediction algorithm has not been implemented yet.
        raise NotImplementedError
| mit | Python | |
f49b6ad21ed9c8646c402a37015a80258fb79a68 | ADD EVALUATE PROCESS | taiducvu/VNG-NSFW-DETECTION | evaluate_model.py | evaluate_model.py | import tensorflow as tf
import os
import glob
import vng_model as md
import numpy as np
import csv
# Command-line flags controlling the evaluation run.
FLAGS = tf.app.flags.FLAGS
# Help strings fixed: the originals were ungrammatical and, for
# output_path, described a folder although the flag is used as the path of
# the output CSV file (see eval_model's open(FLAGS.output_path, ...)).
tf.app.flags.DEFINE_string('checkpoint_dir', '',
                           """Directory where the trained model weights are saved""")
tf.app.flags.DEFINE_string('eval_data_path', '',
                           """Directory of the folder that contains the evaluation data-set""")
tf.app.flags.DEFINE_integer('batch_size', 100,
                            """The size of a data batch""")
tf.app.flags.DEFINE_string('output_path', '',
                           """Path of the CSV file that receives the model's predictions""")
def read_data(filename_queue, height, width, channels):
    """Read one JPEG from the filename queue and resize it.

    Returns:
        (image, filename): the decoded image resized to (height, width)
        with `channels` channels, and the corresponding file-name tensor.
    """
    reader = tf.WholeFileReader()
    filename, value = reader.read(filename_queue)
    image_sample = tf.image.decode_jpeg(value, channels=channels)
    # resize_bilinear expects a 4-D batch, so add a leading batch
    # dimension for the resize and strip it afterwards.
    image_sample = tf.expand_dims(image_sample, 0)
    image_sample = tf.image.resize_bilinear(image_sample, [height, width])
    image_sample = tf.squeeze(image_sample, [0])
    return image_sample, filename
def eval_model():
    """
    Run the trained model over every *.jpeg under FLAGS.eval_data_path and
    write (filename, predicted_label) rows to FLAGS.output_path as CSV.
    :return: None
    """
    eval_data_path = []
    for path in glob.iglob(os.path.join(FLAGS.eval_data_path, '*.jpeg')):
        eval_data_path.append(path)
    with tf.Graph().as_default() as g:
        # Input pipeline: queue of file names -> decoded/resized images ->
        # fixed-size batches.
        filename_queue = tf.train.string_input_producer(eval_data_path)
        sample, filename = read_data(filename_queue, 224, 224, 3)
        batch_samples, batch_filename = tf.train.batch([sample, filename],
                                                       batch_size=FLAGS.batch_size,
                                                       capacity=FLAGS.batch_size,
                                                       name='input_test_data')
        # Build the VNG model
        x = tf.placeholder(tf.float32, (None, 224, 224, 3), name='input_features')
        y_ = tf.placeholder(tf.int32, (None,), name='labels')
        logit = md.inference_resnet(x, False)
        # Predicted class = argmax over the logits.
        hat_y = tf.arg_max(logit, 1, name='predict_label')
        # Load trained weights into model
        ckpt_model = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        saver_model = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))
        # Run
        coord = tf.train.Coordinator()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if ckpt_model and ckpt_model.model_checkpoint_path:
                saver_model.restore(sess, ckpt_model.model_checkpoint_path)
                print('Load trained weights successfully!')
            else:
                print('No checkpoint found!')
            # NOTE(review): relies on Python 2 integer division; any images
            # beyond num_iter * batch_size are silently skipped.
            num_iter = len(eval_data_path) / FLAGS.batch_size
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            # 'wb' mode with csv.writer is the Python 2 convention.
            with open(FLAGS.output_path, "wb") as f:
                writer = csv.writer(f)
                for _ in range(num_iter):
                    batch_images, batch_name = sess.run([batch_samples, batch_filename])
                    predicted_lb = sess.run(hat_y, feed_dict={x:batch_images})
                    # One CSV row per image: (file name, predicted label).
                    result_model = np.column_stack((np.array(batch_name),
                                                    np.array(predicted_lb)))
                    writer.writerows(result_model)
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=5)
            sess.close()
| mit | Python | |
3d5955767d81f45e796ab2af0707533375681774 | add runtests-windows.py script | sfeltman/pygobject,Distrotech/pygobject,davidmalcolm/pygobject,davidmalcolm/pygobject,thiblahute/pygobject,choeger/pygobject-cmake,jdahlin/pygobject,MathieuDuponchelle/pygobject,GNOME/pygobject,sfeltman/pygobject,Distrotech/pygobject,nzjrs/pygobject,Distrotech/pygobject,alexef/pygobject,alexef/pygobject,thiblahute/pygobject,davibe/pygobject,GNOME/pygobject,MathieuDuponchelle/pygobject,MathieuDuponchelle/pygobject,davibe/pygobject,nzjrs/pygobject,davibe/pygobject,Distrotech/pygobject,GNOME/pygobject,pexip/pygobject,alexef/pygobject,jdahlin/pygobject,choeger/pygobject-cmake,pexip/pygobject,sfeltman/pygobject,jdahlin/pygobject,choeger/pygobject-cmake,davidmalcolm/pygobject,thiblahute/pygobject,pexip/pygobject,nzjrs/pygobject,davibe/pygobject | tests/runtests-windows.py | tests/runtests-windows.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import unittest
# NOTE: this script is Python 2 ('except Exception, e', print statements,
# list-returning map()).
# Must be set before gobject is imported below.
os.environ['PYGTK_USE_GIL_STATE_API'] = ''
sys.path.insert(0, os.path.dirname(__file__))
sys.argv.append('--g-fatal-warnings')
import gobject
gobject.threads_init()
# Test modules that are skipped on Windows (crashes or blocking calls).
SKIP_FILES = ['runtests',
              'test_gio', # python crash
              'test_gresolver', # python crash
              'test_gsocket', # blocks on test_socket_condition_wait
              'test_mainloop', # no os.fork on windows
              'test_subprocess'] # blocks on testChildWatch
if __name__ == '__main__':
    testdir = os.path.split(os.path.abspath(__file__))[0]
    os.chdir(testdir)
    def gettestnames():
        # Every *.py in this directory is a test module, minus SKIP_FILES.
        files = glob.glob('*.py')
        names = map(lambda x: x[:-3], files)
        map(names.remove, SKIP_FILES)
        return names
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for name in gettestnames():
        try:
            suite.addTest(loader.loadTestsFromName(name))
        except Exception, e:
            print 'Could not load %s: %s' % (name, e)
    testRunner = unittest.TextTestRunner()
    testRunner.verbosity = 2
    testRunner.run(suite)
fffb2a38870afd0c9a6d1ab5efd36b75f98c9a9a | test the deprecation class. | douban/brownant | tests/test_deprecation.py | tests/test_deprecation.py | from brownant import Brownant, BrownAnt
def test_deprecation(recwarn):
    # Instantiating the old BrownAnt alias must emit a DeprecationWarning
    # while still producing a working Brownant instance.
    app = BrownAnt()
    warning = recwarn.pop(DeprecationWarning)
    assert isinstance(app, Brownant)
    assert issubclass(warning.category, DeprecationWarning)
    assert "Brownant" in str(warning.message)
    # The warning's filename/lineno should be populated (presumably
    # pointing at brownant's app.py module -- verify stacklevel intent).
    assert "app.py" in warning.filename
    assert warning.lineno
| bsd-3-clause | Python | |
404c9b70cf9b6c27e0fb16be1556d01b5077a4f4 | Add test cases for 500 error with slow responses | alisaifee/flask-limiter,joshfriend/flask-limiter,alisaifee/limits,alisaifee/limits,joshfriend/flask-limiter,alisaifee/flask-limiter | tests/test_regressions.py | tests/test_regressions.py | """
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
    """Regressions: requests that outlive their rate-limit window must not 500."""
    def build_app(self, config=None, **limiter_args):
        """Build a Flask app with a Limiter whose log output is captured.

        BUG FIX: 'config' previously used a mutable default argument ({}),
        which is shared across calls; default to None instead.
        """
        app = Flask(__name__)
        for k, v in (config or {}).items():
            app.config.setdefault(k, v)
        limiter = Limiter(app, **limiter_args)
        # Attach a mock handler so log records can be inspected without
        # polluting the test output.
        mock_handler = mock.Mock()
        mock_handler.level = logging.INFO
        limiter.logger.addHandler(mock_handler)
        return app, limiter
    def _check_slow_request(self, strategy):
        """A request that takes longer than the 1-second window must return 200."""
        app, limiter = self.build_app({
            C.GLOBAL_LIMITS: "5 per second",
            C.STORAGE_URL: "redis://localhost:6379",
            C.STRATEGY: strategy
        })
        @app.route("/t1")
        def t1():
            # Sleep past the end of the current rate-limit window.
            time.sleep(1.1)
            return "t1"
        with app.test_client() as cli:
            self.assertEqual(200, cli.get("/t1").status_code)
    def test_redis_request_slower_than_fixed_window(self):
        self._check_slow_request("fixed-window")
    def test_redis_request_slower_than_moving_window(self):
        self._check_slow_request("moving-window")
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.