commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9fe4ce918456a3470cf4eb50212af9a487c03ce4 | add tests for utils | corehq/apps/auditcare/tests/test_auditcare_migration.py | corehq/apps/auditcare/tests/test_auditcare_migration.py | from django.test.testcases import TransactionTestCase
from corehq.apps.auditcare.models import AuditcareMigrationMeta
from datetime import datetime
from unittest.mock import patch
from django.test import SimpleTestCase
from corehq.apps.auditcare.management.commands.copy_events_to_sql import Command
from corehq.apps.auditcare.utils.migration import AuditCareMigrationUtil, get_formatted_datetime_string
from django.core.cache import cache
class TestCopyEventsToSQL(SimpleTestCase):
    """Tests for the ``copy_events_to_sql`` management command's batch generation."""

    start_time = datetime(2020, 6, 1)

    @classmethod
    def setUpClass(cls):
        return super().setUpClass()

    @patch('corehq.apps.auditcare.management.commands.copy_events_to_sql.AuditCareMigrationUtil.get_next_batch_start', return_value=start_time)
    def test_generate_batches(self, _):
        # Hourly mode: two consecutive one-hour windows starting at start_time.
        batches = Command().generate_batches(2, 'h')
        expected_batches = [
            [datetime(2020, 6, 1), datetime(2020, 6, 1, 1)],
            [datetime(2020, 6, 1, 1), datetime(2020, 6, 1, 2)]
        ]
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(batches, expected_batches)

        # Daily mode: two consecutive one-day windows starting at start_time.
        batches = Command().generate_batches(2, 'd')
        expected_batches = [
            [datetime(2020, 6, 1), datetime(2020, 6, 2)],
            [datetime(2020, 6, 2), datetime(2020, 6, 3)]
        ]
        self.assertEqual(batches, expected_batches)
class TestAuditcareMigrationUtil(TransactionTestCase):
    """Integration tests for AuditCareMigrationUtil's cache- and DB-backed state."""

    # Shared util instance; its start key is seeded into the cache in setUpClass.
    util = AuditCareMigrationUtil()
    start_time = datetime(2020, 6, 1)

    @classmethod
    def setUpClass(cls):
        # Batch key format: "<start>_<end>" built from formatted datetimes.
        cls.key = get_formatted_datetime_string(datetime.now()) + '_' + get_formatted_datetime_string(datetime.now())
        cache.set(cls.util.start_key, cls.start_time)
        return super().setUpClass()

    def test_get_next_batch_start(self):
        # The next batch start should be exactly what was seeded into the cache.
        start_time = self.util.get_next_batch_start()
        self.assertEqual(start_time, self.start_time)

    def test_locking_functionality(self):
        # While the read lock is held, fetching the next batch start must fail.
        self.util.acquire_read_lock()
        self.assertRaises(Exception, self.util.get_next_batch_start)
        # After releasing, reads work again.
        self.util.release_read_lock()
        start_time = self.util.get_next_batch_start()
        self.assertEqual(start_time, self.start_time)

    def test_log_batch_start(self):
        # Logging the same key twice must not create a duplicate meta row.
        self.util.log_batch_start(self.key)
        self.util.log_batch_start(self.key)
        expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
        self.assertEqual(len(expected_log), 1)
        self.assertEqual(expected_log[0].key, self.key)
        expected_log[0].delete()

    def test_set_batch_as_finished(self):
        # A STARTED batch transitions to FINISHED (30 here is the record count).
        AuditcareMigrationMeta.objects.create(key=self.key, state=AuditcareMigrationMeta.STARTED)
        self.util.set_batch_as_finished(self.key, 30)
        expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
        self.assertEqual(expected_log[0].state, AuditcareMigrationMeta.FINISHED)
        expected_log[0].delete()

    def test_set_batch_as_errored(self):
        # A STARTED batch transitions to ERRORED on failure.
        AuditcareMigrationMeta.objects.create(key=self.key, state=AuditcareMigrationMeta.STARTED)
        self.util.set_batch_as_errored(self.key)
        expected_log = AuditcareMigrationMeta.objects.filter(key=self.key)
        self.assertEqual(expected_log[0].state, AuditcareMigrationMeta.ERRORED)
        expected_log[0].delete()

    def test_get_errored_keys(self):
        # Errored keys are returned parsed back into [start, end] datetime pairs.
        start_time = datetime(2020, 6, 20)
        end_time = datetime(2020, 6, 21)
        key = get_formatted_datetime_string(start_time) + '_' + get_formatted_datetime_string(end_time)
        obj = AuditcareMigrationMeta.objects.create(key=key, state=AuditcareMigrationMeta.ERRORED)
        keys = self.util.get_errored_keys(1)
        self.assertEqual([[start_time, end_time]], keys)
        obj.delete()

    @classmethod
    def tearDownClass(cls):
        # Clean up cache keys and all meta rows created by the tests.
        cache.delete(cls.util.start_key)
        cache.delete(cls.util.start_lock_key)
        AuditcareMigrationMeta.objects.all().delete()
        return super().tearDownClass()
| Python | 0 | |
class Scheduler(object):
    """Stateless helper that builds optimal schedules out of class listings.

    Intended to be used through its static methods only; do not create
    instances of this class.
    """

    def __init__(self):
        pass

    @staticmethod
    def generate_schedule(classtimes):
        """Generate one good schedule from the given classtimes.

        ``classtimes`` is a list of dicts, each carrying arbitrary course
        attributes (e.g. 'course_name') plus:

        - 'day': a day string where 'UMTWRFS' maps to Sunday..Saturday,
          e.g. 'MWF' or 'TR'
        - 'startTime' / 'endTime': times formatted 'HH:MM XM',
          e.g. '08:00 AM'

        Not implemented yet; currently returns None.
        """
        pass
| Python | 0 | |
82e4c67bd7643eed06e7cd170ca1d0de41c70912 | Add a data analyzer class. | core/data/DataAnalyzer.py | core/data/DataAnalyzer.py | """
DataAnalyzer
:Authors:
Berend Klein Haneveld
"""
class DataAnalyzer(object):
    """Static analysis helpers for vtkImageData-like volumes."""

    def __init__(self):
        super(DataAnalyzer, self).__init__()

    @classmethod
    def histogramForData(cls, data, nrBins):
        """Sample ``data`` on a coarse grid and bin the values into a histogram.

        :param data: image volume exposing GetDimensions(), GetScalarRange()
            and GetScalarComponentAsFloat(x, y, z, component).
        :param nrBins: number of histogram bins.
        :returns: list of ``nrBins`` sample counts.
        """
        dims = data.GetDimensions()
        minVal, maxVal = data.GetScalarRange()
        bins = [0 for _ in range(nrBins)]
        valueRange = float(maxVal - minVal)
        # Sample every third voxel in each dimension to keep this fast.
        stepSize = 3
        for z in range(0, dims[2], stepSize):
            for y in range(0, dims[1], stepSize):
                for x in range(0, dims[0], stepSize):
                    element = data.GetScalarComponentAsFloat(x, y, z, 0)
                    if valueRange == 0:
                        # Degenerate (constant) volume: avoid ZeroDivisionError
                        # and put every sample in the first bin.
                        index = 0
                    else:
                        index = int(((element - minVal) / valueRange) * (nrBins - 1))
                    bins[index] += 1
        return bins
| Python | 0 | |
def generate_new_procedure_message(procedure, ward, timeframe, doctor):
    """Build, print and return the SMS offering a supervised procedure.

    The trailing reference is what a recipient replies with to accept;
    it is currently fixed to "1".
    """
    reference = str(1)
    message = (
        "{0} is available on {1}. Attend the ward in {2} and meet {3} in "
        "the junior doctors' office. To accept this opportunity reply "
        "with {4}"
    ).format(procedure, ward, timeframe, doctor, reference)
    print(message)
    return message
def generate_success_response_message(procedure, ward, timeframe, doctor):
    """Build, print and return the confirmation SMS for an accepted offer.

    NOTE(review): ``procedure`` is accepted but never used in the message
    text — confirm whether it should be included.
    """
    message = (
        "Please attend {0} in {1} and ask for {2} to complete this "
        "supervised procedure. This learning opportunity has been reserved "
        "exclusively for you, please make every effort to attend."
    ).format(ward, timeframe, doctor)
    print(message)
    return message
def generate_not_success_response_message():
    """Build, print and return the SMS sent when the slot was already taken."""
    message = "Sorry - procedure already taken this time."
    print(message)
    return message
926631d068a223788714cd645ae5336881c6853f | Update messageable.py | praw/models/reddit/mixins/messageable.py | praw/models/reddit/mixins/messageable.py | """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
    """Interface for classes that can be messaged."""

    def message(self, subject, message, from_subreddit=None):
        """Send a private message to this redditor or subreddit's modmail.

        :param subject: The subject line of the message.
        :param message: The body of the message.
        :param from_subreddit: A :class:`~.Subreddit` instance or name to
            send the message from. When provided, the message is sent on
            behalf of that subreddit rather than the authenticated user;
            the user must moderate the subreddit and hold the ``mail``
            moderator permission.

        Example usage::

            reddit.redditor('spez').message('TEST', 'test message from PRAW')
            reddit.subreddit('test').message('TEST', 'test PM from PRAW')

        """
        # Subclasses may define MESSAGE_PREFIX to qualify the recipient name;
        # fall back to no prefix when none is defined.
        recipient = "{}{}".format(
            getattr(self.__class__, "MESSAGE_PREFIX", ""), self
        )
        payload = {
            "subject": subject,
            "text": message,
            "to": recipient,
        }
        if from_subreddit:
            payload["from_sr"] = str(from_subreddit)
        self._reddit.post(API_PATH["compose"], data=payload)
| """Provide the MessageableMixin class."""
from ....const import API_PATH
class MessageableMixin:
"""Interface for classes that can be messaged."""
def message(self, subject, message, from_subreddit=None):
"""
Send a message to a redditor or a subreddit's moderators (mod mail).
:param subject: The subject of the message.
:param message: The message content.
:param from_subreddit: A :class:`~.Subreddit` instance or string to send the
message from. When provided, messages are sent from the subreddit
rather than from the authenticated user. Note that the
authenticated user must be a moderator of the subreddit and have
the ``mail`` moderator permission.
For example, to send a private message to ``/u/spez``, try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from PRAW')
To send a message to ``u/spez`` from the moderators of ``r/test`` try:
.. code:: python
reddit.redditor('spez').message('TEST', 'test message from r/test',
from_subreddit='test')
To send a message to the moderators of ``/r/test``, try:
.. code:: python
reddit.subreddit('test').message('TEST', 'test PM from PRAW')
"""
data = {
"subject": subject,
"text": message,
"to": "{}{}".format(
getattr(self.__class__, "MESSAGE_PREFIX", ""), self
),
}
if from_subreddit:
data["from_sr"] = str(from_subreddit)
self._reddit.post(API_PATH["compose"], data=data)
| Python | 0.000001 |
73bd8200f6ad23c60a05831e3b79497b830f19cd | Update old lithium comments about llvm-symbolizer 3.6 to 3.8 versions. | interestingness/envVars.py | interestingness/envVars.py | #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import platform
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
ENV_PATH_SEPARATOR = ';' if os.name == 'nt' else ':'
def envWithPath(path, runningEnv=os.environ):
    """Return a copy of *runningEnv* with *path* added to the platform's
    shared-library search variable (only appended when not already present)."""
    if isLinux:
        libPath = 'LD_LIBRARY_PATH'
    elif isMac:
        libPath = 'DYLD_LIBRARY_PATH'
    elif isWin:
        libPath = 'PATH'
    env = copy.deepcopy(runningEnv)
    current = env.get(libPath)
    if current is None:
        env[libPath] = path
    elif path not in current:
        env[libPath] = current + ENV_PATH_SEPARATOR + path
    return env
def findLlvmBinPath():
    """Return the path to compiled LLVM binaries, which differs depending on compilation method."""
    if isLinux:
        # Assumes clang was installed through apt-get. Works with version 3.6.2,
        # assumed to work with clang 3.8.0.
        # Create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.8
        if os.path.isfile('/usr/bin/llvm-symbolizer'):
            # Symbolizer already resolvable from PATH; no extra directory needed.
            return ''
        else:
            # Python 2 print statements (this module predates Python 3 support).
            print 'WARNING: Please install clang via `apt-get install clang` if using Ubuntu.'
            print 'then create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.8.'
            print 'Try: `ln -s /usr/bin/llvm-symbolizer-3.8 /usr/bin/llvm-symbolizer`'
            return ''
    if isMac:
        # Assumes LLVM was installed through Homebrew. Works with at least version 3.6.2.
        brewLLVMPath = '/usr/local/opt/llvm/bin'
        if os.path.isdir(brewLLVMPath):
            return brewLLVMPath
        else:
            print 'WARNING: Please install llvm from Homebrew via `brew install llvm`.'
            print 'ASan stacks will not have symbols as Xcode does not install llvm-symbolizer.'
            return ''
    # https://developer.mozilla.org/en-US/docs/Building_Firefox_with_Address_Sanitizer#Manual_Build
    if isWin:
        return None  # The harness does not yet support Clang on Windows
| #!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import platform
isLinux = (platform.system() == 'Linux')
isMac = (platform.system() == 'Darwin')
isWin = (platform.system() == 'Windows')
ENV_PATH_SEPARATOR = ';' if os.name == 'nt' else ':'
def envWithPath(path, runningEnv=os.environ):
"""Append the path to the appropriate library path on various platforms."""
if isLinux:
libPath = 'LD_LIBRARY_PATH'
elif isMac:
libPath = 'DYLD_LIBRARY_PATH'
elif isWin:
libPath = 'PATH'
env = copy.deepcopy(runningEnv)
if libPath in env:
if path not in env[libPath]:
env[libPath] += ENV_PATH_SEPARATOR + path
else:
env[libPath] = path
return env
def findLlvmBinPath():
"""Return the path to compiled LLVM binaries, which differs depending on compilation method."""
if isLinux:
# Assumes clang was installed through apt-get. Works with version 3.6.2.
# Create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.6
if os.path.isfile('/usr/bin/llvm-symbolizer'):
return ''
else:
print 'WARNING: Please install clang via `apt-get install clang` if using Ubuntu.'
print 'then create a symlink at /usr/bin/llvm-symbolizer for: /usr/bin/llvm-symbolizer-3.6.'
print 'Try: `ln -s /usr/bin/llvm-symbolizer-3.6 /usr/bin/llvm-symbolizer`'
return ''
if isMac:
# Assumes LLVM was installed through Homebrew. Works with at least version 3.6.2.
brewLLVMPath = '/usr/local/opt/llvm/bin'
if os.path.isdir(brewLLVMPath):
return brewLLVMPath
else:
print 'WARNING: Please install llvm from Homebrew via `brew install llvm`.'
print 'ASan stacks will not have symbols as Xcode does not install llvm-symbolizer.'
return ''
# https://developer.mozilla.org/en-US/docs/Building_Firefox_with_Address_Sanitizer#Manual_Build
if isWin:
return None # The harness does not yet support Clang on Windows
| Python | 0 |
378cb69d413eb8ffaf811b607fc037be923a2aba | Write tests for SSLRedirectMiddleware | iogt/tests/test_middleware.py | iogt/tests/test_middleware.py | from django.test import (
TestCase,
Client,
RequestFactory,
override_settings,
)
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import Main
from iogt.middleware import SSLRedirectMiddleware
PERMANENT_REDIRECT_STATUS_CODE = 301
@override_settings(HTTPS_PATHS=['admin'])
class TestSSLRedirectMiddleware(TestCase, MoloTestCaseMixin):
    """Checks that SSLRedirectMiddleware forces HTTPS only on configured paths."""

    def setUp(self):
        self.mk_main()
        self.main = Main.objects.all().first()
        self.factory = RequestFactory()

    def _process(self, path, https=False):
        # Run a request for ``path`` through a fresh middleware instance,
        # optionally flagging it as already-secure via the proxy header.
        headers = {'HTTP_X_FORWARDED_PROTO': 'https'} if https else {}
        request = self.factory.get(path, **headers)
        return SSLRedirectMiddleware().process_request(request)

    def test_no_redirect_for_home_page(self):
        # Home page is not in HTTPS_PATHS, so no redirect is issued.
        self.assertEqual(self._process('/'), None)

    def test_no_redirect_with_https(self):
        self.assertEqual(self._process('/', https=True), None)

    def test_no_redirect_when_secure(self):
        # Already-secure requests to protected paths pass straight through.
        self.assertEqual(self._process('/admin/', https=True), None)

    def test_redirect_when_not_secure(self):
        # Insecure requests to protected paths get a permanent redirect.
        response = self._process('/admin/')
        self.assertEqual(response.status_code,
                         PERMANENT_REDIRECT_STATUS_CODE)
| Python | 0 | |
e0ac456eae45a1b7e1482ff712be600b384f94b3 | Include new example to show group circle connectivity. | examples/connectivity/plot_custom_grouped_connectivity_circle.py | examples/connectivity/plot_custom_grouped_connectivity_circle.py | #!/usr/bin/env python
"""
Example how to create a custom label groups and plot grouped connectivity
circle with these labels.
Author: Praveen Sripad <pravsripad@gmail.com>
Christian Kiefer <ch.kiefer@fz-juelich.de>
"""
import matplotlib.pyplot as plt

from jumeg import get_jumeg_path
from jumeg.connectivity import (plot_grouped_connectivity_circle,
                                generate_random_connectivity_matrix)
import yaml

# Label names and pretty-name replacement tables shipped with jumeg.
labels_fname = get_jumeg_path() + '/data/desikan_label_names.yaml'
replacer_dict_fname = get_jumeg_path() + '/data/replacer_dictionaries.yaml'

with open(labels_fname, 'r') as f:
    label_names = yaml.safe_load(f)['label_names']

with open(replacer_dict_fname, 'r') as f:
    replacer_dict = yaml.safe_load(f)['replacer_dict_aparc']

# make a random matrix with 68 nodes
# use simple seed for reproducibility
con = generate_random_connectivity_matrix(size=(68, 68), symmetric=True)

# make groups based on lobes
occipital = ['lateraloccipital', 'lingual', 'cuneus', 'pericalcarine']
parietal = ['superiorparietal', 'inferiorparietal', 'precuneus',
            'postcentral', 'supramarginal']
temporal = ['bankssts', 'temporalpole', 'superiortemporal', 'middletemporal',
            'transversetemporal', 'inferiortemporal', 'fusiform',
            'entorhinal', 'parahippocampal']
insula = ['insula']
cingulate = ['rostralanteriorcingulate', 'caudalanteriorcingulate',
             'posteriorcingulate', 'isthmuscingulate']
frontal = ['superiorfrontal', 'rostralmiddlefrontal', 'caudalmiddlefrontal',
           'parsopercularis', 'parsorbitalis', 'parstriangularis',
           'lateralorbitofrontal', 'medialorbitofrontal', 'precentral',
           'paracentral', 'frontalpole']

# we need a list of dictionaries, one dict for each group to denote grouping
label_groups = [{'occipital': occipital}, {'parietal': parietal},
                {'temporal': temporal}, {'insula': insula},
                {'cingulate': cingulate},
                {'frontal': frontal}]

n_colors = len(label_groups)
cmap = plt.get_cmap('Pastel1')
# One color per group, mirrored — presumably so both hemispheres share the
# same group colors around the circle; TODO confirm against the plot output.
cortex_colors = cmap.colors[:n_colors] + cmap.colors[:n_colors][::-1]

# plot simple connectivity circle with cortex based grouping and colors
plot_grouped_connectivity_circle(label_groups, con, label_names,
                                 labels_mode='replace',
                                 replacer_dict=replacer_dict,
                                 cortex_colors=cortex_colors, vmin=0., vmax=1.,
                                 out_fname='fig_grouped_con_circle_cortex.png',
                                 colorbar_pos=(0.1, 0.1), n_lines=50, colorbar=True,
                                 colormap='viridis')
| Python | 0 | |
from tox._cmdline import main

# Entry point for ``python -m tox``: delegate straight to the CLI main().
main()
| Python | 0 | |
f7e504652707b09c0a0b7e7b1691094ef6d35509 | add proper tomography example | examples/solvers/conjugate_gradient_tomography.py | examples/solvers/conjugate_gradient_tomography.py | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Total variation tomography using the `conjugate_gradient_normal` solver.
Solves the inverse problem
A(x) = g
Where ``A`` is a parallel beam forward projector, ``x`` the result and
``g`` is given noisy data.
"""
import numpy as np
import odl

# --- Set up the forward operator (ray transform) --- #

# Discrete reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
reco_space = odl.uniform_discr(
    min_corner=[-20, -20], max_corner=[20, 20], nsamples=[300, 300],
    dtype='float32')

# Make a parallel beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)

# Detector: uniformly sampled, n = 558, min = -30, max = 30
detector_partition = odl.uniform_partition(-30, 30, 558)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)

# The implementation of the ray transform to use, options:
# 'scikit'                    Requires scikit-image (can be installed by
#                             running ``pip install scikit-image``).
# 'astra_cpu', 'astra_cuda'   Require astra tomography to be installed.
#                             Astra is much faster than scikit. Webpage:
#                             https://github.com/astra-toolbox/astra-toolbox
impl = 'scikit'

# Ray transform aka forward projection.
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)

# --- Generate artificial data --- #

# Create phantom
discr_phantom = odl.util.shepp_logan(reco_space, modified=True)

# Create sinogram of forward projected phantom with noise
data = ray_trafo(discr_phantom)
# Additive white noise scaled to 10% of the mean projection value.
data += odl.util.white_noise(ray_trafo.range) * np.mean(data) * 0.1

# Optionally pass partial to the solver to display intermediate results
partial = (odl.solvers.PrintIterationPartial() &
           odl.solvers.ShowPartial())

# Choose a starting point
x = ray_trafo.domain.zero()

# Run the algorithm
odl.solvers.conjugate_gradient_normal(
    ray_trafo, x, data, niter=20, partial=partial)

# Display images
discr_phantom.show(title='original image')
data.show(title='convolved image')
# show=True blocks until the figure windows are closed.
x.show(title='deconvolved image', show=True)
| Python | 0.998527 | |
# Advent of Code 2018, day 1 part 1: sum the per-line frequency changes.
with open("completed/input_c1-1.txt", "r") as f:
    total = 0
    line = f.readline()
    while line:
        # Skip blank lines so a trailing newline does not crash int().
        if line.strip():
            total += int(line)
        line = f.readline()
# Bug fix: the original did ``print("Final Frequency: {}", sum)``, which
# printed a literal "{}" (and shadowed the builtin ``sum``); format instead.
print("Final Frequency: {}".format(total))
| Python | 0.000001 | |
8de92e74317a74b53991bdcbb3594f0e94e4cf17 | Add Monty Hall simulation | montyhall.py | montyhall.py | import random
import sys
def game():
    """Play one Monty Hall round.

    Returns a tuple ``(first_win, second_win)`` — whether sticking with the
    first pick wins and whether switching wins. Exactly one entry is 1.
    """
    # Hide the car and let the player pick, both uniformly at random.
    car = random.randint(1, 3)
    first_choice = random.randint(1, 3)

    # Host opens a goat door: never the car, never the player's pick.
    reveal_candidates = [door for door in (1, 2, 3)
                         if door != car and door != first_choice]
    reveal = random.choice(reveal_candidates)

    # Switching means taking the one door that is neither picked nor opened.
    second_choice = next(door for door in (1, 2, 3)
                         if door != first_choice and door != reveal)

    return (1 if first_choice == car else 0,
            1 if second_choice == car else 0)
def simulate(rounds):
    """Play ``rounds`` Monty Hall games and print the win rate of keeping
    the first choice versus switching to the second choice."""
    stay_wins = 0
    switch_wins = 0
    for _ in range(rounds):
        stay, switch = game()
        stay_wins += stay
        switch_wins += switch
    print("First choice wins {:.1f}% of cases".format(stay_wins / rounds * 100))
    print("Second choice wins {:.1f}% of cases".format(switch_wins / rounds * 100))
if __name__ == '__main__':
    # Round count comes from the command line, e.g. ``python montyhall.py 10000``.
    simulate(int(sys.argv[1]))
| Python | 0 | |
b177a0f2e9b42347f56c4499aaa080af97e0e530 | add validity check | 2018/04.10/python/jya_gAPIclass.2.py | 2018/04.10/python/jya_gAPIclass.2.py | import requests, base64
import config
id = config.GAPI_CONFIG['client_id']
secret = config.GAPI_CONFIG['client_secret']
type = config.GAPI_CONFIG['grant_type']
class GapiClass:
    """Thin client for the Gabia API (gapi.gabia.com) using token-based Basic auth."""

    def __init__(self, host='https://gapi.gabia.com'):
        self.__host = host
        # Pre-built Authorization header from a freshly fetched access token.
        self.__headers = self.__encoded_token()
        # Retry bookkeeping for GETs that fail with 401.
        self.__max_retry = 5
        self.__p = 1

    def __Requests_get(self, url):
        r = requests.get('{0}{1}'.format(self.__host, url), headers = self.__headers)
        # print(r.status_code)
        if (r.status_code == 401):
            # Token rejected: refresh the token and retry.
            # (Log strings appear mojibake-encoded in the source; left unchanged.)
            print("์ ํจํ์ง ์์ ํ ํฐ์๋๋ค")
            # NOTE(review): each retry's return value is discarded, so even a
            # successful retry still returns None to the caller — confirm intent.
            while self.__p < self.__max_retry:
                self.__p += 1
                self.__headers = self.__encoded_token()
                self.__Requests_get(url)
        elif (r.status_code == 200):
            j = r.json()
            return j
        else:
            # Any other status: log and implicitly return None.
            print("๋ค์ ๊ธฐํ์")

    def __Requests_post(self, url, data):
        r = requests.post('{0}{1}'.format(self.__host, url), data = data)
        j = r.json()
        return j

    def __getToken(self):
        # Exchange client credentials (loaded from config.py at module level)
        # for an access token, and prefix it with the service user name.
        j = self.__Requests_post('/oauth/token', {'client_id': id, 'client_secret': secret, 'grant_type': type})
        token_1 = j['access_token']
        token_2 = 'www_front:{0}'.format(token_1)
        return token_2

    def __makeHeadersAuth(self, token):
        # Base64-encode "www_front:<token>" for an HTTP Basic auth header.
        encoded_text = token.encode()
        k = base64.b64encode(encoded_text)
        l = k.decode()
        return {'Authorization': 'Basic {0}'.format(l)}

    def __encoded_token(self):
        return self.__makeHeadersAuth(self.__getToken())

    def getMember(self, id):
        """Return the 'hanadmin' (administrator name) field for the given user id."""
        j = self.__Requests_get('/members?user_id={0}'.format(id))
        hanname = j['client_info']['hanadmin']
        return hanname
# api1 = GapiClass()
# a = api1.getMember('planning_d')
# if __name__ == "__main__":
# print(a) | Python | 0.000001 | |
8e8e11990e430302eca24d32ba0b88dcc66233d6 | Add connect2 wifi via pyobjc | clburlison_scripts/connect2_wifi_pyobjc/connect2_wifi_pyobjc.py | clburlison_scripts/connect2_wifi_pyobjc/connect2_wifi_pyobjc.py | #!/usr/bin/python
"""
I didn't create this but I'm storing it so I can reuse it.
http://stackoverflow.com/a/34967364/4811765
"""
import objc

SSID = "MyWifiNetwork"
PASSWORD = "MyWifiPassword"

# Load the CoreWLAN framework; loadBundle injects its Objective-C classes
# (e.g. CWInterface) directly into this module's globals.
objc.loadBundle('CoreWLAN',
                bundle_path='/System/Library/Frameworks/CoreWLAN.framework',
                module_globals=globals())

# Grab the default Wi-Fi interface, scan for the target network, and join it.
iface = CWInterface.interface()
networks, err = iface.scanForNetworksWithName_err_(SSID, None)
network = networks.anyObject()
success, err = iface.associateToNetwork_password_err_(network, PASSWORD, None)
| Python | 0 | |
2c8370f164525f6d6045836dbc20e4279b5c90fc | Create fme-python-shutdown-script.py | upload-geospatial-data/fme/fme-python-shutdown-script.py | upload-geospatial-data/fme/fme-python-shutdown-script.py | import fme
import os
import shutil
import zipfile
import boto3
# function to upload your data to aws S3
def UploadFile(source_file, bucket, bucket_key, aws_key, aws_secret):
    # Object name = last path component; assumes a Windows-style
    # backslash-separated path — TODO confirm (os.path.basename is portable).
    fname = str(source_file).split('\\')[-1:][0]
    session = boto3.session.Session(aws_access_key_id=aws_key, aws_secret_access_key=aws_secret,region_name='ap-southeast-2')
    s3_client = session.client('s3')
    s3_client.upload_file(source_file, bucket, bucket_key+"/"+fname)
    # URL of the destination key prefix (not of the uploaded object itself).
    file_url = '{0}/{1}/{2}'.format(s3_client.meta.endpoint_url, bucket, bucket_key)
    return file_url
# Creates a zip file containing the input file geodatabase
# inFileGDB: Full path to the .gdb directory to be zipped
# Delete: Set to True to delete the file geodatabase after zipping
def Zipfgdb(inFileGDB, Delete = True):
    """Zip a file-geodatabase directory next to itself and return the zip path."""
    # Directory containing the geodatabase, and its base name sans extension.
    inLocation = os.path.dirname(inFileGDB)
    inName = os.path.basename(os.path.splitext(inFileGDB)[0])
    # Create the zipfile name and zipfile object.
    zipfl = os.path.join(inLocation, inName + ".zip")
    ZIP = zipfile.ZipFile(zipfl, "w")
    # Iterate files in the geodatabase directory, excluding any lock files.
    for fl in os.listdir(inFileGDB):
        inFile = os.path.join(inFileGDB, fl)
        # '!=' replaces the original '<>' operator, which was removed in
        # Python 3 (and '!=' works identically in Python 2).
        if os.path.splitext(fl)[1][1:] != 'lock':
            ZIP.write(inFile, fl)
    # Delete the file geodatabase if indicated.
    if Delete:
        shutil.rmtree(inFileGDB)
    ZIP.close()
    return zipfl
# Creates a zip file containing the input shapefile
# inShp: Full path to shapefile to be zipped
# Delete: Set to True to delete shapefile files after zip
def ZipShp(inShp, Delete = True):
    """Zip all sidecar files of a shapefile next to it and return the zip path."""
    # List of shapefile file extensions that belong to one dataset.
    extensions = [".shp",".shx",".dbf",".sbn",".sbx",".fbn",".fbx",".ain",".aih",".atx",".ixs",".mxs",".prj",".xml",".cpg",".shp.xml"]
    # Directory of shapefile and its base name without extension.
    inLocation = os.path.dirname(inShp)
    inName = os.path.basename(os.path.splitext(inShp)[0])
    # Create zipfile name and zipfile object.
    zipfl = os.path.join(inLocation, inName + ".zip")
    ZIP = zipfile.ZipFile(zipfl, "w")
    # Files added to the zip; deleted afterwards when Delete is set.
    delFiles = []
    for fl in os.listdir(inLocation):
        for extension in extensions:
            # Only pick files that are part of this shapefile dataset.
            if fl == inName + extension:
                inFile = os.path.join(inLocation, fl)
                delFiles += [inFile]
                ZIP.write(inFile, fl)
                break
    # Delete shapefile components if indicated.
    if Delete:
        for fl in delFiles:
            os.remove(fl)
    ZIP.close()
    # Bug fix: the original ended with a bare ``zipfl`` expression, so the
    # function implicitly returned None; callers expect the zip path.
    return zipfl
def main():
    try:
        # Parse required fme published parameters
        # note the parameters must match the parameter names used in fme workbench
        print 'Executing the SLIP Selfservice automation script.'
        x = fme.macroValues
        AWS_KEY = x['AWS_ACCESS_KEY']
        AWS_SECRET_KEY = x['AWS_SECRET']
        BUCKET = x['S3_BUCKET']
        BUCKET_KEY = x['S3_BUCKET_KEY']
        UPLOAD = x['UP_LOAD_TO_S3']
        OUTPUT_DATA = ''
        # What is the name of the output
        if x.has_key("DestDataset_FILEGDB"):
            OUTPUT_DATA = x['DestDataset_FILEGDB']
        # NOTE(review): ``x.has_key(...) in x`` tests whether True/False is a
        # key of x — almost certainly meant ``if x.has_key("DestDataset_ESRISHAPE"):``.
        if x.has_key("DestDataset_ESRISHAPE") in x:
            OUTPUT_DATA = x['DestDataset_ESRISHAPE']
        # Check the extension of my output data
        extension = os.path.splitext(OUTPUT_DATA)[1]
        if extension.upper() == '.GDB':
            my_zip_path = Zipfgdb(OUTPUT_DATA, False)
            print 'compressed version of your data is stored ' + my_zip_path
            if UPLOAD == "Yes":
                url = UploadFile(my_zip_path,BUCKET,BUCKET_KEY,AWS_KEY,AWS_SECRET_KEY)
                print 'Your Data has been uploaded to ' + url
        elif extension.upper() == '.SHP':
            my_zip_path = ZipShp(OUTPUT_DATA, False)
            print 'compressed version of your data is stored ' + my_zip_path
            if UPLOAD == "Yes":
                # NOTE(review): ``my_zip_path`` is passed twice, giving UploadFile
                # six arguments for five parameters — this call raises TypeError;
                # compare with the .GDB branch above.
                url = UploadFile(my_zip_path,my_zip_path,BUCKET,BUCKET_KEY,AWS_KEY,AWS_SECRET_KEY)
                print 'Your Data has been uploaded to ' + url
    except:
        # NOTE(review): ``sys`` is never imported in this module, so this
        # handler itself raises NameError; add ``import sys`` at the top.
        print("Unexpected error when executing the SLIP Selfservice automation script.:", sys.exc_info()[0])
        raise


if __name__ == "__main__":
    main()
| Python | 0.000016 | |
a6d6b833e33dc465b0fa828018e2cbba748f8282 | Add utility class for evaluation | pygraphc/evaluation/EvaluationUtility.py | pygraphc/evaluation/EvaluationUtility.py |
class EvaluationUtility(object):
    """Helpers for converting clustering results between representations."""

    @staticmethod
    def convert_to_text(graph, clusters):
        """Expand node-level clusters to member (log line) level.

        :param graph: networkx-style graph whose node attribute dict carries
            a 'member' list per node.
        :param clusters: dict mapping cluster id -> list of node ids.
        :returns: dict mapping cluster id -> flat list of member entries.
        """
        new_clusters = {}
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior on Python 2, and also works on Python 3.
        for cluster_id, nodes in clusters.items():
            for node in nodes:
                for member in graph.node[node]['member']:
                    new_clusters.setdefault(cluster_id, []).append(member)
        return new_clusters
| Python | 0 | |
a2a2d6ab7edaa6fab9d2fb95586fde8f1f74b1cc | add new package (#24672) | var/spack/repos/builtin/packages/py-aniso8601/package.py | var/spack/repos/builtin/packages/py-aniso8601/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAniso8601(PythonPackage):
    """A library for parsing ISO 8601 strings."""

    homepage = "https://bitbucket.org/nielsenb/aniso8601"
    pypi = "aniso8601/aniso8601-9.0.1.tar.gz"

    version('9.0.1', sha256='72e3117667eedf66951bb2d93f4296a56b94b078a8a95905a052611fb3f1b973')

    # Pure-Python package: setuptools is required only at build time.
    depends_on('py-setuptools', type='build')
| Python | 0 | |
0ee3990781488c54b3d45c722b843a06a28da235 | Add test that explores exploitability of tilted agents | verification/action_tilted_agents_exploitability_test.py | verification/action_tilted_agents_exploitability_test.py | import unittest
import numpy as np
import os
import matplotlib.pyplot as plt
import acpc_python_client as acpc
from tools.constants import Action
from weak_agents.action_tilted_agent import create_agent_strategy_from_trained_strategy, TiltType
from tools.io_util import read_strategy_from_file
from evaluation.exploitability import Exploitability
# Output directory for the generated exploitability plots.
FIGURES_FOLDER = 'verification/action_tilted_agents'

# Pre-trained (approximate) equilibrium strategies used as the tilt baseline.
KUHN_EQUILIBRIUM_STRATEGY_PATH = 'strategies/kuhn.limit.2p-equilibrium.strategy'
LEDUC_EQUILIBRIUM_STRATEGY_PATH = 'strategies/leduc.limit.2p-equilibrium.strategy'

# (plot label, action biased towards, how the bias is applied) per tested agent.
TILT_TYPES = [
    ('fold-add', Action.FOLD, TiltType.ADD),
    ('call-add', Action.CALL, TiltType.ADD),
    ('raise-add', Action.RAISE, TiltType.ADD),
    ('fold-multiply', Action.FOLD, TiltType.MULTIPLY),
    ('call-multiply', Action.CALL, TiltType.MULTIPLY),
    ('raise-multiply', Action.RAISE, TiltType.MULTIPLY),
]
class ActionTiltedAgentsExploitabilityTest(unittest.TestCase):
    """Plots how exploitable action-tilted agents are as tilt probability grows."""

    def test_plot_kuhn_agent_exploitabilities(self):
        self.create_agents_and_plot_exploitabilities({
            'title': 'Kuhn poker action tilted agents exploitability',
            'figure_filename': 'kuhn_action_tilted_agents',
            'base_strategy_path': KUHN_EQUILIBRIUM_STRATEGY_PATH,
            'game_file_path': 'games/kuhn.limit.2p.game',
            'tilt_probabilities': np.arange(-1, 1, 0.01),
        })

    def test_plot_leduc_agent_exploitabilities(self):
        # Coarser grid than Kuhn: Leduc exploitability is costlier to evaluate.
        self.create_agents_and_plot_exploitabilities({
            'title': 'Leduc Hold\'em action tilted agents exploitability',
            'figure_filename': 'leduc_action_tilted_agents',
            'base_strategy_path': LEDUC_EQUILIBRIUM_STRATEGY_PATH,
            'game_file_path': 'games/leduc.limit.2p.game',
            'tilt_probabilities': np.arange(-1, 1, 0.1),
        })

    def create_agents_and_plot_exploitabilities(self, test_spec):
        # Baseline: exploitability of the untilted (equilibrium) strategy.
        base_strategy = read_strategy_from_file(
            test_spec['game_file_path'],
            test_spec['base_strategy_path'])
        game = acpc.read_game_file(test_spec['game_file_path'])
        exploitability = Exploitability(game)
        equilibrium_exploitability = exploitability.evaluate(base_strategy)

        tilt_probabilities = test_spec['tilt_probabilities']
        exploitability_values = np.zeros([len(TILT_TYPES), len(tilt_probabilities)])
        for i, tilt_type in enumerate(TILT_TYPES):
            # Evaluate one tilted agent per (tilt type, tilt probability) pair.
            for j, tilt_probability in enumerate(tilt_probabilities):
                tilted_agent = create_agent_strategy_from_trained_strategy(
                    test_spec['game_file_path'],
                    base_strategy,
                    tilt_type[1],
                    tilt_type[2],
                    tilt_probability)
                exploitability_values[i, j] = exploitability.evaluate(tilted_agent)

            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # source — ``range(i + 1)`` suggests the figure is re-rendered (and
            # the output file overwritten) after each tilt type so partial
            # progress is saved; confirm against project history.
            plt.figure(dpi=160)
            for j in range(i + 1):
                plt.plot(
                    tilt_probabilities,
                    exploitability_values[j],
                    label=TILT_TYPES[j][0],
                    linewidth=0.8)
            # Horizontal reference: the equilibrium strategy's own exploitability.
            plt.plot(
                tilt_probabilities,
                [equilibrium_exploitability] * len(tilt_probabilities),
                'r--',
                label='Equilibrium',
                linewidth=1.5)
            plt.title(test_spec['title'])
            plt.xlabel('Tilt probability')
            plt.ylabel('Agent exploitability [mbb/g]')
            plt.grid()
            plt.legend()

            figure_output_path = '%s/%s.png' % (FIGURES_FOLDER, test_spec['figure_filename'])
            figures_directory = os.path.dirname(figure_output_path)
            if not os.path.exists(figures_directory):
                os.makedirs(figures_directory)
            plt.savefig(figure_output_path)
| Python | 0.000001 | |
f9a8642e3c5cfbce2e949c019dce3d538eefcd43 | Juan Question | JuanQuestion/Juan.py | JuanQuestion/Juan.py | from string import uppercase
from string import lowercase
if __name__ == '__main__':
    # Interactive "answer machine": keeps prompting until the user enters 1.
    print("Juan Questions")
    print("Presione 1 para salir")
    running = True
    while running:
        answer = input("Pregunta algo: ")
        if answer.endswith("?"):
            print("Ofi")
        elif 'A' <= answer <= 'Z':
            # Lexicographic comparison, exactly as the original wrote it.
            print("Chillea")
        elif answer == "":
            print("mmm")
        elif answer == " ":
            print("Me da igual")
        elif answer == "1":
            print("Salir")
            running = False
| Python | 0.999983 | |
0c9c3a801077c241cc32125ab520746935b04f89 | Create LAElectionResults.py | LAElectionResults.py | LAElectionResults.py | # The MIT License (MIT)
# Copyright (C) 2014 Allen Plummer, https://www.linkedin.com/in/aplummer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import feedparser
import argparse
from BeautifulSoup import BeautifulSoup
class CandidateIssue:
    """One candidate (or ballot issue) row: name, vote share, vote count."""

    def __init__(self, name):
        self.Name = name
        self.TotalNumber = ''
        self.Percentage = ''

    def __str__(self):
        # Python 2 idiom: byte-encode the unicode representation.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        # Fixed-width columns: 40 chars for name, 10 for percentage, 15 for total.
        return "{0:40}{1:10}{2:15}".format(self.Name, self.Percentage, self.TotalNumber)
class Election:
    """A single race: its title, reporting progress, and candidate rows."""

    def __init__(self, title):
        self.Title = title
        self.Progress = ''
        self.CandidateIssues = []

    def __str__(self):
        # Python 2 idiom: byte-encode the unicode representation.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        lines = ['======' + self.Title, 'Progress: ' + self.Progress]
        lines.extend(str(c) for c in self.CandidateIssues)
        return '\n'.join(lines)
# Entry point: fetch an election-results RSS feed, scrape each entry's HTML
# table into Election/CandidateIssue objects, and print the results
# (optionally filtered by a case-insensitive title substring).
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Gets Election Results')
parser.add_argument('-url','--url',help='Base URL',required=True)
parser.add_argument('-election', '--election', help='Election to show', required=False)
args = vars(parser.parse_args())
url = args['url']
efilter = args['election']
d = feedparser.parse(url)
elections = []
for item in d.entries:
title = item['title_detail']['value']
election = Election(title)
# Each entry's summary holds an HTML table of candidates/results.
soup = BeautifulSoup(item['summary'])
tables = soup.findChildren('table')
# This will get the first (and only) table. Your page may have more.
my_table = tables[0]
# You can find children with multiple tags by passing a list of strings
rows = my_table.findChildren(['th', 'tr'])
i = 0
for row in rows:
i += 1
cells = row.findChildren('td')
# The first row carries the precinct-reporting progress; the rest are candidates.
if i == 1:
election.Progress = cells[0].text.strip()
else:
candidate = CandidateIssue(cells[0].text.strip())
candidate.Percentage = cells[1].text.strip()
candidate.TotalNumber = cells[2].text.strip()
election.CandidateIssues.append(candidate)
elections.append(election)
# Print everything, or only elections whose title contains the filter text.
for e in elections:
if efilter != None:
if e.Title.upper().find(efilter.upper()) >= 0:
print(e)
else:
print(e)
| Python | 0 | |
6a6abadc2395810076b89fb38c759f85426a0304 | Add framework for own SVM from scratch | supportVectorMachine/howItWorksSupportVectorMachine.py | supportVectorMachine/howItWorksSupportVectorMachine.py | # -*- coding: utf-8 -*-
"""Support Vector Machine (SVM) classification for machine learning.
SVM is a binary classifier. The objective of the SVM is to find the best
separating hyperplane in vector space which is also referred to as the
decision boundary. And it decides what separating hyperplane is the 'best'
because the distance from it and the associating data it is separating is the
greatest at the plane in question.
This is the file where I create the algorithm from scratch.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
$ python howItWorksSupportVectorMachine.py
Todo:
* Sketch out the framework
"""
# minimize magnitude(w) and maximize b
# with constraint y_i*(x_i*w+b)>=1
# or Class*(KnownFeatures*w+b)>=1
| Python | 0 | |
020779ae0f8a13b70981f174d6391a2cf3b6593a | Add tests for api model validation and default values. | st2common/tests/unit/test_api_model_validation.py | st2common/tests/unit/test_api_model_validation.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.models.api.base import BaseAPI
# Minimal BaseAPI subclass used only to exercise schema validation and
# default-value population in the tests below; `model` is intentionally None
# because no database-backed model is involved.
class MockAPIModel(BaseAPI):
model = None
schema = {
'title': 'MockAPIModel',
'description': 'Test',
'type': 'object',
'properties': {
# Top-level defaults exercised by the tests: id -> None, enabled -> True,
# permission_grants -> [] (with nested per-item defaults below).
'id': {
'description': 'The unique identifier for the action runner.',
'type': ['string', 'null'],
'default': None
},
'name': {
'description': 'The name of the action runner.',
'type': 'string',
'required': True
},
'description': {
'description': 'The description of the action runner.',
'type': 'string'
},
'enabled': {
'type': 'boolean',
'default': True
},
'parameters': {
'type': 'object'
},
'permission_grants': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
# Nested defaults: each grant gets resource_uid='unknown', enabled=True.
'resource_uid': {
'type': 'string',
'description': 'UID of a resource to which this grant applies to.',
'required': False,
'default': 'unknown'
},
'enabled': {
'type': 'boolean',
'default': True
},
'description': {
'type': 'string',
'description': 'Description',
'required': False
}
}
},
'default': []
}
},
'additionalProperties': False
}
# Verifies that BaseAPI.validate() fills in schema defaults -- both on the
# returned validated object and, as asserted here, on the original instance.
class APIModelValidationTestCase(unittest2.TestCase):
def test_validate_default_values_are_set(self):
# no "permission_grants" attribute
mock_model_api = MockAPIModel(name='name')
# Before validate(): attributes without values are simply absent.
self.assertEqual(getattr(mock_model_api, 'id', 'notset'), 'notset')
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(getattr(mock_model_api, 'enabled', None), None)
self.assertEqual(getattr(mock_model_api, 'permission_grants', None), None)
mock_model_api_validated = mock_model_api.validate()
# After validate(): schema defaults materialize on both objects.
self.assertEqual(mock_model_api.id, None)
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(mock_model_api.enabled, True)
self.assertEqual(mock_model_api.permission_grants, [])
self.assertEqual(mock_model_api_validated.name, 'name')
self.assertEqual(mock_model_api_validated.enabled, True)
self.assertEqual(mock_model_api_validated.permission_grants, [])
# "permission_grants" attribute present, but child missing
mock_model_api = MockAPIModel(name='name', enabled=False,
permission_grants=[{}, {'description': 'test'}])
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(mock_model_api.enabled, False)
self.assertEqual(mock_model_api.permission_grants, [{}, {'description': 'test'}])
mock_model_api_validated = mock_model_api.validate()
# Nested item defaults (resource_uid='unknown', enabled=True) are applied
# to every grant while explicitly-provided keys are preserved.
self.assertEqual(mock_model_api.name, 'name')
self.assertEqual(mock_model_api.enabled, False)
self.assertEqual(mock_model_api.permission_grants,
[{'resource_uid': 'unknown', 'enabled': True},
{'resource_uid': 'unknown', 'enabled': True, 'description': 'test'}])
self.assertEqual(mock_model_api_validated.name, 'name')
self.assertEqual(mock_model_api_validated.enabled, False)
self.assertEqual(mock_model_api_validated.permission_grants,
[{'resource_uid': 'unknown', 'enabled': True},
{'resource_uid': 'unknown', 'enabled': True, 'description': 'test'}])
| Python | 0 | |
2e1c257c0215f398e4ac5cc7d2d20ffa62492817 | Create NewChatAlert.pyw | NewChatAlert.pyw | NewChatAlert.pyw | # TODO: Check for cookie expiration
# TODO: Check for failed request
# TODO: Check for rejected cookie
# TODO: Get Cookie from other browsers (IE and Firefox)
# - See https://bitbucket.org/richardpenman/browser_cookie (and perhaps contribute)?
from os import getenv
from sqlite3 import connect
from win32crypt import CryptUnprotectData
from requests import post
from ctypes import windll
from time import sleep, ctime
# Function that displays a message box.
# `style` is the Win32 MB_* icon/button bitmask (16 = error, 48 = warning,
# 64 = information); the call blocks until the user dismisses the dialog.
def MsgBox(title, text, style):
# hWnd=0 -> no owner window; MessageBoxW is the wide-character variant.
windll.user32.MessageBoxW(0, text, title, style)
# Function that returns a session cookie from Chrome's local cookie store.
def GetSecureCookie(name):
    """Read the named cookie from Chrome's SQLite store and decrypt it.

    Returns the decoded cookie value, or None when the cookie is absent.
    """
    # Chrome keeps its cookie database under the local app-data directory.
    cookies_database_path = getenv(
        "APPDATA") + r"\..\Local\Google\Chrome\User Data\Default\Cookies"
    conn = connect(cookies_database_path)
    try:
        cursor = conn.cursor()
        # Parameterized query instead of string concatenation: immune to
        # quoting breakage / SQL injection via the cookie name.
        cursor.execute(
            "SELECT encrypted_value FROM cookies WHERE name = ?", (name,))
        results = cursor.fetchone()
    finally:
        # Close the handle even if the query raises.
        conn.close()
    if results is None:
        return None
    # CryptUnprotectData returns (description, plaintext); take the payload.
    return CryptUnprotectData(results[0], None, None, None, 0)[1].decode("utf-8")
# Function that returns the chat status using a provided session cookie.
def GetChatRequestCount(cookie):
    """Ask TeamSupport how many chat requests are currently pending."""
    reply = post(
        "https://app.teamsupport.com/chatstatus",
        cookies={"TeamSupport_Session": cookie},
        data='{"lastChatMessageID": -1, "lastChatRequestID": -1}',
    )
    return reply.json()["ChatRequestCount"]
# Poll loop: re-reads the Chrome session cookie, queries TeamSupport, and
# pops a Win32 message box when chat requests are waiting. Never returns.
def main():
# Loop forever - checking for new chat requests
while True:
cookie = GetSecureCookie("TeamSupport_Session")
if cookie == None:
# style=16 -> error icon; user must log in via Chrome to refresh the cookie.
MsgBox("Session cookie not found",
"""TeamSupport session cookie could not be found in Chrome store
New chat notifications will not work until this is resolved
Log in to TeamSupport using Chrome to fix this""",
16)
# Pause for 30 seconds before trying again
sleep(30)
else:
chat_request_count = GetChatRequestCount(cookie)
# Alert if there are new chat requests or log if none
if chat_request_count == 0:
print(ctime() + " - No new chat requests")
elif chat_request_count == 1:
# style=64 -> information icon.
MsgBox("New Chat Request", "There is 1 new chat request", 64)
else:
# style=48 -> warning icon for multiple pending requests.
MsgBox("New Chat Requests", "There are " +
str(chat_request_count) + " chat requests", 48)
# Pause for 10 seconds before checking again
sleep(10)
if __name__ == "__main__":
main()
| Python | 0 | |
1d4938232aa103ea2a919796e9fa35e2699d41d9 | Create PythonAnswer2.py | PythonAnswer2.py | PythonAnswer2.py | def fibonacci(x):
a = 0 #first number
b = 1 #second number
# NOTE(review): the loop variable reuses (shadows) the parameter name x;
# range(x - 1) fixes the iteration count before the loop starts, so the
# shadowing is harmless, but renaming the loop variable would be clearer.
for x in range(x - 1):
a, b = b, a + b #a becomes b and b becomes a and b added together
return a #returns the next number in the sequence
# Driver: print the first 34 Fibonacci numbers (Python 2 print statements).
print "Fibonacci Answer"
for x in range(1, 35): #number of times I need the sequence to run to reach 4million
print fibonacci(x)
| Python | 0.999202 | |
a6887ccd608b90b9f78fa4422850752686ed3897 | set no proxy for querying the triplestore | QueryLauncher.py | QueryLauncher.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os, time, tempfile
from pprint import pformat
from SPARQLWrapper import SPARQLWrapper, JSON
import requests
import logging
import urllib.request
from askomics.libaskomics.ParamManager import ParamManager
class SPARQLError(RuntimeError):
    """Raised when an HTTP request sent to the SPARQL endpoint fails.

    The exception message is the raw response body; the HTTP status code
    is kept on ``status_code`` for callers that want to inspect it.
    """

    def __init__(self, response):
        super().__init__(response.text)
        self.status_code = response.status_code
class QueryLauncher(ParamManager):
"""
The QueryLauncher process sparql queries:
- execute_query send the query to the sparql endpoint specified in params.
- parse_results preformat the query results
- format_results_csv write in the tabulated result file a table obtained
from these preformated results using a ResultsBuilder instance.
"""
def __init__(self, settings, session):
ParamManager.__init__(self, settings, session)
self.log = logging.getLogger(__name__)
def execute_query(self, query, log_raw_results=True):
"""Send a SPARQL query (select or update) to the configured endpoint.

Params:
- query: libaskomics.rdfdb.SparqlQuery
- log_raw_results: if True the raw json response is logged. Set to False
if you're doing a select and parsing the results with parse_results.

Raises ValueError when a required askomics.* endpoint parameter is
missing or when credentials are only half-configured.
"""
# Set no proxy
# NOTE(review): installs a process-global urllib opener on every call.
proxy_support = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
if self.log.isEnabledFor(logging.DEBUG):
# Prefixes should always be the same, so drop them for logging
query_log = query #'\n'.join(line for line in query.split('\n')
# if not line.startswith('PREFIX '))
self.log.debug("----------- QUERY --------------\n%s", query_log)
urlupdate = None
if self.is_defined("askomics.updatepoint"):
urlupdate = self.get_param("askomics.updatepoint")
time0 = time.time()
if self.is_defined("askomics.endpoint"):
data_endpoint = SPARQLWrapper(self.get_param("askomics.endpoint"), urlupdate)
else:
raise ValueError("askomics.endpoint")
# Credentials must be configured in pairs; a lone username or password
# is reported as a ValueError naming the missing half.
if self.is_defined("askomics.endpoint.username") and self.is_defined("askomics.endpoint.passwd"):
user = self.get_param("askomics.endpoint.username")
passwd = self.get_param("askomics.endpoint.passwd")
data_endpoint.setCredentials(user, passwd)
elif self.is_defined("askomics.endpoint.username"):
raise ValueError("askomics.passwd")
elif self.is_defined("askomics.endpoint.passwd"):
raise ValueError("askomics.username")
if self.is_defined("askomics.endpoint.auth"):
data_endpoint.setHTTPAuth(self.get_param("askomics.endpoint.auth")) # Basic or Digest
data_endpoint.setQuery(query)
data_endpoint.method = 'POST'
# Update requests (INSERT/LOAD/...) return the raw response; selects are
# converted from JSON below.
if data_endpoint.isSparqlUpdateRequest():
data_endpoint.setMethod('POST')
# Hack for Virtuoso to LOAD a turtle file
if self.is_defined("askomics.hack_virtuoso"):
hack_virtuoso = self.get_param("askomics.hack_virtuoso")
if hack_virtuoso.lower() == "ok" or hack_virtuoso.lower() == "true":
data_endpoint.queryType = 'SELECT'
print(data_endpoint)
results = data_endpoint.query()
time1 = time.time()
else:
data_endpoint.setReturnFormat(JSON)
results = data_endpoint.query().convert()
time1 = time.time()
queryTime = time1 - time0
if self.log.isEnabledFor(logging.DEBUG):
if log_raw_results:
self.log.debug("------- RAW RESULTS -------------- (t=%.3fs)\n%s", queryTime, pformat(results))
else:
self.log.debug("------- QUERY DONE ------------ (t=%.3fs)", queryTime)
return results
def parse_results(self, json_res):
"""Flatten a SPARQL JSON result set into a list of {variable: value} dicts."""
parsed = [
{
sparql_variable: entry[sparql_variable]["value"]
for sparql_variable in entry.keys()
} for entry in json_res["results"]["bindings"]
]
# debug log is guarded since formatting is time consuming
if self.log.isEnabledFor(logging.DEBUG):
if not parsed:
self.log.debug("-------- NO RESULTS --------------")
else:
if len(parsed) > 10:
log_res = pformat(parsed[:10])[:-1]
log_res += ',\n ...] (%d results omitted)' % (len(parsed) - 10, )
else:
log_res = pformat(parsed)
self.log.debug("----------- RESULTS --------------\n%s", log_res)
return parsed
def process_query(self, query):
"""Convenience wrapper: execute a select query and return parsed rows."""
json_query = self.execute_query(query, log_raw_results=False)
results = self.parse_results(json_query)
return results
def format_results_csv(self, table):
"""Write the tabulated results to a uniquely-named CSV under static/results
and return the URL path to serve it from."""
if not os.path.isdir("askomics/static/results"):
os.mkdir('askomics/static/results')
with tempfile.NamedTemporaryFile(dir="askomics/static/results/", prefix="data_"+str(time.time()).replace('.', ''), suffix=".csv", mode="w+t", delete=False) as fp:
fp.write(table)
return "/static/results/"+os.path.basename(fp.name)
# TODO see if we can make a rollback in case of malformed data
def load_data(self, url, graphName):
"""
Load a ttl file accessible from http into the triple store using LOAD method
:param url: URL of the file to load
:param graphName: named graph to load the triples into
:return: The status
"""
self.log.debug("Loading into triple store (LOAD method) the content of: %s", url)
query_string = "LOAD <"+url+"> INTO GRAPH"+ " <" + graphName + ">"
res = self.execute_query(query_string)
return res
def upload_data(self, filename, graphName):
"""
Load a ttl file into the triple store using requests module and Fuseki
upload method which allows upload of big data into Fuseki (instead of LOAD method).
:param filename: name of the file, fp.name from Source.py
:param graphName: named graph to load the triples into
:return: response of the request and queryTime
Not working for Virtuoso because there is no upload files url.
"""
self.log.debug("Loading into triple store (HTTP method) the content of: %s", filename)
data = {'graph': graphName}
files = [('file', (os.path.basename(filename), open(filename), 'text/turtle'))]
time0 = time.time()
response = requests.post(self.get_param("askomics.file_upload_url"), data=data, files=files)
if response.status_code != 200:
raise SPARQLError(response)
self.log.debug("---------- RESPONSE FROM HTTP : %s", response.raw.read())
time1 = time.time()
queryTime = time1 - time0
if self.log.isEnabledFor(logging.DEBUG):
self.log.debug("------- UPLOAD DONE --------- (t=%.3fs)\n%s", queryTime, pformat(response))
return response
# TODO see if we can make a rollback in case of malformed data
def insert_data(self, ttl_string, graph, ttl_header=""):
"""
Load a ttl string into the triple store using INSERT DATA method
:param ttl_string: ttl content to load
:param graph: named graph to insert the triples into
:param ttl_header: the ttl header associated with ttl_string
:return: The status
"""
self.log.debug("Loading into triple store (INSERT DATA method) the content: "+ttl_string[:500]+"[...]")
query_string = ttl_header
query_string += "\n"
query_string += "INSERT DATA {\n"
query_string += "\tGRAPH "+ "<" + graph + ">" +"\n"
query_string += "\t\t{\n"
query_string += ttl_string + "\n"
query_string += "\t\t}\n"
query_string += "\t}\n"
res = self.execute_query(query_string)
return res
| Python | 0 | |
5c60be411e61d5edfbf658509b437973d596a3ba | Create server.py | Networking/server.py | Networking/server.py | # -*- coding: utf-8 -*-
import socket, math
# Server startup: bind a UDP socket on the loopback interface.
server = "127.0.0.1"
port = 55042
mysock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
mysock.bind((server, port))
# Per-JOG state, indexed by JOG id 0-9; entries stay None until the unit
# reports in with its address / first position message.
JOG_IP = [None,None,None,None,None,None,None,None,None,None]
JOG_coordinates= [None,None,None,None,None,None,None,None,None,None]
# Tracked target ("ennemy") position as [x, y].
ennemy_coordinates = [0.0,0.0]
# May only be called once every table (JOG_IP, JOG_coordinates) is filled.
def update_coordinates():
    """Send each JOG its two nearest peers plus the tracked-target data."""
    global JOG_IP, JOG_coordinates, ennemy_coordinates
    # FIXME: ennemy_velocity was referenced but never defined anywhere in
    # this module (NameError at runtime). Fall back to a zero vector until
    # the tracker actually publishes a velocity estimate.
    ennemy_velocity = globals().get('ennemy_velocity', [0.0, 0.0])
    for jog_id, address in enumerate(JOG_IP):
        # enumerate() instead of list.index(): index() returns the FIRST
        # matching entry, which is wrong whenever two slots hold equal values.
        current_coordinates = JOG_coordinates[jog_id]
        # Euclidean distance to every other JOG; own slot stays at +inf.
        distances = [float("inf")] * len(JOG_IP)
        for other_id, c in enumerate(JOG_coordinates):
            if c != current_coordinates:
                distances[other_id] = math.sqrt(
                    (c[0] - current_coordinates[0]) ** 2
                    + (c[1] - current_coordinates[1]) ** 2)
        neighbour1_ID = distances.index(min(distances))
        # Mask the closest neighbour so the second pass finds the runner-up.
        distances[neighbour1_ID] = max(distances)
        neighbour2_ID = distances.index(min(distances))
        # Format and send the message. Bug fix: neighbour B's y coordinate
        # previously reused neighbour1's y instead of neighbour2's.
        msg_coordinates = (
            'C' + ' '
            + 'A' + str(JOG_coordinates[neighbour1_ID][0]) + ' '
            + 'A' + str(JOG_coordinates[neighbour1_ID][1]) + ' '
            + 'B' + str(JOG_coordinates[neighbour2_ID][0]) + ' '
            + 'B' + str(JOG_coordinates[neighbour2_ID][1]) + ' '
            + 'T' + str(ennemy_coordinates[0]) + 'T' + str(ennemy_coordinates[1]) + ' '
            # FIXME: the original sent ennemy_velocity[1] twice with
            # mismatched 'T'/'V' tags; shape preserved pending a protocol spec.
            + 'T' + str(ennemy_velocity[1]) + 'V' + str(ennemy_velocity[1]))
        mysock.sendto(msg_coordinates, address)
# Main receive loop: register each JOG's address, record position updates,
# and broadcast neighbour data once every JOG has reported in.
while True:
    msg, client = mysock.recvfrom(255)
    if msg:
        msg_parts = msg.split()
        # Bug fix: the JOG id arrives as text; list indices must be ints.
        jog_id = int(msg_parts[0])
        JOG_IP[jog_id] = client
        if msg_parts[1] == 'C':  # message is a position update
            JOG_coordinates[jog_id] = [float(msg_parts[2]), float(msg_parts[3])]
        elif msg_parts[1] == 'E':  # message is an error report
            # TODO: handle error messages from the JOGs
            pass
        # Broadcast only once every JOG has registered AND sent a position
        # (boolean `or` instead of the original bitwise `|`).
        if not ((None in JOG_IP) or (None in JOG_coordinates)):
            update_coordinates()
| Python | 0.000001 | |
92ec849fc18d7cb610839abe2213ce30ceced46b | Add ci settings file for postgresql database | InvenTree/InvenTree/ci_postgresql.py | InvenTree/InvenTree/ci_postgresql.py | """
Configuration file for running tests against a MySQL database.
"""
from InvenTree.settings import *
# Override the 'test' database so CI runs against a throwaway PostgreSQL DB.
if 'test' in sys.argv:
    # Fixed copy-paste defect: this file configures PostgreSQL, not MySQL.
    eprint('InvenTree: Running tests - Using PostgreSQL test database')

    DATABASES['default'] = {
        # Ensure the PostgreSQL backend is being used
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'inventree_test_db',
        'USER': 'postgres',
        'PASSWORD': '',
    }
| Python | 0 | |
f7db5d9cac80432a7016043a1b2781fbaa7f040e | Create new package. (#6891) | var/spack/repos/builtin/packages/r-rappdirs/package.py | var/spack/repos/builtin/packages/r-rappdirs/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
# Spack package recipe for the CRAN 'rappdirs' R package.
class RRappdirs(RPackage):
"""An easy way to determine which directories on the users computer
you should use to save data, caches and logs. A port of Python's
'Appdirs' to R."""
homepage = "https://cran.r-project.org/package=rappdirs"
url = "https://cran.rstudio.com/src/contrib/rappdirs_0.3.1.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/rappdirs"
# Release tarball checksum registered with Spack's version machinery.
version('0.3.1', 'fbbdceda2aa49374e61c7d387bf9ea21')
# Needs R >= 2.14 both to build and at run time.
depends_on('r@2.14:', type=('build', 'run'))
| Python | 0 | |
6d2efcea281775c31cd1df29eac63054e3fe51df | Create solution.py | data_structures/linked_list/problems/delete_n_after_m/py/solution.py | data_structures/linked_list/problems/delete_n_after_m/py/solution.py | import LinkedList
# Problem description:
# Solution time complexity:
# Comments:
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def DeleteNAfterMNodes(head: LinkedList.Node, n: int, m: int) -> LinkedList.Node:
    """Repeatedly keep m nodes, then unlink the n nodes that follow them.

    Walks the list in O(length) time and O(1) extra space; returns the
    (possibly unchanged) head, or None for an empty list.
    """
    if head is None:
        return None
    anchor = head
    while anchor is not None:
        # Advance to the m-th kept node (anchor itself counts as the first).
        hops = m - 1
        while hops > 0 and anchor is not None:
            anchor = anchor.nxt
            hops -= 1
        if anchor is None:
            break
        # Scan past the next n nodes; whatever follows them survives.
        survivor = anchor.nxt
        remaining = n
        while remaining > 0 and survivor is not None:
            survivor = survivor.nxt
            remaining -= 1
        # Splice the survivors in and continue from there.
        anchor.nxt = survivor
        anchor = survivor
    return head
| Python | 0.000018 | |
c84ce4b2494771c48890c122420e4665828ac4f8 | Solve Code Fights different rightmost bit problem | CodeFights/differentRightmostBit.py | CodeFights/differentRightmostBit.py | #!/usr/local/bin/python
# Code Different Right-most Bit (Core) Problem
def differentRightmostBit(n, m):
    """Return the lowest-order bit in which n and m differ, as a power of two."""
    delta = n ^ m
    # x & -x isolates the least significant set bit (two's-complement trick).
    return delta & -delta
def main():
    """Exercise differentRightmostBit against known (n, m, answer) triples."""
    cases = [
        (11, 13, 2),
        (7, 23, 16),
        (1, 0, 1),
        (64, 65, 1),
        (1073741823, 1071513599, 131072),
        (42, 22, 4),
    ]
    for a, b, expected in cases:
        actual = differentRightmostBit(a, b)
        if actual == expected:
            print("PASSED: differentRightmostBit({}, {}) returned {}"
                  .format(a, b, actual))
        else:
            print(("FAILED: differentRightmostBit({}, {}) returned {},"
                   "answer: {}").format(a, b, actual, expected))


if __name__ == '__main__':
    main()
| Python | 0.00005 | |
8d0d564eae53a10b98b488b8c13eb952134cfc5e | Create 0408_country_body_part.py | 2018/0408_country_body_part.py | 2018/0408_country_body_part.py | #!/usr/bin/python
'''
NPR 2018-04-08
http://www.npr.org/puzzle
Name part of the human body, insert a speech hesitation, and you'll name a country โ what is it?
'''
from nltk.corpus import gazetteers
import nprcommontools as nct
#%%
# Candidate word lists: body parts from WordNet-style categories, country
# names from NLTK's gazetteers corpus (lower-cased for comparison).
BODY_PARTS = nct.get_category_members('body_part')
# COUNTRIES
COUNTRIES = frozenset([x.lower() for x in gazetteers.words('countries.txt')])
#%%
# Search: a country matches when a body part can be split at position i with
# the country starting with the prefix and ending with the suffix -- i.e. the
# country is the body part with a "speech hesitation" inserted in the middle.
# (Python 2 print statement below.)
for c in COUNTRIES:
for b in BODY_PARTS:
# Cheap first/last-letter pre-filter before trying every split point.
if c.startswith(b[0]) and c.endswith(b[-1]):
for i in range(1,len(b)-1):
if c.startswith(b[:i]) and c.endswith(b[i:]):
print b,c
| Python | 0.000023 | |
61ec74a685deec0b1ddc0a9274e5df0a597c6b6b | Create TweetStreamer.py | TweetStreamer.py | TweetStreamer.py | import tweepy
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import json
from elasticsearch import Elasticsearch
import datetime
from watson_developer_cloud import NaturalLanguageUnderstandingV1
import watson_developer_cloud.natural_language_understanding.features.v1 as Features
"""
This twitter code uses a user's numerical ID and will track their tweets live as the come in. Runs through watson's NLU
API and then uploads to ES.
"""
# SECURITY: live Twitter, Elasticsearch and Watson credentials are hard-coded
# below and committed to source control. They should be revoked and moved to
# environment variables or a secrets store.
consumer_key="YBFMgErZkiN8MWqBGcHXm2dCp"
consumer_secret="fmuMKwya4XyyjegvSyYAwBalZYI8heom3Ds56hkxVZmBuRNQ6t"
access_token="918660934528155648-InbzRO92y5NFmhGEmiGI7NGc0wxZhAO"
access_token_secret="mn3PehlsuJwJnQ4dlMC3cASwMyqlC0GHPT2uok8KbJltt"
# OAuth1 handshake for the tweepy streaming/REST clients.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Setup elasticsearch
es = Elasticsearch("10.0.2.81:9200")
# Setup watson NLU API
natural_language_understanding = NaturalLanguageUnderstandingV1(
version='2017-05-19',
username='3efc3d64-d9ee-43b3-a289-e530bad6347b',
password='uDs5p3a4CPyd')
def natural_language(tweet):
    """Run Watson NLU sentiment + emotion analysis over the tweet text."""
    return natural_language_understanding.analyze(
        text=tweet,
        features=[Features.Sentiment(), Features.Emotion()])
def fix_tstamp(tstamp):
    """Convert Twitter's created_at stamp (e.g. 'Mon Oct 16 12:57:50 +0000 2017')
    into the 'YYYY-MM-DD HH:MM:SS' form used for indexing."""
    cleaned = tstamp.replace(" +0000", "")
    parsed = datetime.datetime.strptime(cleaned, '%a %b %d %H:%M:%S %Y')
    return parsed.strftime('%Y-%m-%d %H:%M:%S')
class listener(StreamListener):
    """Tweepy stream listener: enriches each tweet with Watson NLU results."""

    def on_data(self, data):
        """Handle one raw stream payload; returning True keeps the stream open."""
        print(data)
        data = json.loads(data)
        # Skip retweets and tweets that merely mention the account.
        if not data['retweeted'] and '@realDonaldTrump' not in data['text']:
            data["created_at"] = fix_tstamp(data["created_at"])
            # 'YYYY-MM' prefix, used for the monthly Elasticsearch index name.
            indexdate = data["created_at"][:7]
            try:
                data["watson_natural_lang"] = natural_language(data["text"])
            except Exception:
                # Narrowed from a bare `except:`: enrichment stays best-effort
                # but SystemExit/KeyboardInterrupt are no longer swallowed.
                print(data["text"])
            print(data)
            #es.index(index='presidentialtweets-' + indexdate, doc_type='twitter', id=data["id"], body=data)
        return True

    def on_error(self, status):
        print(status)
# Entry point: open the filtered Twitter stream and process tweets live.
def main():
twitterStream = Stream(auth, listener())
# 25073877 is the numeric user ID being followed.
twitterStream.filter(follow=['25073877'])
if __name__ == '__main__':
main()
| Python | 0.000001 | |
3345dc2f1ac15f06d3e95b5ead894ee9d3a27d9e | Add file writer utility script | piwrite.py | piwrite.py | #!/bin/env python
import argparse
import sys
import os
parser = argparse.ArgumentParser(description="Write multiple svgs from stdin to files")
parser.add_argument('-o', '--outfile', metavar='OUTFILE', default='output.svg')
args = parser.parse_args()
base, extension = os.path.splitext(args.outfile)
def write_files(collection):
    """Write each string in *collection* to its own sequentially-numbered file.

    File names are built from the module-level `base` and `extension` parts
    of the --outfile argument (e.g. output000000.svg, output000001.svg, ...).
    """
    for i, s in enumerate(collection):
        # Context manager guarantees the handle is closed even if write fails
        # (the original leaked the descriptor on an exception).
        with open(base + "%06d" % i + extension, 'w') as f:
            f.write(s)
write_files(sys.stdin.readlines())
| Python | 0.000001 | |
7147dfc237acb64a8e655e63681a387282043994 | Add lc0031_next_permutation.py | lc0031_next_permutation.py | lc0031_next_permutation.py | """Leetcode 31. Next Permutation
Medium
URL: https://leetcode.com/problems/next-permutation/
Implement next permutation, which rearranges numbers into the lexicographically
next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible
order (ie, sorted in ascending order).
The replacement must be in-place and use only constant extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding
outputs are in the right-hand column.
1,2,3 -> 1,3,2
3,2,1 -> 1,2,3
1,1,5 -> 1,5,1
"""
class Solution(object):
    def nextPermutation(self, nums):
        """
        Rearrange nums in place into the lexicographically next permutation,
        falling back to ascending order when nums is already the highest one.
        Runs in O(n) time with O(1) extra space.

        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        # Find the rightmost index whose value is smaller than its successor.
        pivot = len(nums) - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot >= 0:
            # Swap the pivot with the smallest larger element to its right
            # (the suffix is non-increasing, so scan from the end).
            successor = len(nums) - 1
            while nums[successor] <= nums[pivot]:
                successor -= 1
            nums[pivot], nums[successor] = nums[successor], nums[pivot]
        # The suffix is descending; reversing it yields the minimal tail.
        # (When pivot == -1 this reverses the whole list.)
        nums[pivot + 1:] = reversed(nums[pivot + 1:])
# Placeholder driver: the kata file ships without a CLI; nothing to run yet.
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000001 | |
46d5b197e022815c2074fbc94ca324d31d470dd0 | Implement a fasttext example (#3446) | examples/imdb_fasttext.py | examples/imdb_fasttext.py | '''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000    # vocabulary size kept from the IMDB dataset
maxlen = 400            # pad/truncate every review to this many tokens
batch_size = 32
embedding_dims = 20
nb_epoch = 5

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
                    embedding_dims,
                    input_length=maxlen))

# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))

# We flatten the output of the AveragePooling1D layer
model.add(Flatten())

# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation = 'sigmoid'))

# Binary sentiment classification -> binary cross-entropy loss.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size,
          nb_epoch=nb_epoch,
          validation_data=(X_test, y_test))
| Python | 0 | |
dde0efeec1aca8ed3ec2e444bbb4c179be89fec5 | Create MooreNeightbourhood.py | Checkio/MooreNeightbourhood.py | Checkio/MooreNeightbourhood.py | def count_neighbours(grid, row, col):
neig = 0
if (col - 1 >= 0):
if (grid[row][col - 1] == 1):
neig += 1
if (col - 1 >= 0 and row - 1 >= 0):
if (grid[row - 1][col -1] == 1):
neig += 1
if (row - 1 >= 0):
if (grid[row - 1][col] == 1):
neig += 1
if (col + 1 < len(grid[0]) and row - 1 >= 0):
if (grid[row - 1][col + 1] == 1):
neig += 1
if (col + 1 < len(grid[0])):
if (grid[row][col + 1] == 1):
neig += 1
if (col + 1 < len(grid[0]) and row + 1 < len(grid)):
if (grid[row + 1][col + 1] == 1):
neig += 1
if (row + 1 < len(grid)):
if (grid[row + 1][col] == 1):
neig += 1
if (col - 1 >= 0 and row + 1 < len(grid)):
if (grid[row + 1][col - 1] == 1):
neig += 1
return neig
if __name__ == '__main__':
    #These "asserts" using only for self-checking and not necessary for auto-testing
    # Cases cover: interior cell, corner cell, dense grid, isolated cell.
    assert count_neighbours(((1, 0, 0, 1, 0),
                             (0, 1, 0, 0, 0),
                             (0, 0, 1, 0, 1),
                             (1, 0, 0, 0, 0),
                             (0, 0, 1, 0, 0),), 1, 2) == 3, "1st example"
    assert count_neighbours(((1, 0, 0, 1, 0),
                             (0, 1, 0, 0, 0),
                             (0, 0, 1, 0, 1),
                             (1, 0, 0, 0, 0),
                             (0, 0, 1, 0, 0),), 0, 0) == 1, "2nd example"
    assert count_neighbours(((1, 1, 1),
                             (1, 1, 1),
                             (1, 1, 1),), 0, 2) == 3, "Dense corner"
    assert count_neighbours(((0, 0, 0),
                             (0, 1, 0),
                             (0, 0, 0),), 1, 1) == 0, "Single"
| Python | 0 | |
5dd5bf4b189d7a9af64ebad29cfdaceb013a9e6f | Create Classes.py | Sharing/reports/Classes.py | Sharing/reports/Classes.py | import pprint # Allows Pretty Print of JSON
#############################################
# Define a Class to represent a group
#############################################
class Group:
    """A group granted access to a shared folder."""

    def __init__(self, name='', members_count='', permision='', group_type=''):
        # The original defined __init__ twice; Python keeps only the last
        # definition, so the zero-argument form never actually worked.
        # Default arguments restore it while keeping positional calls
        # (as used by SharedFolder.addGroup) unchanged.
        self.group_name = name
        self.group_members = members_count
        self.group_permission = permision
        self.group_type = group_type
#############################################
# Define a Class to represent a user
#############################################
class User:
    """A user (collaborator) with access to a shared folder."""

    def __init__(self, user_access_type='', user_permission_inherited='',
                 user_email='', user_on_team='', user_name=''):
        # The original defined __init__ twice; Python keeps only the last
        # definition, so the zero-argument form never actually worked.
        # Default arguments restore it while keeping positional calls
        # (as used by SharedFolder.addUser) unchanged.
        self.user_access_type = user_access_type
        self.user_permission_inherited = user_permission_inherited
        self.user_email = user_email
        self.user_on_team = user_on_team
        self.user_name = user_name
#############################################
# Define a Class to represent a shared folder
#############################################
class SharedFolder:
    """One shared folder plus its owner context and collaborators (groups,
    invitees and users), with helpers that flatten the sharing information
    into flat report rows (lists of strings)."""

    def __init__(self):
        self.team_member_id = ''
        self.team_member_name = ''
        self.team_member_email = ''
        self.email_is_verified = ''
        self.account_status = ''
        self.is_team_folder = ''
        self.folder_name = ''
        self.share_folder_id = ''
        self.time_invited = ''
        self.path_lower = ''
        self.mount_status = '' # If path_lower is empty folder is UNMOUNTED
        self.preview_url = ''
        self.folder_permission = ''
        self.groups = [] # List of Groups with access
        self.invitees = [] # List of Invited users
        self.users = [] # List of Users with access

    def getPathLower(self):
        """Return path_lower, normalising None to ''."""
        if( self.path_lower == None ):
            return ''
        else:
            return self.path_lower

    def addGroup(self, name, members_count, permision, group_type):
        """Append a Group built from the given attributes."""
        grp = Group( name, members_count, permision, group_type )
        self.groups.append( grp )

    def addUser(self, user_access_type, user_permission_inherited, user_email, user_on_team, user_name):
        """Append a User built from the given attributes."""
        usr = User( user_access_type, user_permission_inherited, user_email, user_on_team, user_name)
        self.users.append( usr )

    def getUsers(self):
        """Return the list of User records with access."""
        #pprint.pprint ( len( self.users ))
        return self.users

    def getExternallyOwnedFolderRow(self):
        """Build one report row for a folder owned by a non-team member.

        The owner columns come from the first user whose access type is
        'owner' and who is not on the team; they fall back to '' when no
        such user exists.  Column order matches the trailing comments.
        """
        row = []
        extUser = None
        # Find the user that is external owner
        for aUser in self.users:
            #print ( 'aUser: %s | %s | %s | %s' % (aUser.user_name, aUser.user_email, aUser.user_access_type, aUser.user_on_team))
            if ( aUser.user_access_type == 'owner' and aUser.user_on_team == False ):
                extUser = aUser
                break
        row.append( '' if (extUser == None or extUser.user_email == None) else extUser.user_email ) #self.team_member_email #'Owner email'
        row.append( '' if (extUser == None or extUser.user_name == None) else extUser.user_name ) #self.team_member_name #'Owner Name',
        row.append( self.folder_name ) #'Folder Name'
        row.append( self.getPathLower() ) #'Folder Path'
        row.append( self.share_folder_id ) #'Share Folder ID'
        row.append( '' ) #'Folder Mount Status'
        row.append( self.team_member_email ) #'User Email'
        row.append( self.folder_permission ) #'User Access Type'
        row.append( str(False) ) #'User on Team'
        row.append( '' ) #'Group Name'
        row.append( '' ) #'Group Members'
        row.append( '' ) #'Group Permission'
        row.append( '' ) #'Group Type'
        row.append( str(False) ) # 'Team owned folder'
        return row

    def getOwnerOwnedFolderRows(self):
        """Build report rows for a team-owned folder: one row per group
        with access, then one row per user with access.  Column order is
        the same as getExternallyOwnedFolderRow."""
        rows = []
        # Build a list sharing
        # One row per groups
        for grp in self.groups:
            row = []
            row.append( self.team_member_email ) #'Owner email'
            row.append( self.team_member_name ) #'Owner Name',
            row.append( self.folder_name ) #'Name'
            row.append( self.getPathLower() ) #'Folder Path'
            row.append( self.share_folder_id ) # 'Share Folder ID'
            row.append( self.mount_status ) #'Folder Mount Status'
            row.append( '' ) # Collaborator Email
            row.append( '' ) # Collaborator Permissions
            row.append( '' ) # Collaborator on Team
            row.append( grp.group_name ) #'Group Name'
            row.append( str(grp.group_members) ) #'Group Members'
            row.append( grp.group_permission ) #'Group Permission'
            row.append( grp.group_type ) #'Group Type'
            row.append( str(True) ) # 'Team owned folder'
            rows.append ( row )
        # One row per user
        for aUser in self.users:
            row = []
            row.append( self.team_member_email ) #'Owner email',
            row.append( self.team_member_name ) #'Owner Name',
            row.append( self.folder_name ) #'Name'
            row.append( self.getPathLower() ) #'Folder Path'
            row.append( self.share_folder_id ) #'Share Folder ID'
            row.append( self.mount_status ) #'Folder Mount Status'
            row.append( aUser.user_email ) #'User Email'
            row.append( aUser.user_access_type ) #'User Access Type'
            row.append( str(aUser.user_on_team) ) #'User on Team'
            row.append( '' ) #'Group Name'
            row.append( '' ) #'Group Members'
            row.append( '' ) #'Group Permission'
            row.append( '' ) #'Group Type'
            row.append( str(True) ) # 'Team owned folder'
            rows.append ( row )
        return rows

    # Method to check if this folder is owned by the user
    def isOwnedByUser(self):
        return self.folder_permission == 'owner'

    # Method to check if this folder is owned by a team member
    def isOwnedByTeamMember(self):
        # Check that User is owner of folder
        if ( self.isOwnedByUser() ):
            return False
        for aUser in self.users:
            if ( aUser.user_access_type == 'owner' and aUser.user_on_team == True ):
                return True
        return False
| Python | 0 | |
a3df0567c295f0b2879c9a4f095a31108359d531 | Add missing migration for invoice status | nodeconductor/billing/migrations/0003_invoice_status.py | nodeconductor/billing/migrations/0003_invoice_status.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('billing', '0002_pricelist'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='status',
field=models.CharField(max_length=80, blank=True),
preserve_default=True,
),
]
| Python | 0 | |
269a34e87797a3271013e23d504a6f6a159ae48e | Index testgroup_test.test_id | migrations/versions/3a3366fb7822_index_testgroup_test.py | migrations/versions/3a3366fb7822_index_testgroup_test.py | """Index testgroup_test.test_id
Revision ID: 3a3366fb7822
Revises: 139e272152de
Create Date: 2014-01-02 22:20:55.132222
"""
# revision identifiers, used by Alembic.
revision = '3a3366fb7822'
down_revision = '139e272152de'

from alembic import op


def upgrade():
    """Create the index on testgroup_test.test_id."""
    op.create_index('idx_testgroup_test_test_id', 'testgroup_test', ['test_id'])


def downgrade():
    """Drop the index created by upgrade()."""
    op.drop_index('idx_testgroup_test_test_id', 'testgroup_test')
| Python | 0.000011 | |
7b40a4902d1dc43c73a7858fc9286a641b3a9666 | Add validation function removed from main script. | assess_isoform_quantification/options.py | assess_isoform_quantification/options.py | from schema import Schema
def validate_file_option(file_option, msg):
    """Validate that *file_option* names an openable file.

    Returns the validated value on success; on failure the schema library
    raises with an error message built from *msg* and the offending path.
    """
    error_text = "%s '%s'." % (msg, file_option)
    return Schema(open, error=error_text).validate(file_option)
| Python | 0 | |
d04118acc5421d4b48e31c78874a740eb469c3d7 | fix boan1244 'Boรยซng' | migrations/versions/506dcac7d75_fix_boan1244_mojibake.py | migrations/versions/506dcac7d75_fix_boan1244_mojibake.py | # coding=utf-8
"""fix boan1244 mojibake
Revision ID: 506dcac7d75
Revises: 4513ba6253e1
Create Date: 2015-04-15 19:20:59.059000
"""
# revision identifiers, used by Alembic.
revision = '506dcac7d75'
down_revision = '4513ba6253e1'

import datetime  # NOTE(review): unused in this module
from alembic import op
import sqlalchemy as sa


def upgrade():
    """Fix the doubly-encoded (mojibake) name for language 'boan1244'.

    Rewrites the name in the language table (matched by id) and in the
    matching 'name'-type identifier row, bumping each row's updated
    timestamp.
    """
    id, before, after = 'boan1244', u'Bo\xc3\xabng', u'Bo\xebng'
    update_name = sa.text('UPDATE language SET updated = now(), '
                          'name = :after WHERE id = :id AND name = :before')
    update_ident = sa.text('UPDATE identifier SET updated = now(), '
                           'name = :after WHERE type = :type AND name = :before ')
    op.execute(update_name.bindparams(id=id, before=before, after=after))
    op.execute(update_ident.bindparams(type='name', before=before, after=after))


def downgrade():
    # Intentionally irreversible: the mojibake spelling is not restored.
    pass
| Python | 0.000001 | |
85e4a327ba641fbe9c275b4760c60683ca215d61 | Add unit tests. | test_pto.py | test_pto.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for PTO."""
import unittest
import pto
import time
_TIMEOUT = 5
_FUZZ_FACTOR = 1
class SlowClass(object):
    """Fixture whose methods deliberately busy-wait past the pto timeout.

    Each method spins for _TIMEOUT + _FUZZ_FACTOR seconds, so the
    @pto.timeout(_TIMEOUT) decorator is guaranteed to fire first.
    """

    @pto.timeout(_TIMEOUT)
    def slow_instance_method(self):
        cut_off = time.time() + _TIMEOUT
        while time.time() < cut_off + _FUZZ_FACTOR:
            pass
        return True

    @classmethod
    @pto.timeout(_TIMEOUT)
    def slow_class_method(cls):
        cut_off = time.time() + _TIMEOUT
        while time.time() < cut_off + _FUZZ_FACTOR:
            pass
        return True

    @staticmethod
    @pto.timeout(_TIMEOUT)
    def slow_static_method():
        cut_off = time.time() + _TIMEOUT
        while time.time() < cut_off + _FUZZ_FACTOR:
            pass
        return True
class PtoTestCase(unittest.TestCase):
    """Verify pto.timeout raises TimedOutException for plain functions,
    instance methods, class methods and static methods alike."""

    def setUp(self):
        self.slowInstance = SlowClass()

    def tearDown(self):
        # Nothing to clean up; kept for symmetry with setUp.
        pass

    def test_function(self):
        @pto.timeout(_TIMEOUT)
        def slow_func():
            cut_off = time.time() + _TIMEOUT
            while time.time() < cut_off + _FUZZ_FACTOR:
                pass
            return True
        self.assertRaises(pto.TimedOutException, slow_func)

    def test_instance_method(self):
        self.assertRaises(pto.TimedOutException, self.slowInstance.slow_instance_method)

    def test_class_method(self):
        self.assertRaises(pto.TimedOutException, self.slowInstance.slow_class_method)

    def test_static_method(self):
        self.assertRaises(pto.TimedOutException, SlowClass.slow_static_method)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
2067bdb9d0f9947a674cb94d0c988049f3038ea4 | create test stubs | test_viz.py | test_viz.py | def test_create_distance_matrix():
pass
def test_get_translation_table():
pass
def test_naive_backtranslate():
pass
def test_get_peptide_index():
pass
def test_demo_dna_features_viewer():
pass
def test_ngrams():
pass
def test_make_trigrams():
pass
def test_nucleotide_distribution():
pass
def test_get_peptide_toplot():
pass
def test_peptide_distribution():
pass
def test_plot_ABI():
pass
def test_get_genbank_sequence():
pass
def test_get_fasta_sequence():
pass
def test_calc_sequence_similarity():
pass
def test_make_parser():
pass
def test_main():
pass
| Python | 0.000002 | |
e304aae71617cdba0ffcb720a24406375fb866a1 | Copy of Ryan's PCMToWave component. | Sketches/MH/audio/ToWAV.py | Sketches/MH/audio/ToWAV.py | from Axon.Component import component
import string
import struct
from Axon.Ipc import producerFinished, shutdown
class PCMToWave(component):
    """Axon/Kamaelia component converting float PCM samples to a WAV stream.

    Receives lists of floats in [-1.0, 1.0] on "inbox" and emits a mono
    RIFF/WAVE byte stream on "outbox".  The RIFF and data chunk lengths
    are written as a large placeholder value because the final length is
    unknown while streaming.
    """

    def __init__(self, bytespersample, samplingfrequency):
        super(PCMToWave, self).__init__()
        self.bytespersample = bytespersample
        self.samplingfrequency = samplingfrequency
        if self.bytespersample not in [2, 4]:
            print("Currently bytespersample must be 2 or 4")
            raise ValueError
        # Pick the per-sample encoder once, up front.
        bytestofunction = {2: self.sample2Byte, 4: self.sample4Byte}
        self.pack = bytestofunction[self.bytespersample]

    def sample2Byte(self, value):
        """Encode one clamped sample as little-endian signed 16-bit."""
        # min() guards the +1.0 case: 1.0 * 32768 == 32768 overflows the
        # signed short range (max 32767) and made struct.pack raise.
        return struct.pack("<h", min(int(value * 32768.0), 32767))

    def sample4Byte(self, value):
        """Encode one clamped sample as little-endian signed 32-bit."""
        # Same overflow guard for the signed 32-bit range (max 2147483647).
        return struct.pack("<l", min(int(value * 2147483648.0), 2147483647))

    def main(self):
        """Generator main loop: emit WAV headers, then encoded samples."""
        # We don't know the length yet, so say the file lasts an arbitrary
        # (long) time.
        riffchunk = "RIFF" + struct.pack("<L", 0xEFFFFFFF) + "WAVE"
        bytespersecond = self.bytespersample * self.samplingfrequency

        formatchunk = "fmt "
        formatchunk += struct.pack("<L", 0x10)  # 16 for PCM
        formatchunk += struct.pack("<H", 0x01)  # PCM/Linear quantization
        formatchunk += struct.pack("<H", 0x01)  # mono
        formatchunk += struct.pack("<L", self.samplingfrequency)
        formatchunk += struct.pack("<L", bytespersecond)
        formatchunk += struct.pack("<H", self.bytespersample)
        formatchunk += struct.pack("<H", self.bytespersample * 8)

        self.send(riffchunk, "outbox")
        self.send(formatchunk, "outbox")

        # again, an arbitrary (large) placeholder data-chunk size
        datachunkheader = "data" + struct.pack("<L", 0xEFFFFFFF)
        self.send(datachunkheader, "outbox")

        running = True
        while running:
            yield 1

            codedsamples = []
            while self.dataReady("inbox"):  # we accept lists of floats
                samplelist = self.recv("inbox")
                for sample in samplelist:
                    # Clamp out-of-range input before encoding.
                    if sample < -1:
                        sample = -1
                    elif sample > 1:
                        sample = 1
                    codedsamples.append(self.pack(sample))
                del samplelist
            if codedsamples:
                self.send(string.join(codedsamples, ""), "outbox")

            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
                    return
            self.pause()
| Python | 0.000002 | |
19b283e2810b98da275ff60d0e0632e2337f4375 | Add vde evaluate script. | test/vde.py | test/vde.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# std import
import sys
import argparse
import os.path
from collections import defaultdict
import csv
import re
def main():
    """Entry point: parse CLI options, load the experiment and truth call
    sets, compare them within +/-delta positions, and print a CSV summary.

    Accepted input formats are discovered dynamically: every module-level
    function named <fmt>2eva registers <fmt> as a valid format choice.
    """
    enable_input = [name.split("2")[0] for name in globals().keys()
                    if name.endswith("2eva")]
    parser = argparse.ArgumentParser(
        prog="vde",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-e", "--experiment",
                        type=str,
                        help="File of experimente result.",
                        required=True)
    parser.add_argument("-t", "--truth",
                        type=str,
                        help="File of truth result.",
                        required=True)
    parser.add_argument("-d", "--delta",
                        type=int,
                        help="Acceptable diff betwen truth and experimente.",
                        default=5)
    parser.add_argument("-ef", "--experiment-format",
                        type=str,
                        help="Format of experiment file",
                        choices=enable_input,
                        default="eva")
    parser.add_argument("-tf", "--truth-format",
                        type=str,
                        help="Format of truth file",
                        choices=enable_input,
                        default="eva")

    # parsing cli argument
    argument = vars(parser.parse_args())

    expfunc = globals()[argument["experiment_format"]+"2eva"]
    truthfunc = globals()[argument["truth_format"]+"2eva"]

    experiment, count = expfunc(argument["experiment"])
    # NOTE(review): the experiment's count is overwritten here -- the
    # counts handed to result_printing are the TRUTH file's per-type
    # totals.  Confirm this is intended.
    truth, count = truthfunc(argument["truth"])

    result = compare(experiment, truth, argument["delta"])
    result_printing(result, count)
def result_printing(result, count):
    """ Print the per-type comparison as CSV on stdout.

    result maps gap type -> {"TP": n, "FP": n}; count maps gap type ->
    number of occurrences of that type in the reference set.

    NOTE(review): the column labels look swapped relative to the usual
    definitions -- TP/(TP+FP), printed as 'recall', is normally called
    precision, and TP/count, printed as 'precision', is normally recall.
    Confirm before renaming, since the CSV header is part of the output.
    """
    head = ",".join(("type", "TP", "FP", "recall", "precision"))
    print(head)
    for gap in result.keys():
        total = result[gap]["TP"] + result[gap]["FP"]
        # Degenerate denominators fall back to a perfect score of 1.
        recall = 1 if total == 0 else result[gap]["TP"]/total
        prec = 1 if count[gap] == 0 else result[gap]["TP"]/count[gap]
        print(",".join((str(gap),
                        str(result[gap]["TP"]),
                        str(result[gap]["FP"]),
                        str(recall),
                        str(prec))))
def compare(exp, truth, delta):
    """Tally per-type TP/FP by matching experiment calls against truth.

    A call of type t at position p (a string of digits) is a true positive
    when the truth set contains the same type at any position within
    +/-delta of p; otherwise it is a false positive.

    Returns a defaultdict mapping type -> defaultdict with "TP"/"FP"
    counters (missing counters read as 0).
    """
    result = defaultdict(lambda: defaultdict(int))
    for exp_pos, type_gaps in exp.items():
        for type_gap in type_gaps:
            # Probe the exact position first, then widen symmetrically,
            # exactly like the original prev/next scan.
            offsets = [0]
            for i in range(1, delta + 1):
                offsets.extend((-i, i))
            found = False
            for offset in offsets:
                candidate = str(int(exp_pos) + offset)
                if candidate in truth and type_gap in truth[candidate]:
                    found = True
                    break
            # defaultdict(int) makes the explicit init-or-increment helper
            # of the original redundant.
            result[type_gap]["TP" if found else "FP"] += 1
    return result
def eva2eva(filename):
    """ Read an eva CSV file (position,type per row) and return (data, count).

    data maps position (str) -> set of gap types; count maps gap type ->
    number of occurrences.
    """
    __check_file_exist(filename)
    data = defaultdict(set)
    count = defaultdict(int)
    with open(filename) as csvfile:
        linereader = csv.reader(csvfile)
        # Plain loop: the original built a throwaway set comprehension
        # purely for the side effects of __add_in_data_count.
        for val in linereader:
            __add_in_data_count(val[0], val[1], data, count)
    return data, count
def breakpoints2eva(filename):
    """ Read a breakpoints (FASTA-like) file and return (data, count).

    Positions and gap types come from header lines such as
    '>..._pos_123_..._HOM'; the trailing MTG type code is translated to
    its eva name via mtg2eva.  data maps position (str) -> set of types;
    count maps type -> occurrences.
    """
    __check_file_exist(filename)
    data = defaultdict(set)
    count = defaultdict(int)
    mtg2eva = {"HOM": "homo",
               "HET": "hete",
               "SNP": "snp",
               "MSNP": "multi_snp",
               "DEL": "deletion"}
    findpos = re.compile(r'pos_(\d+)')
    findtype = re.compile(r'_([a-zA-Z]+)$')
    with open(filename) as filehand:
        for line in filehand:
            line = line.strip()
            if line.startswith(">"):
                # Only header lines carry position/type information.
                __add_in_data_count(findpos.search(line).group(1),
                                    mtg2eva[findtype.search(line).group(1)],
                                    data, count)
    return data, count
def __iterate_result(result, type_gap, tpofp):
    """ Increment result[type_gap][tpofp].

    result is a defaultdict of defaultdict(int) (see compare), so a plain
    += covers both the existing-key and first-occurrence cases; the old
    explicit if/else branch was redundant.
    """
    result[type_gap][tpofp] += 1
def __add_in_data_count(pos, type_gap, data, counter):
    """ Record one call: add type_gap to data[pos] (a defaultdict of sets)
    and bump counter[type_gap]. """
    data[pos].add(type_gap)
    counter[type_gap] += 1
def __check_file_exist(filename):
    """ Raise FileNotFoundError unless *filename* is an existing file.

    A real exception replaces the original assert, which silently
    disappears when Python runs with -O.
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError("Error when I try open " + filename)
# Script entry point.
if(__name__ == '__main__'):
    main()
| Python | 0 | |
16275938769c16c79b89349612e8e7b2891de815 | Add migration for user manager | kolibri/auth/migrations/0008_auto_20180222_1244.py | kolibri/auth/migrations/0008_auto_20180222_1244.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-02-22 20:44
from __future__ import unicode_literals
import kolibri.auth.models
from django.db import migrations
class Migration(migrations.Migration):
    """Attach the custom FacilityUserModelManager as FacilityUser.objects."""

    dependencies = [
        ('kolibriauth', '0007_auto_20171226_1125'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='facilityuser',
            managers=[
                ('objects', kolibri.auth.models.FacilityUserModelManager()),
            ],
        ),
    ]
| Python | 0.000001 | |
74450edae5d327659ec618f7e160bc5dd37bd512 | ๆทปๅ ๅจPython2DWrapperไธญไฝฟ็จ็Pythonๆไปถ | PluginSDK/BasicRecon/Python/Py2C.py | PluginSDK/BasicRecon/Python/Py2C.py | from PIL import Image
import numpy as np
import matplotlib
"""
This is Module Py2C for c++
"""
class A: pass  # NOTE(review): unused placeholder class -- confirm before removing
class B: pass  # NOTE(review): unused placeholder class -- confirm before removing
def ShowImage(image, width, height):
    """Display a flat list of pixel values as a width x height image.

    Values are normalised to 0..255 before display.  Returns the literal
    string 'success!' (kept as the caller-facing protocol).
    """
    # Build the 2-D array directly: the original's `[[]] * width` plus
    # per-element list concatenation rebuilt each row quadratically, and
    # its flat index `x*width + y` was only correct for square images
    # (row-major layout needs x*height + y, which reshape provides).
    npimg = np.array(image, dtype=float).reshape(width, height)
    # float dtype keeps the normalisation a true division.
    npimg = npimg / npimg.max() * 255
    pil_image = Image.fromarray(npimg)
    pil_image.show()
    return 'success!'
## image is 2d list
def ShowImage2D(image, width, height):
    """Display *image* (a 2-D list) and a second copy with doubled values,
    then return the image as a numpy array.

    *width*/*height* are accepted but unused -- the array shape comes from
    the nested list itself.
    """
    pil_image = Image.fromarray(np.array(image))
    pil_image2 = Image.fromarray(np.array(image)*2)
    pil_image.show()
    pil_image2.show()
    return np.array(image)
if __name__=='__main__':
    # Manual smoke test: show a square ramp image, flat then as a 2-D list.
    width = 256
    height = 256
    li = [i for i in range(width*height)]
    image = ShowImage(li, width, height)
    li2d = [[i for j in range(height)] for i in range(width)] # *width+j)*255/(width*height)
    image2d = ShowImage2D(li2d,width, height)
| Python | 0 | |
883aac8a282d4525e82d3eb151ea293c5577424c | Add data migration to create gesinv | core/migrations/0002_auto_20141008_0853.py | core/migrations/0002_auto_20141008_0853.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_extra_users(apps, schema_editor):
    """Forward data migration: create the 'GesInv-ULL' auth user and its
    UserProfile (documento '00000000A') via historical models."""
    user = apps.get_model("auth.User").objects.create(username='GesInv-ULL')
    apps.get_model("core", "UserProfile").objects.create(user=user,
                                                         documento='00000000A')
def delete_extra_users(apps, schema_editor):
    """Reverse data migration: delete the profile first, then the user."""
    user = apps.get_model("auth.User").objects.get(username='GesInv-ULL')
    apps.get_model("core", "UserProfile").objects.get(user=user).delete()
    user.delete()
class Migration(migrations.Migration):
    """Data migration wiring create/delete of the GesInv-ULL user."""

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(create_extra_users, delete_extra_users),
    ]
| Python | 0 | |
eceffd58e62378287aacab29f4849d0a6e983e49 | Factor out the timestamp-related functionality to a class that outputs json strings | src/Timestamp.py | src/Timestamp.py | #!/usr/bin/python
import time
class Timestamp:
    """A parsed calendar timestamp; hour/minute stay -1 when absent."""

    hour = -1
    minute = -1

    def __init__(self, date):
        """Parse a 'YYYY Mon DD[ HH:MM]' string, e.g. '2222 Jan 26 02:10'."""
        self.year = int(date[0:4])
        self.month = int(self.month_name_to_num(date[5:8]))
        self.day = int(date[9:11])
        try:
            self.hour = int(date[12:14])
            self.minute = int(date[15:17])
        except ValueError:
            # No time-of-day part: keep the class-level -1 defaults.
            return

    def month_name_to_num(self, month):
        """Map an abbreviated month name ('Jan') to its number (1)."""
        return time.strptime(month, '%b').tm_mon

    def json(self):
        """Render the five fields as a compact JSON object string."""
        units = ["year", "month", "day", "hour", "minute"]
        value = {"year": self.year, "month": self.month, "day": self.day,
                 "hour": self.hour, "minute": self.minute}
        parts = ['"%s":%s' % (unit, value[unit]) for unit in units]
        return '{' + ','.join(parts) + '}'
def main():
    # Test Timestamp class:
    p = Timestamp("2222 Jan 26 0210") # full date
    # NOTE(review): "0210" (no colon) makes slice [15:17] read minute as
    # '0', not '10' -- the slicing suggests 'HH:MM' was the intended
    # format; confirm and add the colon.
    print p.year, p.month, p.day, p.hour, p.minute
    print p.json()
    q = Timestamp("2000 Feb 13") # no hours/minutes
    print q.year, q.month, q.day, q.hour, q.minute
    print q.json()


if __name__ == "__main__":
    main()
| Python | 0.999997 | |
e6181c5d7c95af23ee6d51d125642104782f5cf1 | Add solution for 136_Single Number with XOR operation. | Python/136_SingleNumber.py | Python/136_SingleNumber.py | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
#Using XOR to find the single number.
#Because every number appears twice, while N^N=0, 0^N=N,
#XOR is cummutative, so the order of elements does not matter.
#Finally, it will be res = 0 ^ singlenumber ==> res = singlenumber
res = 0
for num in nums:
res ^= num
return res
# Quick manual check: 3 is the only value without a pair below.
nums = [1,1,5,5,3,4,4,9,9,8,8,7,7]
foo = Solution()
print foo.singleNumber(nums)
| Python | 0 | |
83ebfe1ff774f8d5fb5ae610590ca8fca1c87100 | add migration for on_delete changes | app/backend/wells/migrations/0034_auto_20181127_0230.py | app/backend/wells/migrations/0034_auto_20181127_0230.py | # Generated by Django 2.1.3 on 2018-11-27 02:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter several foreign keys to use on_delete=PROTECT across
    activitysubmission, productiondata and well."""

    dependencies = [
        ('wells', '0033_auto_20181127_0230'.replace('0034_auto_20181127_0230', '') and '0033_auto_20181119_1857'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activitysubmission',
            name='decommission_method',
            field=models.ForeignKey(blank=True, db_column='decommission_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DecommissionMethodCode', verbose_name='Method of Decommission'),
        ),
        migrations.AlterField(
            model_name='productiondata',
            name='well_yield_unit',
            field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
        ),
        migrations.AlterField(
            model_name='well',
            name='aquifer',
            field=models.ForeignKey(blank=True, db_column='aquifer_id', null=True, on_delete=django.db.models.deletion.PROTECT, to='aquifers.Aquifer', verbose_name='Aquifer ID Number'),
        ),
        migrations.AlterField(
            model_name='well',
            name='bcgs_id',
            field=models.ForeignKey(blank=True, db_column='bcgs_id', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BCGS_Numbers', verbose_name='BCGS Mapsheet Number'),
        ),
        migrations.AlterField(
            model_name='well',
            name='decommission_method',
            # NOTE(review): null='True' is a truthy string, not the boolean
            # used elsewhere; it mirrors the model definition, so fix it in
            # the model first (if at all) to avoid migration drift.
            field=models.ForeignKey(blank=True, db_column='decommission_method_code', null='True', on_delete=django.db.models.deletion.PROTECT, to='wells.DecommissionMethodCode', verbose_name='Method of Decommission'),
        ),
        migrations.AlterField(
            model_name='well',
            name='observation_well_status',
            # NOTE(review): same stringly-typed null='True' as above.
            field=models.ForeignKey(blank=True, db_column='obs_well_status_code', null='True', on_delete=django.db.models.deletion.PROTECT, to='wells.ObsWellStatusCode', verbose_name='Observation Well Status'),
        ),
    ]
| Python | 0 | |
59c62bb0f13be7910bf2280126a0909ffbe716f0 | Create simple_trie.py | simple_trie.py | simple_trie.py | class Trie:
def __init__(self):
self.node = {}
self.word = None
def add(self,string):
node = self.node
currentNode = None
for char in string:
currentNode = node.get(char, None)
if not currentNode:
node[char] = Trie()
currentNode = node[char]
node = currentNode.node
currentNode.word = string
def find(self, query):
node = self
result = []
for char in query:
currentNode = node.node.get(char, None)
if not currentNode:
return result
node = currentNode
return self.findall(node, result)
def findall(self, node, result):
if node.word:
result.append(node.word)
for value in node.node.values():
self.findall(value, result)
return result
# Manual demo (Python 2 prints): prefix searches over four words.
t = Trie()
t.add("cat")
t.add("cats")
t.add("cow")
t.add("camp")
print t.find('c')
print t.find('ca')
print t.find("abcde")
print t.find("cows")
| Python | 0.000002 | |
983f041b25b0de77f3720378e12b22e7d8f2e040 | Create same_first_last.py | Python/CodingBat/same_first_last.py | Python/CodingBat/same_first_last.py | # http://codingbat.com/prob/p179078
def same_first_last(nums):
    """Return True when nums is non-empty and its first element equals its last."""
    return bool(nums) and nums[0] == nums[-1]
| Python | 0.00113 | |
ae6c6f3aa0863b919e0f00543cab737ae9e94129 | Add bubblesort as a placeholder for a refactored implementation | bubblesort.py | bubblesort.py | #!/usr/bin/env python
# TBD: Sort animation could take a pattern that it assumed to be "final",
# shuffle it, then take a sort generator that produced a step in the sort
# algorithm at every call. It would be sorting shuffled indices that the
# animation would use to construct each frame.
from blinkytape import blinkytape, blinkycolor, blinkyplayer
from patterns import gradient
import random, sys, time
tape = blinkytape.BlinkyTape.find_first()
start_color = blinkycolor.BlinkyColor.from_string(sys.argv[1])
end_color = blinkycolor.BlinkyColor.from_string(sys.argv[2])
pattern = gradient.Gradient(tape.pixel_count, start_color, end_color)
indexes = range(0, tape.pixel_count)
random.shuffle(indexes)
pixels = [pattern.pixels[index] for index in indexes]
tape.update(pixels)
time.sleep(5)
swap_occurred = True
while swap_occurred:
swap_occurred = False
for i in range(1, tape.pixel_count):
if indexes[i - 1] > indexes[i]:
temp = indexes[i - 1]
indexes[i - 1] = indexes[i]
indexes[i] = temp
swap_occurred = True
pixels = [pattern.pixels[index] for index in indexes]
tape.update(pixels)
| Python | 0 | |
3852ae6fcf6271ef19a182e5dfb199e4539536a1 | Create 6kyu_spelling_bee.py | Solutions/6kyu/6kyu_spelling_bee.py | Solutions/6kyu/6kyu_spelling_bee.py | from itertools import zip_longest as zlo
def how_many_bees(hive):
    """Count occurrences of 'bee' spelled forwards or backwards along the
    rows and columns of *hive* (a grid of single-character cells).

    Diagonals are not searched.  A falsy hive (None or empty) yields 0.
    """
    if not hive:
        return 0
    total = 0
    # Horizontal passes: each row joined into one string.
    for row in hive:
        line = ''.join(row)
        total += line.count('bee') + line.count('eeb')
    # Vertical passes: zlo pads ragged rows so the columns line up.
    for column in zlo(*hive, fillvalue=''):
        line = ''.join(column)
        total += line.count('bee') + line.count('eeb')
    return total
| Python | 0.000103 | |
ddb58206a52ef46f5194bf6f5c11ac68b16ab9a8 | Create minimum-window-subsequence.py | Python/minimum-window-subsequence.py | Python/minimum-window-subsequence.py | # Time: O(S * T)
# Space: O(S)
class Solution(object):
def minWindow(self, S, T):
"""
:type S: str
:type T: str
:rtype: str
"""
dp = [[None for _ in xrange(len(S))] for _ in xrange(2)]
for i, c in enumerate(S):
if c == T[0]:
dp[0][i] = i
for j in xrange(1, len(T)):
prev = None
dp[j%2] = [None] * len(S)
for i, c in enumerate(S):
if prev is not None and c == T[j]:
dp[j%2][i] = prev
if dp[(j-1)%2][i] is not None:
prev = dp[(j-1)%2][i]
start, end = 0, len(S)
for j, i in enumerate(dp[(len(T)-1)%2]):
if i >= 0 and j-i < end-start:
start, end = i, j
return S[start:end+1] if end < len(S) else ""
| Python | 0.00046 | |
eaca815937ebd1fbdd6ec5804dc52257d2775181 | Create time-gui.py | time-gui.py | time-gui.py | from tkinter import *
import time
import sys
import datetime as dt
from datetime import timedelta
def validateTextInputSize(event):
    """ Limit an Entry widget's contents to TEXT_MAXINPUTSIZE characters
    by deleting anything typed beyond the limit.

    NOTE(review): TEXT_MAXINPUTSIZE is declared global but never assigned
    anywhere in this file -- this handler raises NameError when invoked
    unless it is defined elsewhere; confirm and define it.
    """
    global TEXT_MAXINPUTSIZE
    if (event.widget.index(END) >= TEXT_MAXINPUTSIZE - 1):
        event.widget.delete(TEXT_MAXINPUTSIZE - 1)
def displayText():
    """ Show the current value of entryWidget in a message box.

    NOTE(review): under 'from tkinter import *' (Python 3) the name
    tkMessageBox does not exist (that is the Python 2 module name;
    Python 3 uses tkinter.messagebox), and entryWidget is never assigned
    in this file -- both lookups would raise NameError.  Confirm before
    wiring this handler up.
    """
    global entryWidget
    if entryWidget.get().strip() == "":
        tkMessageBox.showerror("Tkinter Entry Widget", "Enter a text value")
    else:
        tkMessageBox.showinfo("Tkinter Entry Widget", "Text value =" + entryWidget.get().strip())
if __name__ == "__main__":
main = Tk()
main.title("Main Widget")
main["padx"] = 40
main["pady"] = 2
# Create a text frame to hold the text Label and the Entry widget
textFrame = Frame(main)
#Create a Label in textFrame
l1 = Label(textFrame)
l1["text"] = "Enter time of arrival:"
l1.grid(row=0, column=0)
# Create an Entry Widget in textFrame
e1 = Entry(textFrame)
e1.bind("<Key>", validateTextInputSize)
e1["width"] = 2
e1.grid(row=0, column=1)
e1.insert(0, "6")
e1.config(bg="white")
l2 = Label(textFrame)
l2["text"] = ":"
l2.grid(row=0, column=2)
e2 = Entry(textFrame)
e2.bind("<Key>", validateTextInputSize)
e2["width"] = 2
e2.grid(row=0, column=3)
e2.insert(0, "00")
e2.config(bg="white")
#Create a Label in textFrame
l3 = Label(textFrame)
l3["text"] = "How long will you work?:"
l3.grid(row=1, column=0)
# Create an Entry Widget in textFrame
e3 = Entry(textFrame)
e3.bind("<Key>", validateTextInputSize)
e3["width"] = 2
e3.grid(row=1, column=1)
e3.insert(0, "8")
e3.config(bg="white")
l4 = Label(textFrame)
l4["text"] = ":"
l4.grid(row=1, column=2)
e4 = Entry(textFrame)
e4.bind("<Key>", validateTextInputSize)
e4["width"] = 2
e4.grid(row=1, column=3)
e4.insert(0, "00")
e4.config(bg="white")
l5 = Label(textFrame)
l5["text"] = "And lunch? :"
l5.grid(row=1, column=4)
e5 = Entry(textFrame)
e5.bind("<Key>", validateTextInputSize)
e5["width"] = 2
e5.grid(row=1, column=5)
e5.insert(0, "30")
e5.config(bg="white")
l6 = Label(textFrame)
l6["text"] = "minutes"
l6.grid(row=1, column=6)
textFrame.pack()
clock = Label(main, font=('times', 20, 'bold'), bg='green')
clock.pack(fill=BOTH, expand=1)
def tick():
s = time.strftime('%H:%M:%S')
if s != clock["text"]:
clock["text"] = s
clock.after(200, tick)
tick()
main.mainloop()
| Python | 0.000002 | |
077d4b8954918ed51c43429efd74b4911083c4f4 | Add instance_id field. | kolibri/content/migrations/0002_auto_20160630_1959.py | kolibri/content/migrations/0002_auto_20160630_1959.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-06-30 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import kolibri.content.models
import uuid
class Migration(migrations.Migration):
    """Add ContentNode.instance_id and refresh kind/extension/preset choices.

    NOTE(review): 'Vertor video' and the duplicated ('thumbnail', 'Thumbnail')
    entry below come from the model's choice definitions at the time this
    migration was generated; fix them in the model plus a new migration, not
    by editing this historical one.
    """

    dependencies = [
        ('content', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contentnode',
            options={},
        ),
        # New unique identifier for each content node.
        migrations.AddField(
            model_name='contentnode',
            name='instance_id',
            field=kolibri.content.models.UUIDField(default=uuid.uuid4, editable=False, max_length=32, unique=True),
        ),
        migrations.AlterField(
            model_name='contentnode',
            name='kind',
            field=models.CharField(blank=True, choices=[('topic', 'Topic'), ('video', 'Video'), ('audio', 'Audio'), ('exercise', 'Exercise'), ('document', 'Document'), ('image', 'Image')], max_length=200),
        ),
        migrations.AlterField(
            model_name='file',
            name='extension',
            field=models.CharField(blank=True, choices=[('mp4', 'mp4'), ('vtt', 'vtt'), ('srt', 'srt'), ('mp3', 'mp3'), ('pdf', 'pdf')], max_length=40),
        ),
        migrations.AlterField(
            model_name='file',
            name='preset',
            field=models.CharField(blank=True, choices=[('high_res_video', 'High resolution video'), ('low_res_video', 'Low resolution video'), ('vector_video', 'Vertor video'), ('thumbnail', 'Thumbnail'), ('thumbnail', 'Thumbnail'), ('caption', 'Caption')], max_length=150),
        ),
    ]
| Python | 0 | |
48172fa94043fb004dfaf564afac42e632be2bc0 | add test for DataManager | mpf/tests/test_DataManager.py | mpf/tests/test_DataManager.py | """Test the bonus mode."""
import time
from unittest.mock import mock_open, patch
from mpf.file_interfaces.yaml_interface import YamlInterface
from mpf.core.data_manager import DataManager
from mpf.tests.MpfTestCase import MpfTestCase
class TestDataManager(MpfTestCase):
    """Verify that DataManager writes data to disk and reads it back."""

    def testSaveAndLoad(self):
        # Disable YAML caching so every mocked open() is actually consulted.
        YamlInterface.cache = False
        # 1) A fresh (empty) file yields no stored keys.
        open_mock = mock_open(read_data="")
        with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
            manager = DataManager(self.machine, "machine_vars")
            self.assertTrue(open_mock.called)
        self.assertNotIn("hallo", manager.get_data())
        # 2) Saving a key writes serialized YAML, then atomically renames
        # the temp file into place on a background thread.
        open_mock = mock_open(read_data="")
        with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
            with patch('mpf.core.file_manager.os.rename') as move_mock:
                manager.save_key("hallo", "world")
                # NOTE(review): busy-wait for the writer thread has no
                # timeout -- a regression in the save path would hang the
                # test instead of failing it.
                while not move_mock.called:
                    time.sleep(.00001)
            open_mock().write.assert_called_once_with('hallo: world\n')
            self.assertTrue(move_mock.called)
        # 3) A new manager reading the written YAML sees the saved value.
        open_mock = mock_open(read_data='hallo: world\n')
        with patch('mpf.file_interfaces.yaml_interface.open', open_mock, create=True):
            manager2 = DataManager(self.machine, "machine_vars")
            self.assertTrue(open_mock.called)
        self.assertEqual("world", manager2.get_data()["hallo"])
        YamlInterface.cache = True
| Python | 0 | |
8d0f6ed81377e516c5bb266894a8cf39b6852383 | add multiple rsi sample | examples/multi_rsi.py | examples/multi_rsi.py | # ๅฏไปฅ่ชๅทฑimportๆไปฌๅนณๅฐๆฏๆ็็ฌฌไธๆนpythonๆจกๅ๏ผๆฏๅฆpandasใnumpy็ญใ
import talib
import numpy as np
import math
import pandas
# ๅจ่ฟไธชๆนๆณไธญ็ผๅไปปไฝ็ๅๅงๅ้ป่พใcontextๅฏน่ฑกๅฐไผๅจไฝ ็็ฎๆณ็ญ็ฅ็ไปปไฝๆนๆณไน้ดๅไผ ้ใ
def init(context):
    """Strategy setup: choose the securities to trade and the RSI parameters."""
    # Securities this strategy watches and trades.
    context.s1 = "000001.XSHE"
    context.s2 = "601988.XSHG"
    context.s3 = "000068.XSHE"
    context.stocks = [context.s1,context.s2,context.s3]
    update_universe(context.stocks)
    # RSI lookback window and overbought / oversold thresholds.
    context.TIME_PERIOD = 14
    context.HIGH_RSI = 85
    context.LOW_RSI = 30
    # Fraction of the remaining cash committed to each buy order.
    context.ORDER_PERCENT = 0.3
# Called on every data update for the subscribed securities (e.g. per
# daily bar in backtest, or per realtime update when trading live).
def handle_bar(context, bar_dict):
    """RSI mean-reversion: sell overbought holdings, buy oversold names.

    For each stock: compute the latest RSI over context.TIME_PERIOD closes,
    liquidate when RSI exceeds HIGH_RSI, and buy with ORDER_PERCENT of the
    remaining cash when RSI drops below LOW_RSI.
    """
    for stock in context.stocks:
        # Need TIME_PERIOD + 1 closes to produce one valid RSI value.
        prices = history(context.TIME_PERIOD+1,'1d','close')[stock].values
        # Latest RSI reading for this stock.
        rsi_data = talib.RSI(prices,timeperiod=context.TIME_PERIOD)[-1]
        curPosition = context.portfolio.positions[stock].quantity
        # Budget each buy at ORDER_PERCENT of the remaining cash.
        target_available_cash = context.portfolio.cash * context.ORDER_PERCENT
        # Overbought and currently held -> liquidate the position.
        if rsi_data > context.HIGH_RSI and curPosition > 0:
            order_target_value(stock,0)
        # Oversold -> add to the position with the cash budget above.
        if rsi_data < context.LOW_RSI:
            # (fixed log-message typo: was "caled")
            logger.info("target available cash calculated: " + str(target_available_cash))
            # If the budget cannot cover a minimum lot (100 shares), the
            # order management system will reject the order.
            order_value(stock, target_available_cash)
| Python | 0 | |
859b3de112549f070e7b56901b86d40e8b8c1f51 | update scorer | lib/scorer.py | lib/scorer.py | import numpy as np
import pandas as pd
import json
#from SPARQLWrapper import SPARQLWrapper, JSON
from collections import defaultdict
from operator import itemgetter
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
import optparse
from ranking import ndcg_at_k, average_precision
def scorer(embeddings, gold_standard,N, similarity):
similarity = similarity
gold_standard = pd.read_table(gold_standard, header = None)
candidate_scores = defaultdict(list)
sorted_candidate_scores = defaultdict(list)
e2v_embeddings = get_e2v_embedding(embeddings)
ndcg = {}
AP = {}
for i in gold_standard.values:
query_wiki_id = int(i[2])
candidate_wiki_id = int(i[4])
truth_value = int(i[5])
print query_wiki_id, candidate_wiki_id, truth_value
query_e2v = e2v_embeddings[e2v_embeddings[0] == query_wiki_id].values #query vector = [0.2,-0.3,0.1,0.7 ...]
candidate_e2v = e2v_embeddings[e2v_embeddings[0] == candidate_wiki_id].values
print query_e2v, candidate_e2v
candidate_scores[query_wiki_id].append((similarity_function(query_e2v,candidate_e2v, similarity),truth_value))
for q in candidate_scores.keys():
sorted_candidate_scores[q] = sorted(candidate_scores[q], key = itemgetter(0), reverse = True)
relevance = []
for score, rel in sorted_candidate_scores[q]:
relevance.append(rel)
ndcg[q] = ndcg_at_k(relevance,N)
AP[q] = average_precision(relevance)
print sorted_candidate_scores
print np.mean(ndcg.values()), np.mean(AP.values())
def similarity_function(vec1, vec2, similarity):
    """Compute a similarity score between two embedding row vectors.

    vec1/vec2 -- 2-D numpy arrays of shape (1, dim), as returned by a
        DataFrame row selection (an empty selection has length 0).
    similarity -- 'cosine', 'L1', 'L2' or 'linear_kernel'.

    Returns a float where larger means more similar.  The distance-based
    options map the distance d to 1.0 / (1.0 + d), which preserves the
    ranking induced by the distance.  (These two branches were previously
    stubs that always returned 0.)
    Raises NameError for an unknown similarity name.
    """
    v1 = np.array(vec1)
    v2 = np.array(vec2)
    if len(v1) * len(v2) == 0:  # at least one entity had no embedding row
        # NOTE(review): relies on the module-level `count` initialised in
        # the __main__ block -- fragile if this module is imported.
        global count
        count += 1
        return 0
    if similarity == 'cosine':
        return cosine_similarity(v1, v2)[0][0]  # sklearn returns a 1x1 matrix
    elif similarity == 'L1':
        # Manhattan distance mapped into (0, 1].
        return 1.0 / (1.0 + np.abs(v1 - v2).sum())
    elif similarity == 'L2':
        # Euclidean distance mapped into (0, 1].
        return 1.0 / (1.0 + np.sqrt(((v1 - v2) ** 2).sum()))
    elif similarity == 'linear_kernel':
        return linear_kernel(v1, v2)[0][0]
    else:
        raise NameError('Choose a valid similarity function')
def get_e2v_embedding(embeddings):
    """Load a word2vec-style embedding text file into a DataFrame.

    The first line (the "count dim" header) is skipped; each remaining
    line is space-separated with the entity id in column 0 followed by
    the vector components.  No header row is assumed.
    """
    frame = pd.read_csv(embeddings, sep=' ', header=None, skiprows=1)
    return frame
def wiki_to_local(wiki_id):
    """Map a Wikipedia page id to the local DBpedia entity id.

    Resolves the page id to its DBpedia resource URL via SPARQL, wraps it
    in angle brackets to match the dictionary's key format, then looks it
    up in the local JSON dictionary.  Raises KeyError if the URL is not in
    the dictionary.
    """
    url = get_url_from_id(wiki_id)
    url = '<' + url + '>'
    # Context manager guarantees the file is closed; json.load reads the
    # file object directly instead of buffering the whole text first.
    with open('dictionaries/dictionary_dbpedia2015.json') as json_open:
        json_dict = json.load(json_open)
    local_id = json_dict[url]
    return local_id
def get_url_from_id(wiki_id):
    """Resolve a Wikipedia page id to its DBpedia resource URL via SPARQL.

    NOTE(review): the `from SPARQLWrapper import SPARQLWrapper, JSON` line
    at the top of this module is commented out, so calling this function
    raises NameError until that import is restored.
    """
    sparql = SPARQLWrapper("http://dbpedia.org/sparql")
    # %d-interpolate the numeric page id into the query.
    sparql.setQuery("""
        PREFIX db: <http://dbpedia.org/resource/>
        SELECT *
        WHERE { ?s dbo:wikiPageID %d }
        """ %wiki_id)
    sparql.setReturnFormat(JSON)
    # Take the first binding's subject URI.
    return str(sparql.query().convert()['results']['bindings'][0]['s']['value'])
if __name__ == '__main__':
    # Command-line entry point; any argument not supplied on the command
    # line is prompted for (or defaulted) below.
    parser = optparse.OptionParser()
    parser.add_option('-i','--input', dest = 'file_name', help = 'file_name')
    parser.add_option('-g','--gold', dest = 'gold_standard_name', help = 'gold_standard_name')
    parser.add_option('-N','--number', dest = 'N', help = 'cutting threshold scorers',type = int)
    parser.add_option('-s','--similarity', dest = 'similarity', help = 'similarity measure')
    (options, args) = parser.parse_args()
    if options.file_name is None:
        options.file_name = raw_input('Enter file name:')
    if options.gold_standard_name is None:
        options.gold_standard_name = raw_input('Enter gold standard name:')
    if options.N is None:
        options.N = 10
    if options.similarity is None:
        options.similarity = 'cosine'
    file_name = options.file_name
    gold_standard_name = options.gold_standard_name
    N = options.N
    similarity = options.similarity
    # Module-level miss counter incremented by similarity_function() when
    # an entity has no embedding row.
    count = 0
    scorer(file_name, gold_standard_name,N, similarity)
print count | Python | 0.000001 | |
3dcfc2f7e9a2ed696a2b4a006e4d8a233a494f2f | move sitemap to core | djangobb_forum/sitemap.py | djangobb_forum/sitemap.py | from django.contrib.sitemaps import Sitemap
from djangobb_forum.models import Forum, Topic
class SitemapForum(Sitemap):
    """Sitemap section covering every forum."""
    priority = 0.5
    def items(self):
        return Forum.objects.all()
class SitemapTopic(Sitemap):
    """Sitemap section covering every topic."""
    priority = 0.5
    def items(self):
        # Fix: stray dataset/table junk fused onto this line made the
        # module unparsable; the statement itself is unchanged.
        return Topic.objects.all()
a6f26893189376f64b6be5121e840acc4cfeebae | ADD utils.py : model_to_json / expand_user_database methods | packages/syft/src/syft/core/node/common/tables/utils.py | packages/syft/src/syft/core/node/common/tables/utils.py | # grid relative
from .groups import Group
from .usergroup import UserGroup
from .roles import Role
def model_to_json(model):
    """Return a plain, JSON-serializable dict view of an SQLAlchemy-backed
    object.

    Credential columns are omitted entirely, and the datetime-valued
    columns are stringified so the result can be serialized directly.
    """
    _EXCLUDED = ("hashed_password", "salt")
    _DATE_COLUMNS = ("date", "created_at", "destroyed_at")
    payload = {}
    for column in model.__mapper__.attrs.keys():
        if column in _EXCLUDED:
            continue
        value = getattr(model, column)
        payload[column] = str(value) if column in _DATE_COLUMNS else value
    return payload
def expand_user_object(user, db):
    """Return the user as a dict with its role and groups fully embedded.

    The user's role id is replaced by the serialized Role row, and the
    user's UserGroup links are resolved to serialized Group rows.
    """

    def get_group(user_group):
        # Resolve one UserGroup link row to its serialized Group.
        query = db.session().query
        group = user_group.group
        group = query(Group).get(group)
        group = model_to_json(group)
        return group

    query = db.session().query
    # Serialize first, then overwrite the foreign-key fields in place.
    user = model_to_json(user)
    user["role"] = query(Role).get(user["role"])
    user["role"] = model_to_json(user["role"])
    user["groups"] = query(UserGroup).filter_by(user=user["id"]).all()
    user["groups"] = [get_group(user_group) for user_group in user["groups"]]
    return user
def seed_db(db):
    """Insert the four built-in roles (User, Compliance Officer,
    Administrator, Owner) in ascending order of privilege, committing
    once at the end."""
    role_definitions = (
        dict(name="User",
             can_triage_requests=False,
             can_edit_settings=False,
             can_create_users=False,
             can_create_groups=False,
             can_edit_roles=False,
             can_manage_infrastructure=False,
             can_upload_data=False),
        dict(name="Compliance Officer",
             can_triage_requests=True,
             can_edit_settings=False,
             can_create_users=False,
             can_create_groups=False,
             can_edit_roles=False,
             can_manage_infrastructure=False,
             can_upload_data=False),
        dict(name="Administrator",
             can_triage_requests=True,
             can_edit_settings=True,
             can_create_users=True,
             can_create_groups=True,
             can_edit_roles=False,
             can_manage_infrastructure=False,
             can_upload_data=True),
        dict(name="Owner",
             can_triage_requests=True,
             can_edit_settings=True,
             can_create_users=True,
             can_create_groups=True,
             can_edit_roles=True,
             can_manage_infrastructure=True,
             can_upload_data=True),
    )
    # Same effect as four inline Role(...) blocks: add each, commit once.
    for definition in role_definitions:
        db.add(Role(**definition))
    db.commit()
| Python | 0.000004 | |
d7d0af678a52b357ecf479660ccee1eab43c443f | Add gender choices model | accelerator/models/gender_choices.py | accelerator/models/gender_choices.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from accelerator_abstract.models import BaseGenderChoices
class GenderChoices(BaseGenderChoices):
    """Concrete gender-choices model, made replaceable via django-swapper."""
    class Meta(BaseGenderChoices.Meta):
        # Allows downstream projects to swap in their own model.
        swappable = swapper.swappable_setting(
            BaseGenderChoices.Meta.app_label, "GenderChoices")
| Python | 0.000534 | |
f64fbffcaab11f9532880c55c89c0bb0bd42cec8 | Add missing images | nclxd/nova/virt/lxd/images.py | nclxd/nova/virt/lxd/images.py | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tarfile
from oslo.config import cfg
from nova import utils
from nova.i18n import _, _LW, _LE, _LI
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.virt import images
from nova import exception
from . import utils as container_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ContainerImage(object):
    """Fetch a container image from Glance and unpack it into a rootfs.

    The rootfs is created either as a btrfs subvolume snapshot (when the
    instance directory lives on btrfs) or as a plain directory tree.
    """

    def __init__(self, context, instance, image_meta):
        self.idmap = container_utils.LXCUserIdMap()
        self.context = context
        self.image_meta = image_meta
        self.instance = instance
        # 0 means "no size limit" for images.fetch_to_raw().
        self.max_size = 0
        # Shared, per-image download cache: <lxd_root>/<image_cache_subdir>
        self.base_dir = os.path.join(CONF.lxd.lxd_root_dir,
                                     CONF.image_cache_subdirectory_name)
        # Per-instance tree: <lxd_root>/<uuid>/rootfs
        self.root_dir = os.path.join(CONF.lxd.lxd_root_dir,
                                     self.instance['uuid'])
        self.container_dir = os.path.join(self.root_dir, 'rootfs')
        self.image_dir = os.path.join(self.base_dir,
                                      self.instance['image_ref'])
        self.container_image = os.path.join(
            self.base_dir, '%s.tar.gz' % self.instance['image_ref'])

    def create_container(self):
        """Download the image (if not cached) and build the instance rootfs.

        Raises Invalid for unsupported disk formats, InvalidImageRef when
        the downloaded image is not a tarball, and NovaException when the
        rootfs already exists.
        """
        LOG.info(_LI('Fetching image from glance.'))
        disk_format = self.image_meta.get('disk_format')
        # Only 'root-tar' (or an unset format) is supported.
        if disk_format != 'root-tar' and disk_format is not None:
            msg = _('Unable to determine disk format for image.')
            raise exception.Invalid(msg)

        if not os.path.exists(self.base_dir):
            fileutils.ensure_tree(self.base_dir)
        if not os.path.exists(self.root_dir):
            fileutils.ensure_tree(self.root_dir)

        # Download once; subsequent instances of the same image reuse it.
        if not os.path.exists(self.container_image):
            images.fetch_to_raw(self.context, self.instance['image_ref'],
                                self.container_image,
                                self.instance['user_id'],
                                self.instance['project_id'],
                                max_size=self.max_size)

        if not tarfile.is_tarfile(self.container_image):
            msg = _('Not a valid tarfile')
            raise exception.InvalidImageRef(msg)

        if os.path.exists(self.container_dir):
            # (typo fixed: message previously read 'Contianer')
            msg = _('Container rootfs already exists')
            raise exception.NovaException(msg)

        # Pick the unpack strategy based on the backing filesystem type.
        (out, err) = utils.execute('stat', '-f', '-c', '%T', self.root_dir)
        filesystem_type = out.rstrip()
        if filesystem_type == 'btrfs':
            self.create_btrfs_container()
        else:
            self.create_local_container()

    def create_btrfs_container(self):
        """Unpack into a btrfs subvolume and snapshot it for the instance."""
        LOG.info(_LI('Creating btrfs container rootfs'))

        # Populate the shared image subvolume once per image.
        if not os.path.exists(self.image_dir):
            utils.execute('btrfs', 'subvolume', 'create', self.image_dir)
            self._write_image()

        # Snapshot the image subvolume as this instance's rootfs and cap
        # its size at the flavor's root disk via btrfs quota groups.
        size = self.instance['root_gb']
        utils.execute('btrfs', 'subvolume', 'snapshot', self.image_dir,
                      self.container_dir, run_as_root=True)
        if size != 0:
            utils.execute('btrfs', 'quota', 'enable', self.container_dir,
                          run_as_root=True)
            utils.execute('btrfs', 'qgroup', 'limit', '%sG' % size,
                          self.container_dir, run_as_root=True)

    def create_local_container(self):
        """Unpack the image directly into a plain directory rootfs."""
        LOG.info(_LI('Creating local container rootfs'))
        if not os.path.exists(self.container_dir):
            fileutils.ensure_tree(self.container_dir)
        self._write_image()

    def _write_image(self):
        """Extract the image tarball into container_dir and chown it to the
        container's mapped user/group."""
        (user, group) = self.idmap.get_user()
        # Exit code 2 (tar warnings) is tolerated.
        utils.execute('tar', '--directory', self.container_dir,
                      '--anchored', '--numeric-owner',
                      '-xpzf', self.container_image,
                      check_exit_code=[0, 2])
        utils.execute('chown', '-R', '%s:%s' % (user, group),
                      self.container_dir,
                      run_as_root=True)
95edeaa711e8c33e1b431f792e0f2638126ed461 | Add test case for dynamic ast | pymtl/tools/translation/dynamic_ast_test.py | pymtl/tools/translation/dynamic_ast_test.py | #=======================================================================
# verilog_from_ast_test.py
#=======================================================================
# This is the test case that verifies the dynamic AST support of PyMTL.
# This test is contributed by Zhuanhao Wu through #169, #170 of PyMTL v2.
#
# Author : Zhuanhao Wu, Peitian Pan
# Date : Jan 23, 2019
import pytest
import random
from ast import *
from pymtl import *
from pclib.test import run_test_vector_sim
from verilator_sim import TranslationTool
pytestmark = requires_verilator
class ASTRTLModel(Model):
    """A 2-bit clocked adder whose update block is built from a hand-made
    Python AST instead of real source, to exercise PyMTL's dynamic-AST
    support."""

    def __init__( s ):
        s.a = InPort(2)
        s.b = InPort(2)
        s.out = OutPort(2)

        # a simple clocked adder -- this is what the AST below encodes:
        # @s.posedge_clk
        # def logic():
        #     s.out.next = s.a + s.b

        # Build the equivalent function definition as an AST by hand.
        tree = Module(body=[
            FunctionDef(name='logic', args=arguments(args=[], defaults=[]),
                body= [
                    Assign(targets=[
                        Attribute(value=Attribute(value=Name(id='s', ctx=Load()), attr='out', ctx=Load()), attr='next', ctx=Store())
                    ],
                    value=BinOp(left=Attribute(value=Name(id='s', ctx=Load()), attr='a', ctx=Load()), op=Add(), right=Attribute(value=Name(id='s', ctx=Load()), attr='b', ctx=Load()))
                    )
                ],
                decorator_list=[
                    Attribute(value=Name(id='s', ctx=Load()), attr='posedge_clk', ctx=Load())
                ],
                returns=None)
        ])
        tree = fix_missing_locations(tree)
        # Specify the union of globals() and locals() so the free
        # variables in the closure can be captured.
        # NOTE(review): this is the Python-2 `exec ... in ...` statement
        # form; `globals().update(locals())` returns None -- presumably the
        # exec then runs in the current scope, binding `logic`.  Confirm
        # before porting to Python 3.
        exec(compile(tree, filename='<ast>', mode='exec')) in globals().update( locals() )
        # As with #175, the user needs to supplement the dynamic AST to
        # the .ast field of the generated function object.
        logic.ast = tree
def test_ast_rtl_model_works_in_simulation():
    """Simulate the AST-built adder against random input vectors."""
    mod = ASTRTLModel()
    test_vector_table = [('a', 'b', 'out*')]
    last_result = '?'
    # The adder is clocked, so each row checks the previous cycle's sum;
    # the first cycle's output is unknown ('?').
    for i in xrange(3):
        rv1 = Bits(2, random.randint(0, 3))
        rv2 = Bits(2, random.randint(0, 3))
        test_vector_table.append( [ rv1, rv2, last_result ] )
        # Wrapping in Bits(2, ...) truncates the sum to 2 bits, matching
        # the hardware's overflow behaviour.
        last_result = Bits(2, rv1 + rv2)
    run_test_vector_sim(mod, test_vector_table)
def test_ast_rtl_model_to_verilog():
    """The translation tool must accept a model built from a dynamic AST."""
    mod = ASTRTLModel()
    # TranslationTool should successfully compile ASTRTLModel
    tool = TranslationTool(mod)
| Python | 0.000002 | |
49cab51aa8697a56c7cf74e45b77d9a20ad1a178 | add topk/gen.py | topk/gen.py | topk/gen.py | #!/usr/bin/python
import random

# Length of each randomly generated word.
word_len = 5
# Candidate characters; A-Z appears twice, so letters are drawn twice as
# often as digits/underscore/hyphen (NOTE(review): presumably intentional
# weighting -- confirm).
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-'

# Draw N random words; duplicates collapse in the set, so the final count
# printed below is the number of distinct words.
N = 1000*1000
words = set()
for x in range(N):
    arr = [random.choice(alphabet) for i in range(word_len)]
    words.add(''.join(arr))
print(len(words))

# Emit "word<TAB>count" lines.  Fixes: the output file was previously
# opened at module top and never closed/flushed; `xrange` also clashed
# with the `range` used above.
with open('word_count', 'w') as output:
    for word in words:
        output.write(word)
        output.write('\t')
        output.write(str(random.randint(1, 2*N)))
        output.write('\n')
| Python | 0 | |
c5da52c38d280873066288977f021621cb9653d0 | Apply orphaned migration | project/apps/api/migrations/0010_remove_chart_song.py | project/apps/api/migrations/0010_remove_chart_song.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the Chart.song foreign key (applies a previously orphaned change)."""

    dependencies = [
        ('api', '0009_auto_20150722_1041'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='chart',
            name='song',
        ),
    ]
| Python | 0 | |
ab5d1fd5728b9c2f27d74c2896e05a94b061f3f9 | add config.py | config.py | config.py | # twilio account details
account = ""
token = ""
| Python | 0.000002 | |
42cc2864f03480e29e55bb0ef0b30e823c11eb2f | complete check online window | check_online_window.py | check_online_window.py | import tkinter as tk
import tkinter.messagebox as tkmb
from online_check import CheckSomeoneOnline
class open_check_online_window():
    """Tool window that asks for an IP address and reports whether that
    host is online (delegating to online_check.CheckSomeoneOnline)."""

    def __init__(self, x, y):
        self.co = tk.Tk()
        self.co.title('enter ip to check')
        self.co.resizable(False, False)
        self.co.wm_attributes("-toolwindow", 1)
        self.entry = tk.Entry(self.co, width=15)
        self.entry.pack(side='left', fill='both')
        # Fix: on_return existed but was never bound, so pressing Enter
        # did nothing; bind it to the same action as the button.
        self.entry.bind('<Return>', self.on_return)
        check = tk.Button(self.co,
                          text='check',
                          relief='flat',
                          command=self.check_online)
        check.pack(side='right', fill='both')
        # Place the window at the requested screen coordinates.
        self.co.geometry('+%d+%d' % (x, y))
        self.co.mainloop()

    def on_return(self, event):
        """<Return> key handler: run the same check as the button."""
        self.check_online()

    def check_online(self):
        """Check the entered IP, show the result, then close the window."""
        ip = self.entry.get()
        try:
            # Fix: the real check was stubbed out as `if True:` for
            # debugging; use the actual reachability test again.
            if CheckSomeoneOnline(ip):
                tkmb.showinfo('online check', ip+'is online')
            else:
                tkmb.showinfo('online check', ip+'is offline')
        except Exception as err:
            tkmb.showerror('Error', err)
        self.co.quit()
if __name__ == '__main__':
open_check_online_window(600, 300) | Python | 0 | |
956da3bc7ff7971b9b6cc76495fcb5b2e4145d6e | Handle smtplib.SMTPRecipientsRefused and defer the message properly. | mailerdev/mailer/engine.py | mailerdev/mailer/engine.py | import time
import smtplib
from lockfile import FileLock
from socket import error as socket_error
from models import Message, DontSendEntry, MessageLog
from django.core.mail import send_mail as core_send_mail
## configuration settings
# @@@ eventually move to settings.py
# when queue is empty, how long to wait (in seconds) before checking again
EMPTY_QUEUE_SLEEP = 30
def prioritize():
    """
    Yield the messages in the queue in the order they should be sent.
    """
    # Drain strategy: exhaust high-priority messages first, interleave
    # medium-priority ones one at a time (so newly arrived high-priority
    # mail can jump ahead), and only touch low-priority messages when the
    # other two queues are empty.
    # NOTE(review): every .count() issues its own query and the checks are
    # racy under concurrent producers -- presumably acceptable for a
    # single queue-runner process; verify before adding workers.
    while True:
        while Message.objects.high_priority().count() or Message.objects.medium_priority().count():
            while Message.objects.high_priority().count():
                for message in Message.objects.high_priority().order_by('when_added'):
                    yield message
            while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count():
                yield Message.objects.medium_priority().order_by('when_added')[0]
        while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count() == 0 and Message.objects.low_priority().count():
            yield Message.objects.low_priority().order_by('when_added')[0]
        # Stop once nothing sendable (non-deferred) remains.
        if Message.objects.non_deferred().count() == 0:
            break
def send_all():
    """
    Send all eligible messages in the queue.
    """
    print "-" * 72
    # File lock ensures only one queue runner sends at a time.
    lock = FileLock("send_mail")
    print "acquiring lock..."
    lock.acquire()
    print "acquired."
    start_time = time.time()
    dont_send = 0
    deferred = 0
    sent = 0
    try:
        for message in prioritize():
            # Blocked recipients are logged and dropped, never sent.
            if DontSendEntry.objects.has_address(message.to_address):
                print "skipping email to %s as on don't send list " % message.to_address
                MessageLog.objects.log(message, 2) # @@@ avoid using literal result code
                message.delete()
                dont_send += 1
            else:
                try:
                    print "sending message '%s' to %s" % (message.subject.encode("utf-8"), message.to_address.encode("utf-8"))
                    core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
                    MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
                    message.delete()
                    sent += 1
                # @@@ need to catch some other things here too
                except (socket_error, smtplib.SMTPRecipientsRefused), err:
                    # Connection failures and refused recipients both defer
                    # the message for a later send_all() run.
                    message.defer()
                    print "message deferred due to failure: %s" % err
                    MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
                    deferred += 1
    finally:
        # Always release the lock, even if sending blew up.
        print "releasing lock..."
        lock.release()
        print "released."
    print
    print "%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send)
    print "done in %.2f seconds" % (time.time() - start_time)
def send_loop():
    """
    Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
    sending messages if any are on queue.
    """
    while True:
        # NOTE(review): Message.objects.all() evaluates the whole queryset
        # just to test emptiness -- .exists() would avoid that; confirm
        # before changing long-standing behaviour.
        while not Message.objects.all():
            print 'sleeping for %s seconds before checking queue again' % EMPTY_QUEUE_SLEEP
            time.sleep(EMPTY_QUEUE_SLEEP)
        send_all()
| import time
from lockfile import FileLock
from socket import error as socket_error
from models import Message, DontSendEntry, MessageLog
from django.core.mail import send_mail as core_send_mail
## configuration settings
# @@@ eventually move to settings.py
# when queue is empty, how long to wait (in seconds) before checking again
EMPTY_QUEUE_SLEEP = 30
def prioritize():
"""
Yield the messages in the queue in the order they should be sent.
"""
while True:
while Message.objects.high_priority().count() or Message.objects.medium_priority().count():
while Message.objects.high_priority().count():
for message in Message.objects.high_priority().order_by('when_added'):
yield message
while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count():
yield Message.objects.medium_priority().order_by('when_added')[0]
while Message.objects.high_priority().count() == 0 and Message.objects.medium_priority().count() == 0 and Message.objects.low_priority().count():
yield Message.objects.low_priority().order_by('when_added')[0]
if Message.objects.non_deferred().count() == 0:
break
def send_all():
"""
Send all eligible messages in the queue.
"""
print "-" * 72
lock = FileLock("send_mail")
print "acquiring lock..."
lock.acquire()
print "acquired."
start_time = time.time()
dont_send = 0
deferred = 0
sent = 0
try:
for message in prioritize():
if DontSendEntry.objects.has_address(message.to_address):
print "skipping email to %s as on don't send list " % message.to_address
MessageLog.objects.log(message, 2) # @@@ avoid using literal result code
message.delete()
dont_send += 1
else:
try:
print "sending message '%s' to %s" % (message.subject.encode("utf-8"), message.to_address.encode("utf-8"))
core_send_mail(message.subject, message.message_body, message.from_address, [message.to_address])
MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
message.delete()
sent += 1
# @@@ need to catch some other things here too
except socket_error, err:
message.defer()
print "message deferred due to failure: %s" % err
MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
deferred += 1
finally:
print "releasing lock..."
lock.release()
print "released."
print
print "%s sent; %s deferred; %s don't send" % (sent, deferred, dont_send)
print "done in %.2f seconds" % (time.time() - start_time)
def send_loop():
"""
Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
sending messages if any are on queue.
"""
while True:
while not Message.objects.all():
print 'sleeping for %s seconds before checking queue again' % EMPTY_QUEUE_SLEEP
time.sleep(EMPTY_QUEUE_SLEEP)
send_all()
| Python | 0.000377 |
05b9859fb7d4577dfa95ec9edd3a6f16bf0fd86e | Create __init__.py | fade/fade/__init__.py | fade/fade/__init__.py | Python | 0.000429 | ||
f9b5ac91ae53f643c8aca2bab7eb0cb8b6a997cd | add script to split text in sentences | add_sentences.py | add_sentences.py | # -*- coding: utf-8 -*-
import os
import argparse
from lxml import etree
import fnmatch
import time
import json
import nltk
def timeit(method):
"""Time methods."""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print('%r %2.2f sec' %
(method.__name__, te-ts))
return result
return timed
class AddSentences(object):
"""Split text in sentences."""
@timeit
def __init__(self):
self.cli()
self.infiles = self.get_files(self.indir, self.pattern)
self.n_proceedings = 0
self.loc = self.get_localized_vars()
self.tokenizer = self.init_tokenizer()
self.main()
def __str__(self):
message = "{} EuroParl's {} proceedings processed!".format(
str(self.n_proceedings),
self.language)
return message
def get_files(self, directory, fileclue):
"""Get all files in a directory matching a pattern.
Keyword arguments:
directory -- a string for the input folder path
fileclue -- a string as glob pattern
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, fileclue):
matches.append(os.path.join(root, filename))
return matches
def get_localized_vars(self):
"""Import localized variables from JSON file."""
fname = self.language+".json"
fpath = os.path.join('localization', fname)
with open(fpath, mode="r", encoding="utf-8") as jfile:
content = jfile.read()
vars = json.loads(content)
return vars
def read_xml(self, infile):
"""Parse a XML file.
Keyword arguments:
infile -- a string for the path to the file to be read.
"""
parser = etree.XMLParser(remove_blank_text=True)
with open(infile, encoding='utf-8', mode='r') as input:
return etree.parse(input, parser)
def serialize(self, infile, root):
"""Serialize Element as XML file.
Keyword arguments:
infile -- a string for the path to the input file processed.
root -- Element to be serialized as XML.
"""
ofile_name = os.path.splitext(os.path.basename(infile))[0]
ofile_path = os.path.join(self.outdir, ofile_name+'.xml')
xml = etree.tostring(
root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True).decode('utf-8')
with open(ofile_path, mode='w', encoding='utf-8') as ofile:
ofile.write(xml)
pass
def init_tokenizer(self):
"""Instantiate a tokenizer suitable for the language at stake."""
lang = {'en': 'english', 'de': 'german', 'es': 'spanish'}
tokenizer = nltk.data.load(
'tokenizers/punkt/{}.pickle'.format(lang[self.language]))
if 'extra_abbreviations' in self.loc:
tokenizer._params.abbrev_types.update(
self.loc['extra_abbreviations'])
return tokenizer
def get_sentences(self, element):
"""Split element's text in sentences.
Keyword arguments:
element -- Element whose text has to be split.
"""
text = element.text
sentences = self.tokenizer.tokenize(text)
element.text = None
for sentence in sentences:
etree.SubElement(element, 's').text = sentence
pass
def main(self):
for infile in self.infiles:
print(infile)
tree = self.read_xml(infile)
elements = tree.xpath('.//{}'.format(self.element))
for e in elements:
self.get_sentences(e)
self.serialize(infile, tree)
self.n_proceedings += 1
pass
def cli(self):
"""CLI parses command-line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input",
required=True,
help="path to the input directory.")
parser.add_argument(
"-o", "--output",
required=True,
help="path to the output directory.")
parser.add_argument(
"-l", "--language",
required=True,
choices=['en', 'es', 'de'],
help="language of the version to be processed.")
parser.add_argument(
"-e", "--element",
required=False,
default='p',
help="XML element containing the text to be split in sentences.")
parser.add_argument(
'-p', "--pattern",
required=False,
default="*.xml",
help="glob pattern to filter files.")
args = parser.parse_args()
self.indir = args.input
self.outdir = args.output
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.language = args.language
self.element = args.element
self.pattern = args.pattern
pass
print(AddSentences())
| Python | 0.000087 | |
1b6fecb5819fbead0aadcc1a8669e915542c5ea0 | Add script for gameifying testing | other/testing-game.py | other/testing-game.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import subprocess
import re
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help='The directory to search for files in', required=False, default=os.getcwd())
args = parser.parse_args()

# author name -> number of Objective-C test methods git blame attributes to them
names = {}
for root, dirs, files in os.walk(args.directory):
    for name in files:
        filename, fileextension = os.path.splitext(name)
        absfile = os.path.join(root, name)
        # Only Objective-C(++) sources can contain XCTest cases.
        if fileextension == '.m' or fileextension == '.mm':
            try:
                with open(absfile) as sourcefile:
                    source = sourcefile.read()
                    if source.find('XCTestCase') != -1:
                        # Ask git who last touched each line of the file.
                        p = subprocess.Popen(['git', 'blame', absfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        out, err = p.communicate()
                        for blame_line in out.splitlines():
                            # Test methods look like "- (void)testSomething".
                            if blame_line.replace(' ', '').find('-(void)test') != -1:
                                # blame annotation: <sha> (<author...> <date> <time> <tz> <lineno>) <code>
                                blame_info = blame_line[blame_line.find('(')+1:]
                                blame_info = blame_info[:blame_info.find(')')]
                                blame_components = blame_info.split()
                                # Drop the trailing date/time/zone/line-number fields.
                                name_components = blame_components[:len(blame_components)-4]
                                name = ' '.join(name_components)
                                name_count = names.get(name, 0)
                                names[name] = name_count + 1
            except Exception:
                # Fix: this message was previously built but never printed
                # (a bare string expression), and the bare `except:` also
                # swallowed KeyboardInterrupt/SystemExit.
                print('Could not open file: ' + absfile)
print(names)
908013aa5e64589b6c1c6495812a13109244a69a | add dottests testconfig, but leave it deactivted yet. lots of import failures, since no imports are declared explicitly | src/icalendar/tests/XXX_test_doctests.py | src/icalendar/tests/XXX_test_doctests.py | from interlude import interact
import doctest
import os.path
import unittest
# Doctest comparison options shared by all suites below.
OPTIONFLAGS = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

# Text files (relative to this directory) containing doctests.
DOCFILES = [
    'example.txt',
    'groupscheduled.txt',
    'multiple.txt',
    'recurrence.txt',
    'small.txt'
]

# Modules whose inline docstrings are run as doctests.
DOCMODS = [
    'icalendar.caselessdict',
    'icalendar.cal',
    'icalendar.parser',
    'icalendar.prop',
]
def test_suite():
    """Assemble one unittest suite from the doctest files and modules
    listed in DOCFILES / DOCMODS, exposing `interact` to every test."""
    suite = unittest.TestSuite()
    here = os.path.dirname(__file__)
    for docfile in DOCFILES:
        suite.addTest(doctest.DocFileSuite(
            os.path.join(here, docfile),
            module_relative=False,
            optionflags=OPTIONFLAGS,
            globs={'interact': interact},
        ))
    for docmod in DOCMODS:
        suite.addTest(doctest.DocTestSuite(
            docmod,
            optionflags=OPTIONFLAGS,
            globs={'interact': interact},
        ))
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| Python | 0 | |
4a99dcd629a830ad1ec0c658f312a4793dec240b | add basic file for parser | RedBlue/Parser3.py | RedBlue/Parser3.py |
class Parser(object):
    """Skeleton HTML parser (not yet implemented)."""

    @classmethod
    def read_html(cls, html):
        # TODO: parse the given HTML; currently a stub that returns None.
        pass
3fee2399901bfc91b5eb5dfc71d17b008dd4b7fb | Add limit protection tests | keystone/tests/unit/protection/v3/test_limits.py | keystone/tests/unit/protection/v3/test_limits.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from six.moves import http_client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
def _create_limit_and_dependencies():
    """Create a limit and its dependencies to test with.

    A project limit requires a registered limit, which in turn requires a
    service; this builds the whole chain with random identifiers.

    Returns the list of created limits (a single entry).
    """
    # A registered limit must belong to a service.
    service = PROVIDERS.catalog_api.create_service(
        uuid.uuid4().hex, unit.new_service_ref()
    )
    registered_limit = unit.new_registered_limit_ref(
        service_id=service['id'], id=uuid.uuid4().hex
    )
    registered_limits = (
        PROVIDERS.unified_limit_api.create_registered_limits(
            [registered_limit]
        )
    )
    # The API returns the created refs; keep the canonical copy.
    registered_limit = registered_limits[0]
    project = PROVIDERS.resource_api.create_project(
        uuid.uuid4().hex,
        unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
    )
    # The project limit overrides the registered default for this project.
    limit = unit.new_limit_ref(
        project_id=project['id'], service_id=service['id'],
        resource_name=registered_limit['resource_name'],
        resource_limit=5, id=uuid.uuid4().hex
    )
    limits = PROVIDERS.unified_limit_api.create_limits([limit])
    return limits
class _UserLimitTests(object):
    """Common default functionality for all users except system admins."""
    # Mixed into persona-specific test cases.  Every non-admin persona may
    # read limits but must be rejected (403 FORBIDDEN) on create, update
    # and delete.  Subclasses provide ``self.headers`` holding a token for
    # the persona under test.

    def test_user_can_get_limit_model(self):
        with self.test_client() as c:
            c.get('/v3/limits/model', headers=self.headers)

    def test_user_can_get_a_limit(self):
        limits = _create_limit_and_dependencies()
        limit = limits[0]
        with self.test_client() as c:
            r = c.get('/v3/limits/%s' % limit['id'], headers=self.headers)
            self.assertEqual(limit['id'], r.json['limit']['id'])

    def test_user_can_list_limits(self):
        limits = _create_limit_and_dependencies()
        limit = limits[0]
        with self.test_client() as c:
            r = c.get('/v3/limits', headers=self.headers)
            # Only the single limit created above should be visible.
            self.assertTrue(len(r.json['limits']) == 1)
            self.assertEqual(limit['id'], r.json['limits'][0]['id'])

    def test_user_cannot_create_limits(self):
        # Build the service/registered-limit/project chain inline so the
        # POST body is valid and only the policy check can reject it.
        service = PROVIDERS.catalog_api.create_service(
            uuid.uuid4().hex, unit.new_service_ref()
        )
        registered_limit = unit.new_registered_limit_ref(
            service_id=service['id'], id=uuid.uuid4().hex
        )
        registered_limits = (
            PROVIDERS.unified_limit_api.create_registered_limits(
                [registered_limit]
            )
        )
        registered_limit = registered_limits[0]
        project = PROVIDERS.resource_api.create_project(
            uuid.uuid4().hex,
            unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
        )
        create = {
            'limits': [
                unit.new_limit_ref(
                    project_id=project['id'], service_id=service['id'],
                    resource_name=registered_limit['resource_name'],
                    resource_limit=5
                )
            ]
        }
        with self.test_client() as c:
            c.post(
                '/v3/limits', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_limits(self):
        limits = _create_limit_and_dependencies()
        limit = limits[0]
        update = {'limits': {'description': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/limits/%s' % limit['id'], json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_limits(self):
        limits = _create_limit_and_dependencies()
        limit = limits[0]
        with self.test_client() as c:
            c.delete(
                '/v3/limits/%s' % limit['id'],
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _UserLimitTests):
    """Exercise _UserLimitTests as a user with a system-scoped reader role."""

    def setUp(self):
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Enforce scope so system-scope policy rules actually apply.
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        system_reader = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_reader
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_reader['password'],
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
| Python | 0.000002 | |
0648ca26ba195e4d5ce55d801975a161907e655f | Add test for translation | aldryn_faq/tests/test_aldryn_faq.py | aldryn_faq/tests/test_aldryn_faq.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase # , TransactionTestCase
# from django.utils import translation
from hvad.test_utils.context_managers import LanguageOverride
from aldryn_faq.models import Category, Question
# English fixture values for the category/question pair.
EN_CAT_NAME = "Example"
EN_CAT_SLUG = "example"
EN_QUE_TITLE = "Test Question"
EN_QUE_ANSWER_TEXT = "Test Answer"

# German translations of the same fixtures.
DE_CAT_NAME = "Beispiel"
DE_CAT_SLUG = "beispiel"
DE_QUE_TITLE = "Testfrage"
DE_QUE_ANSWER_TEXT = "Test Antwort"
class AldrynFaqTestMixin(object):
    """Shared fixture helpers: builds an EN question/category with a DE
    translation in setUp()."""

    @staticmethod
    def reload(object):
        """Simple convenience method for re-fetching an object from the ORM."""
        return object.__class__.objects.get(id=object.id)

    def mktranslation(self, obj, lang, **kwargs):
        """Simple method of adding a translation to an existing object."""
        # NOTE(review): kwargs.iteritems() is Python 2 only.
        obj.translate(lang)
        for k, v in kwargs.iteritems():
            setattr(obj, k, v)
        obj.save()

    def setUp(self):
        """Setup a prebuilt and translated Question with Category
        for testing."""
        with LanguageOverride("en"):
            self.category = Category(**{
                "name": EN_CAT_NAME,
                "slug": EN_CAT_SLUG
            })
            self.category.save()

            self.question = Question(**{
                "title": EN_QUE_TITLE,
                "answer_text": EN_QUE_ANSWER_TEXT,
            })
            self.question.category = self.category
            self.question.save()

        # Make a DE translation of the category
        self.mktranslation(self.category, "de", **{
            "name": DE_CAT_NAME,
            "slug": DE_CAT_SLUG,
        })

        # Make a DE translation of the question
        self.mktranslation(self.question, "de", **{
            "title": DE_QUE_TITLE,
            "answer_text": DE_QUE_ANSWER_TEXT,
        })
class TestFAQTranslations(AldrynFaqTestMixin, TestCase):
    """Verify both language variants of the fixtures round-trip via the ORM."""

    def test_fetch_faq_translations(self):
        """Test we can fetch arbitrary translations of the question and
        its category."""
        # Can we target the EN values?
        with LanguageOverride("en"):
            question = self.reload(self.question)
            category = self.reload(self.question.category)
            self.assertEqual(question.title, EN_QUE_TITLE)
            self.assertEqual(question.answer_text, EN_QUE_ANSWER_TEXT)
            self.assertEqual(category.name, EN_CAT_NAME)
            self.assertEqual(category.slug, EN_CAT_SLUG)

        # And the DE values?
        with LanguageOverride("de"):
            question = self.reload(self.question)
            category = self.reload(self.question.category)
            self.assertEqual(question.title, DE_QUE_TITLE)
            self.assertEqual(question.answer_text, DE_QUE_ANSWER_TEXT)
            self.assertEqual(category.name, DE_CAT_NAME)
            self.assertEqual(category.slug, DE_CAT_SLUG)
| Python | 0.000001 | |
d1f71e1c6468799247d07d810a6db7d0ad5f89b0 | add support for jinja2 template engine | alaocl/jinja2.py | alaocl/jinja2.py | from alaocl import *
#__all__ = (
# 'addOCLtoEnvironment',
#)
# Jinja2 filters: OCL collection converters usable as ``x|asSet`` etc.
_FILTERS = {
    'asSet': asSet,
    'asBag': asBag,
    'asSeq': asSeq,
}

# Names injected into the template global namespace.  Kept consistent with
# _FILTERS: each converter is exposed under its own name.
_GLOBALS = {
    'floor': floor,
    'isUndefined': isUndefined,
    'oclIsUndefined': oclIsUndefined,
    'oclIsKindOf': oclIsKindOf,
    'oclIsTypeOf': oclIsTypeOf,
    'isCollection': isCollection,
    'asSet': asSet,
    'asBag': asBag,
    # BUG FIX: 'asSeq' was bound to emptyCollection, inconsistent with
    # _FILTERS where 'asSeq' maps to asSeq.  emptyCollection is now exposed
    # under its own name instead.  NOTE(review): confirm no template relied
    # on the old (mis)binding.
    'asSeq': asSeq,
    'emptyCollection': emptyCollection,
}
# Detect whether we are running inside the Modelio scripting environment.
try:
    # noinspection PyUnresolvedReferences
    from org.modelio.api.modelio import Modelio
    WITH_MODELIO = True
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# ImportError would be the precise exception here.
except:
    WITH_MODELIO = False

if WITH_MODELIO:
    # TODO: in fact, this piece of code should be in modelio
    # and it should be possible to import global stuff at once
    # - at the top level script
    # - as jinja global
    # - in any python module

    # Lambda expressions cannot be defined directly in the loop. See below:
    # http://stackoverflow.com/questions/841555/
    # whats-going-on-with-the-lambda-expression-in-this-python-function?rq=1
    def _newIsInstanceFun(metaInterface):
        # Factory so each lambda captures its own metaInterface binding.
        return lambda e: isinstance(e, metaInterface)

    from alaocl.modelio import allMetaInterfaces
    # Expose every Modelio meta-interface (and an is<Name> predicate) both
    # to templates (_GLOBALS) and to this module's namespace (globals()).
    for m_interface in allMetaInterfaces():
        metaName = m_interface.metaName
        _GLOBALS[metaName] = m_interface
        isFunction = _newIsInstanceFun(m_interface)
        _GLOBALS['is' + metaName] = isFunction
        globals()['is' + metaName] = isFunction
def addOCLtoEnvironment(jinja2Environment):
    """
    Add OCL functions to a jinja2 environment so that OCL can be
    used in jinja2 templates.

    :param jinja2Environment: Jinja2 environment to be instrumented.
    :type jinja2Environment: jinja2.Environment
    :return: The modified environment.
    :rtype: jinja2.Environment
    """
    jinja2Environment.filters.update(_FILTERS)
    jinja2Environment.globals.update(_GLOBALS)
    # BUG FIX: the docstring promises the environment is returned, but the
    # original fell off the end and returned None.
    return jinja2Environment
| Python | 0 | |
04142e3bd0c09e6f712669529e780a18c11c7076 | Add script to validate files are valid DocBook. | validate.py | validate.py | #!/usr/bin/env python
'''
Usage:
validate.py [path]
Validates all xml files against the DocBook 5 RELAX NG schema.
Options:
path Root directory, defaults to <repo root>/doc/src/doc/docbkx
Ignores pom.xml files and subdirectories named "target".
Requires Python 2.7 or greater (for argparse) and the lxml Python library.
'''
from lxml import etree
import argparse
import os
import subprocess
import sys
import urllib2
def get_schema():
    """Return the DocBook RELAX NG schema"""
    # Fetched over the network on every call (no caching).
    # NOTE(review): urllib2 is Python 2 only, matching the rest of this file.
    url = "http://www.docbook.org/xml/5.0/rng/docbookxi.rng"
    relaxng_doc = etree.parse(urllib2.urlopen(url))
    return etree.RelaxNG(relaxng_doc)
def validation_failed(schema, doc):
    """Return True if the parsed *doc* fails validation against *schema*.

    Failures whose only cause is an unresolved IDREF (error type
    DTD_UNKNOWN_ID) are ignored: files are validated one at a time while
    being imported, so cross-file link targets are legitimately absent.
    """
    if schema.validate(doc):
        return False
    real_errors = [log for log in schema.error_log
                   if log.type_name != "DTD_UNKNOWN_ID"]
    return bool(real_errors)
def error_message(error_log):
    """Render *error_log* as a newline-separated string.

    IDREF failures (DTD_UNKNOWN_ID) are filtered out as false positives.
    The remaining entries are emitted in reverse order so that the
    earliest failures are reported first.
    """
    lines = []
    for entry in reversed(list(error_log)):
        if entry.type_name != 'DTD_UNKNOWN_ID':
            lines.append(str(entry))
    return "\n".join(lines)
def main(rootdir):
    """Validate every DocBook .xml file under *rootdir*; exit 1 on failure.

    Skips maven 'target' directories and pom.xml files.  Uses Python 2
    print statements, like the rest of this script.
    """
    schema = get_schema()
    any_failures = False
    for root, dirs, files in os.walk(rootdir):
        # Don't descend into 'target' subdirectories
        try:
            ind = dirs.index('target')
            del dirs[ind]
        except ValueError:
            pass
        for f in files:
            # Ignore maven files, which are called pom.xml
            if f.endswith('.xml') and f != 'pom.xml':
                try:
                    path = os.path.abspath(os.path.join(root, f))
                    doc = etree.parse(path)
                    if validation_failed(schema, doc):
                        any_failures = True
                        print error_message(schema.error_log)
                except etree.XMLSyntaxError as e:
                    any_failures = True
                    print "%s: %s" % (path, e)
    # Non-zero exit so CI can fail the build on any invalid file.
    if any_failures:
        sys.exit(1)
def default_root():
    """Return the location of openstack-manuals/doc/src/docbkx

    The current working directory must be inside of the openstack-manuals
    repository for this method to succeed"""
    # Ask git for the repository root rather than guessing from cwd.
    args = ["git", "rev-parse", "--show-toplevel"]
    gitroot = subprocess.check_output(args).rstrip()
    return os.path.join(gitroot, "doc/src/docbkx")
if __name__ == '__main__':
    # Optional positional path; defaults to the repo's DocBook source dir.
    parser = argparse.ArgumentParser(description="Validate XML files against "
                                     "the DocBook 5 RELAX NG schema")
    parser.add_argument('path', nargs='?', default=default_root(),
                        help="Root directory that contains DocBook files, "
                        "defaults to `git rev-parse --show-toplevel`/doc/src/"
                        "docbkx")
    args = parser.parse_args()
    main(args.path)
| Python | 0.000001 | |
7cb62f554fa293a2ba4d0456ed8d04e8f277d2c1 | Add migrations/0146_clean_lexeme_romanised_3.py | ielex/lexicon/migrations/0146_clean_lexeme_romanised_3.py | ielex/lexicon/migrations/0146_clean_lexeme_romanised_3.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
    """Replace legacy phonetic characters in every Lexeme.romanised.

    NOTE(review): the key/value characters in replaceMap below look
    mojibake'd in this copy of the file (UTF-8 Greek/IPA read in a wrong
    encoding) -- verify them against the repository before relying on them.
    """
    # Use the historical model state, as Django data migrations require.
    Lexeme = apps.get_model("lexicon", "Lexeme")
    replaceMap = {
        'ฮป': 'ส',
        'ฯ': 'ษธ'
    }
    for lexeme in Lexeme.objects.all():
        # Only touch (and save) rows containing at least one target char.
        if len(set(replaceMap.keys()) & set(lexeme.romanised)):
            for k, v in replaceMap.items():
                lexeme.romanised = lexeme.romanised.replace(k, v)
            lexeme.save()
def reverse_func(apps, schema_editor):
    """No-op reverse migration: the character replacement is not undone."""
    return None
class Migration(migrations.Migration):
    """Data migration 0146: clean mojibake/legacy chars in Lexeme.romanised."""

    dependencies = [('lexicon', '0145_fix_language_distributions')]

    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| Python | 0 | |
8f1b1ef01e74782f57da9c9489a3a7f6555bbee6 | Add tests for reports views. | annotran/reports/test/views_test.py | annotran/reports/test/views_test.py | # -*- coding: utf-8 -*-
import mock
import pytest
from pyramid import httpexceptions
from annotran.reports import views
_SENTINEL = object() | Python | 0 | |
691ae15cb0f46400762c27305fb74f57fa1ffccf | Implement account.py | src/account.py | src/account.py | from datetime import datetime
from hashlib import md5
from re import match, search, DOTALL
from requests.sessions import Session
from bs4 import BeautifulSoup
BASE_URL = 'https://usereg.tsinghua.edu.cn'
LOGIN_PAGE = BASE_URL + '/do.php'
INFO_PAGE = BASE_URL + '/user_info.php'
class Account(object):
    """Tsinghua Account"""
    # Wraps login, balance and traffic lookups against the usereg portal.
    # NOTE(review): the CJK dictionary keys in update_infos() appear
    # mojibake'd in this copy of the file; verify against the upstream
    # source before editing them.

    def __init__(self, username, password, is_md5=False):
        """Store credentials; *password* may be plain text or an MD5 hex
        digest (is_md5=True).  Plain passwords are hashed immediately and
        never kept."""
        super(Account, self).__init__()
        self.username = username
        if is_md5:
            if len(password) != 32:
                raise ValueError('Length of a MD5 string must be 32')
            self.md5_pass = password
        else:
            self.md5_pass = md5(password.encode()).hexdigest()
        # Account Infomations.
        self.name = ''
        self.id = ''
        # Balance & Usage.
        self.balance = 0
        self.ipv4_byte = 0
        self.ipv6_byte = 0
        self.last_check = None
        # Status.
        self.valid = False

    def check(self):
        """Log in and refresh account info.  Returns True when the check
        completed (valid or not), False when it could not finish."""
        try:
            s = Session()
            payload = dict(action='login',
                           user_login_name=self.username,
                           user_password=self.md5_pass)
            login = s.post(LOGIN_PAGE, payload)
            if not login:  # Not a normal response, mayby the server is down?
                return False
            # The portal answers the literal string 'ok' on success.
            if login.text == 'ok':
                self.valid = True
                self.update_infos(s)
            else:
                self.valid = False
            # Checking complete.
            self.last_check = datetime.today()
            return True
        # NOTE(review): bare except hides real errors (network, parsing);
        # narrowing to requests exceptions would be safer.
        except:  # Things happened so checking did not finish.
            return False

    def update_infos(self, session):
        """Scrape the user-info page using the already-authenticated
        *session* and populate name/id/balance/traffic fields."""
        # Parse HTML.
        soup = BeautifulSoup(session.get(INFO_PAGE).text, 'html.parser')
        blocks = map(BeautifulSoup.get_text, soup.select('.maintd'))
        i = map(str.strip, blocks)  # Only works in python 3.
        # Cells alternate label/value, so zipping one iterator with itself
        # pairs each label with the value that follows it.
        infos = dict(zip(i, i))
        self.name = infos['ๅงๅ']
        self.id = infos['่ฏไปถๅท']
        self.balance = head_float(infos['ๅธๆทไฝ้ข'])
        self.ipv4_byte = head_int(infos['ไฝฟ็จๆต้(IPV4)'])
        self.ipv6_byte = head_int(infos['ไฝฟ็จๆต้(IPV6)'])

    def __repr__(self):
        return '<Account(%s, %s, %sB, ยฅ%s, %s)>' % (self.username,
                                                     self.valid,
                                                     self.ipv4_byte,
                                                     self.balance,
                                                     self.last_check)
def head_int(s):
    """Return the leading run of digits in *s* as an int.

    Raises AttributeError when *s* does not start with a digit, exactly
    like the original one-liner.
    """
    digits = match(r'\d+', s)
    return int(digits.group())
def head_float(s):
    """Return the leading (optionally fractional) number in *s* as a float.

    Raises AttributeError when *s* does not start with a digit, exactly
    like the original one-liner.
    """
    number = match(r'\d+(\.\d+)?', s)
    return float(number.group())
if __name__ == '__main__':
    # Manual smoke test against the live portal.
    # NOTE(review): real-looking credentials are hard-coded here; they
    # should not live in source control.
    acc = Account("lisihan13", "1L2S3H@th")
    acc.check()
    print(acc)
| Python | 0.000009 | |
940299a7bfd967653899b176ce76e6f1cf02ca83 | Add script to generate pairs of LIWC categories | liwcpairs2es.py | liwcpairs2es.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch, helpers
from collections import Counter
from datetime import datetime
def find_pairs(list1, list2):
    """Return every cross pairing of *list1* x *list2* as 'a@b' strings.

    Either argument being empty (or None) yields an empty list.
    """
    if not list1 or not list2:
        return []
    return [u'{}@{}'.format(left, right)
            for left in list1
            for right in list2]
# Scan every 'event' doc, count co-occurring LIWC category pairs, write the
# pairs back onto each doc, then print a year-by-year frequency table.
# NOTE(review): Python 2 script (print statements, .keys().sort()).
es = Elasticsearch()

index_name = 'embem'
doc_type = 'event'

# The two LIWC categories whose tag pairs are being counted.
cat1 = 'Body'
cat2 = 'Posemo'

timestamp = datetime.now().isoformat()

pairs_count = Counter()
# year -> Counter of pair frequencies for that year.
years = {}

q = {
    "query": {
        "wildcard": {"text_id": "*"}
    }
}

results = helpers.scan(client=es, query=q, index=index_name, doc_type=doc_type)
for r in results:
    # get tags
    cat1_tags = r.get('_source').get('liwc-entities').get('data').get(cat1)
    cat2_tags = r.get('_source').get('liwc-entities').get('data').get(cat2)

    # find all pairs
    pairs = find_pairs(cat1_tags, cat2_tags)

    if pairs:
        for pair in pairs:
            pairs_count[pair] += 1

            year = r.get('_source').get('year')
            if year not in years.keys():
                years[year] = Counter()
            years[year][pair] += 1

        # save pairs to ES
        doc = {
            'doc': {
                'pairs-{}-{}'.format(cat1, cat2): {
                    'data': pairs,
                    'num_pairs': len(pairs),
                    'timestamp': timestamp
                }
            }
        }
        es.update(index=index_name, doc_type=doc_type,
                  id=r.get('_id'), body=doc)

sorted_years = years.keys()
sorted_years.sort()

# Tab-separated report: header, totals row, then one row per pair with
# overall frequency followed by per-year counts.
print '{}\t{}\tFrequency'.format(cat1, cat2) + \
    ''.join(['\t{}'.format(k) for k in sorted_years])
print 'TOTAL\tTOTAL\t{}'.format(sum(pairs_count.values())) + \
    ''.join(['\t{}'.format(sum(years[k].values())) for k in sorted_years])
for p, f in pairs_count.most_common():
    (w1, w2) = p.split('@')
    print u'{}\t{}\t{}'.format(w1, w2, f).encode('utf-8') + \
        ''.join(['\t{}'.format(years[k][p]) for k in sorted_years])
| Python | 0 | |
73292532767d736a77ec8b122cfd4ff19b7d991b | Create Account dashboard backend | UI/account_dash.py | UI/account_dash.py | # -*- coding: utf-8 -*-
import threading
from PyQt4 import QtCore, QtGui
from qt_interfaces.account_dash_ui import Ui_AccountDash
from engine import StorjEngine
from utilities.tools import Tools
# Synchronization menu section #
class AccountDashUI(QtGui.QMainWindow):
    """Account dashboard window: per-bucket file counts and used space."""

    def __init__(self, parent=None,):
        QtGui.QWidget.__init__(self, parent)

        self.account_dash_ui = Ui_AccountDash()
        self.account_dash_ui.setupUi(self)

        self.storj_engine = StorjEngine()  # init StorjEngine
        self.tools = Tools()

        self.initialize_buckets_stats_table()
        # Fetch stats off the UI thread so the window stays responsive.
        self.createNewBucketsStatsGetThread()

    def createNewBucketsStatsGetThread(self):
        """Start a background thread that fills the stats table.

        NOTE(review): the table is mutated from this worker thread; Qt
        widgets are generally not thread-safe -- confirm this is intended.
        """
        thread = threading.Thread(target=self.fill_buckets_stats_table, args=())
        thread.start()

    def initialize_buckets_stats_table(self):
        """Set headers and sizing on the (initially empty) stats table."""
        self.table_header = ['Bucket name', 'Files count', 'Total used space']
        self.account_dash_ui.buckets_stats_table.setColumnCount(3)
        self.account_dash_ui.buckets_stats_table.setRowCount(0)

        horHeaders = self.table_header
        self.account_dash_ui.buckets_stats_table.setHorizontalHeaderLabels(horHeaders)
        self.account_dash_ui.buckets_stats_table.resizeColumnsToContents()
        self.account_dash_ui.buckets_stats_table.resizeRowsToContents()

        self.account_dash_ui.buckets_stats_table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)

    def fill_buckets_stats_table(self):
        """Walk every bucket/file, append one table row per bucket and
        update the account-wide totals."""
        total_files_size = 0
        total_files_count = 0

        for bucket in self.storj_engine.storj_client.bucket_list():
            total_bucket_files_size = 0
            total_bucket_files_count = 0

            # fill table
            table_row_count = self.account_dash_ui.buckets_stats_table.rowCount()
            self.account_dash_ui.buckets_stats_table.setRowCount(
                table_row_count + 1)

            for file in self.storj_engine.storj_client.bucket_files(bucket_id=bucket.id):
                total_bucket_files_size += int(file['size'])
                total_bucket_files_count += 1

            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 0, QtGui.QTableWidgetItem(bucket.name))
            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 1, QtGui.QTableWidgetItem(str(total_bucket_files_count)))
            self.account_dash_ui.buckets_stats_table.setItem(
                table_row_count, 2, QtGui.QTableWidgetItem(str(self.tools.human_size(total_bucket_files_size))))

            total_files_count += total_bucket_files_count
            total_files_size += total_bucket_files_size

        self.account_dash_ui.files_total_count.setText(str(total_files_count))
        self.account_dash_ui.total_used_space.setText(str(self.tools.human_size(total_files_size)))
| Python | 0.000001 | |
53258a9ffd869dd958fd818874b2c8406acca143 | add pytest for util.store | pytests/util/test_store.py | pytests/util/test_store.py | import pytest
import util.store
@pytest.fixture
def emptyStore():
    """A fresh Store with no keys set."""
    return util.store.Store()

@pytest.fixture
def store():
    """A fresh Store for tests that populate it themselves."""
    return util.store.Store()
def test_get_of_unset_key(emptyStore):
    # Unset keys return None, or the caller-supplied default.
    assert emptyStore.get("any-key") == None
    assert emptyStore.get("any-key", "default-value") == "default-value"

def test_get_of_set_key(store):
    store.set("key", "value")
    assert store.get("key") == "value"

def test_overwrite_set(store):
    # A second set() replaces the first value.
    store.set("key", "value 1")
    store.set("key", "value 2")
    assert store.get("key") == "value 2"

def test_unused_keys(store):
    # A key counts as "unused" until it has been read via get().
    store.set("key 1", "value x")
    store.set("key 2", "value y")
    assert store.unused_keys() == sorted(["key 1", "key 2"])

    store.get("key 2")
    assert store.unused_keys() == ["key 1"]

    store.get("key 1")
    assert store.unused_keys() == []
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0 | |
5bde5b5904abc30506e56865cd58fd88a97942aa | Add `deprecated` decorator | linux/keyman-config/keyman_config/deprecated_decorator.py | linux/keyman-config/keyman_config/deprecated_decorator.py | #!/usr/bin/python3
# based on https://stackoverflow.com/a/40301488
import functools
import logging
# str and bytes; kept as a tuple for the original Python-2-style check and
# as part of this module's public surface.
string_types = (type(b''), type(u''))


def deprecated(reason):
    """Decorator that logs a warning whenever the wrapped callable runs.

    Supports both spellings:

        @deprecated("use new_func instead")   # with a reason string
        @deprecated                            # bare, no reason

    The dispatch works by inspecting the single argument: a string means
    we were called with a reason and must return the real decorator; a
    callable means we *are* the decorator.
    """
    if isinstance(reason, string_types):
        # The @deprecated is used with a 'reason'.
        def decorator(func1):
            # functools.wraps preserves the wrapped function's name/docs,
            # so introspection and further decoration keep working.
            @functools.wraps(func1)
            def new_func1(*args, **kwargs):
                logging.warning("Call to deprecated function '{name}': {reason}.".format(
                    name=func1.__name__, reason=reason))
                return func1(*args, **kwargs)
            return new_func1
        return decorator
    else:
        # The @deprecated is used without any 'reason'.
        func2 = reason

        @functools.wraps(func2)
        def new_func2(*args, **kwargs):
            logging.warning("Call to deprecated function '{name}'.".format(name=func2.__name__))
            return func2(*args, **kwargs)
        return new_func2
| Python | 0.000001 | |
f6519493dd75d7f5a8b65a952b5d7048bd101ec4 | Create locationanalysis.py | locationanalysis.py | locationanalysis.py | import json
# NOTE(review): Python 2 syntax (print statements) throughout this script.
print 'test'

# Read the exported location history; the handle is never closed and the
# file is assumed to sit in the working directory.
f = open('location.json', 'r')
jsoncontent = f.read()
print jsoncontent

location = json.loads(jsoncontent)
# Length of the parsed root object (list entries or dict keys).
print len(location)
| Python | 0.000001 | |
f6d417e69efa4554008bc441a5c82a5b9f93a082 | Add sql.conventions.objects.Items | garage/sql/conventions/objects.py | garage/sql/conventions/objects.py | __all__ = [
'Items',
]
from garage.functools import nondata_property
from garage.sql.utils import insert_or_ignore, make_select_by
class Items:
    """A thin layer on top of tables of two columns: (id, value)"""
    # Subclasses must supply ``conn`` (declared below as a nondata_property
    # that raises) with a live DB connection.

    def __init__(self, table, id_name, value_name):
        self.table = table
        self.value_name = value_name
        # Pre-build a value -> id query bound to this table's columns.
        self._select_ids = make_select_by(
            getattr(self.table.c, value_name),
            getattr(self.table.c, id_name),
        )

    @nondata_property
    def conn(self):
        # Abstract: concrete subclasses provide the connection.
        raise NotImplementedError

    def select_ids(self, values):
        """Return a {value: id} mapping for the given values."""
        return dict(self._select_ids(self.conn, values))

    def insert(self, values):
        """Insert each value as a new row, silently skipping duplicates."""
        insert_or_ignore(self.conn, self.table, [
            {self.value_name: value} for value in values
        ])
| Python | 0.003446 | |
313f5c8c54002a736a323410c5d9ec96fcc2f50b | Create RespostaVer.py | backend/Models/Predio/RespostaVer.py | backend/Models/Predio/RespostaVer.py | from Framework.Resposta import Resposta
from Models.Campus.Campus import Campus as ModelCampus
class RespostaVer(Resposta):
    """Response whose body wraps a single record in its model class.

    NOTE(review): this file lives under Models/Predio but imports and wraps
    the Campus model with a ``campus`` argument -- looks copy-pasted from
    the Campus variant; confirm the intended model.
    """
    def __init__(self,campus):
        self.corpo = ModelCampus(campus)
| Python | 0 | |
95f5b7cd2325a61f537bffb783e950b30c97da5f | Add a demo about learning the shape parameter of gamma dist | bayespy/demos/gamma_shape.py | bayespy/demos/gamma_shape.py |
from bayespy import nodes
from bayespy.inference import VB
def run():
    """Demo: learn a Gamma distribution's shape via variational Bayes.

    Draws 1000 samples from Gamma(10, 20), then fits shape ``a`` and rate
    ``b`` with VB and prints the estimates next to the true values.
    """
    # Priors: a learned shape node and a broad Gamma prior on the rate.
    a = nodes.GammaShape(name='a')
    b = nodes.Gamma(1e-5, 1e-5, name='b')

    tau = nodes.Gamma(a, b, plates=(1000,), name='tau')
    # Synthetic observations from the true distribution Gamma(10, 20).
    tau.observe(nodes.Gamma(10, 20, plates=(1000,)).random())

    Q = VB(tau, a, b)
    Q.update(repeat=1000)

    print("True gamma parameters:", 10.0, 20.0)
    print("Estimated parameters from 1000 samples:", a.u[0], b.u[0])


if __name__ == "__main__":
    run()
| Python | 0 | |
d90bab60bf5f5423a7fee57ece8cd44acba113c1 | setup the environment and print the 'Hello World' | Base/D_00_HelloWorld.py | Base/D_00_HelloWorld.py | __author__ = 'James.Hongnian.Zhang'
# This is my first Python program.
# This means I have setup the environment for Python.
# Download it from https://www.python.org and install it.
# Then add it to your PATH
# That's it.
# NOTE(review): the print *statement* below is Python 2 only syntax.
print 'Hello World'
| Python | 0.999999 | |
3c618e8424e64a62168c2a2c683748d2496ef7cb | Add Urban Dictionary module. | modules/urbandictionary.py | modules/urbandictionary.py | """Looks up a term from urban dictionary
@package ppbot
@syntax ud <word>
"""
import requests
import json
from modules import *
class Urbandictionary(Module):
    """Bot module: look up a term on Urban Dictionary via the .ud command."""

    def __init__(self, *args, **kwargs):
        """Constructor"""
        Module.__init__(self, kwargs=kwargs)
        # Urban Dictionary's iPhone JSON endpoint; %s is the query term.
        self.url = "http://www.urbandictionary.com/iphone/search/define?term=%s"

    def _register_events(self):
        """Register module commands."""
        self.add_command('ud')

    def ud(self, event):
        """Action to react/respond to user calls."""
        if self.num_args >= 1:
            # Join multi-word terms with %20 for the query string.
            word = '%20'.join(event['args'])
            r = requests.get(self.url % (word))
            ur = json.loads(r.text)
            try:
                # First (top-voted) definition from the result list.
                definition = ur['list'][0]
                message = "%(word)s (%(thumbs_up)d/%(thumbs_down)d): %(definition)s (ex: %(example)s)" % (definition)
                self.msg(event['target'], message)
            except KeyError:
                # NOTE(review): message text likely meant "Couldn't find";
                # left unchanged here as it is user-visible output.
                self.msg(event['target'], 'Could find word "%s"' % ' '.join(event['args']))
        else:
            self.syntax_message(event['nick'], '.ud <word>')
| Python | 0 | |
d8fff759f2bff24f20cdbe98370ede9e5f3b7b13 | Add 2D helmholtz convergence test | convergence_tests/2D_helmholtz.py | convergence_tests/2D_helmholtz.py | from __future__ import absolute_import, division
from firedrake import *
import numpy as np
def helmholtz_mixed(x, V1, V2):
    """Solve a mixed Helmholtz problem on a 2**x by 2**x unit square.

    *V1*/*V2* are (family, degree) tuples for the flux and scalar spaces.
    Returns (solution Function, L2 error of the scalar part against the
    analytic solution sin(2*pi*x)*sin(2*pi*y)).
    NOTE(review): the parameters x, V1, V2 are rebound inside the body.
    """
    # Create mesh and define function space
    mesh = UnitSquareMesh(2**x, 2**x)
    V1 = FunctionSpace(mesh, *V1, name="V")
    V2 = FunctionSpace(mesh, *V2, name="P")
    W = V1 * V2

    # Define variational problem
    lmbda = 1
    u, p = TrialFunctions(W)
    v, q = TestFunctions(W)
    f = Function(V2)
    # Forcing chosen so the exact scalar solution is sin(2*pi*x)*sin(2*pi*y).
    f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
    a = (p*q - q*div(u) + lmbda*inner(v, u) + div(v)*p) * dx
    L = f*q*dx

    # Compute solution
    x = Function(W)
    # Matrix-free solve through firedrake's hybridization preconditioner.
    params = {'mat_type': 'matfree',
              'ksp_type': 'preonly',
              'pc_type': 'python',
              'pc_python_type': 'firedrake.HybridizationPC',
              'hybridization': {'ksp_type': 'preonly',
                                'pc_type': 'lu',
                                'hdiv_residual': {'ksp_type': 'cg',
                                                  'ksp_rtol': 1e-14},
                                'use_reconstructor': True}}
    solve(a == L, x, solver_parameters=params)

    # Analytical solution
    f.interpolate(Expression("sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
    u, p = x.split()
    err = sqrt(assemble(dot(p - f, p - f) * dx))
    return x, err
# Raviart-Thomas flux space paired with piecewise-constant DG scalars.
# NOTE(review): Python 2 print statements below.
V1 = ('RT', 1)
V2 = ('DG', 0)

x, err = helmholtz_mixed(8, V1, V2)
print err
File("helmholtz_mixed.pvd").write(x.split()[0], x.split()[1])

# Convergence study: halve the mesh size each step, then estimate the
# observed order from the ratio of consecutive errors.
l2errs = []
for i in range(1, 9):
    l2errs.append(helmholtz_mixed(i, V1, V2)[1])
l2errs = np.array(l2errs)
conv = np.log2(l2errs[:-1] / l2errs[1:])[-1]
print conv
| Python | 0 | |
c98109af519241a28c40217e8378a19903d4db0b | fix broken logic for fluff calcs using indicator_calculator | corehq/fluff/calculators/xform.py | corehq/fluff/calculators/xform.py | from datetime import timedelta
from corehq.fluff.calculators.logic import ANDCalculator, ORCalculator
import fluff
def default_date(form):
    """The date a form contributes under: its server receipt time."""
    return form.received_on
# operators
# Binary predicates used by FilteredFormPropertyCalculator.filter();
# each takes (form value, reference value) and returns a bool.
EQUAL = lambda input, reference: input == reference
NOT_EQUAL = lambda input, reference: input != reference
IN = lambda input, reference_list: input in reference_list
# Multiselect answers are stored space-separated; None counts as empty.
IN_MULTISELECT = lambda input, reference: reference in (input or '').split(' ')
# ANY ignores the reference and just tests truthiness of the value.
ANY = lambda input, reference: bool(input)
def ANY_IN_MULTISELECT(input, reference):
    """True when the space-separated multiselect *input* contains at least
    one of the values in *reference*.  None input counts as empty."""
    selected = (input or '').split(' ')
    return any(subval in selected for subval in reference)
class IntegerPropertyReference(object):
    """Callable that extracts an integer from a form property path.

    By default FilteredFormPropertyCalculator counts 1 per matching form;
    passing one of these as the indicator_calculator makes it sum the
    actual number found at *property_path* instead.  A missing or empty
    value counts as 0.  The optional *transform* callable is applied only
    to nonzero values before they are returned.
    """

    def __init__(self, property_path, transform=None):
        self.property_path = property_path
        self.transform = transform

    def __call__(self, form):
        value = int(form.xpath(self.property_path) or 0)
        if not (value and self.transform):
            return value
        return self.transform(value)
class FilteredFormPropertyCalculator(fluff.Calculator):
    """
    Enables filtering forms by xmlns and (optionally) property == value.

    Let's you easily define indicators such as:
     - all adult registration forms
     - all child registration forms with foo.bar == baz
     - all newborn followups with bippity != bop

    By default just emits a single "total" value for anything matching the filter,
    though additional fields can be added by subclassing.

    These can also be chained using logic operators for fun and profit.
    """

    # Class-level defaults; __init__ overrides them per instance when the
    # corresponding constructor argument is supplied.
    xmlns = None
    property_path = None
    property_value = None
    indicator_calculator = None
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        # Without an indicator_calculator, emit a bare date (counted as 1);
        # with one, emit [date, value] so the computed value gets summed.
        if not self.indicator_calculator:
            yield default_date(form)
        else:
            yield [default_date(form), self.indicator_calculator(form)]

    def __init__(self, xmlns=None, property_path=None, property_value=None,
                 operator=EQUAL, indicator_calculator=None, window=None):
        def _conditional_setattr(key, value):
            # Only shadow the class-level default when a value was given.
            if value:
                setattr(self, key, value)

        _conditional_setattr('xmlns', xmlns)
        assert self.xmlns is not None
        _conditional_setattr('property_path', property_path)
        _conditional_setattr('property_value', property_value)
        # ANY only tests truthiness, so it is the one operator that does
        # not require a reference value.
        if self.property_path is not None and operator != ANY:
            assert self.property_value is not None
        self.operator = operator
        _conditional_setattr('indicator_calculator', indicator_calculator)
        super(FilteredFormPropertyCalculator, self).__init__(window)

    def filter(self, form):
        # filter
        return (
            form.xmlns == self.xmlns and (
                self.property_path is None or
                self.operator(form.xpath(self.property_path), self.property_value)
            )
        )
# meh this is a little redundant but convenient
class FormANDCalculator(ANDCalculator):
    """Emits one count per form that passes ALL chained calculators."""
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        yield default_date(form)
class FormORCalculator(ORCalculator):
    """Emits one count per form that passes ANY chained calculator."""
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        yield default_date(form)
class FormSUMCalculator(ORCalculator):
    """Re-emits totals from every chained calculator whose filter passes,
    effectively summing their contributions for the form."""
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        for calc in self.calculators:
            if calc.passes_filter(form):
                for total in calc.total(form):
                    yield total
| from datetime import timedelta
from corehq.fluff.calculators.logic import ANDCalculator, ORCalculator
import fluff
def default_date(form):
    # The date a form contributes under: its server receipt time.
    return form.received_on
# operators
# Binary predicates used by FilteredFormPropertyCalculator.filter();
# each takes (form value, reference value) and returns a bool.
EQUAL = lambda input, reference: input == reference
NOT_EQUAL = lambda input, reference: input != reference
IN = lambda input, reference_list: input in reference_list
# Multiselect answers are stored space-separated; None counts as empty.
IN_MULTISELECT = lambda input, reference: reference in (input or '').split(' ')
# ANY ignores the reference and just tests truthiness of the value.
ANY = lambda input, reference: bool(input)
def ANY_IN_MULTISELECT(input, reference):
    """
    For 'this multiselect contains any one of these N items'
    """
    # Space-separated multiselect; None input counts as empty.
    return any([subval in (input or '').split(' ') for subval in reference])
class IntegerPropertyReference(object):
    """
    Returns the integer value of the property_path passed in.

    By default FilteredFormPropertyCalculator would use 1 for all results
    but this will let you return the actual number to be summed.

    Accepts an optional transform lambda/method that would modify the
    resulting integer before returning it.
    """
    def __init__(self, property_path, transform=None):
        self.property_path = property_path
        self.transform = transform

    def __call__(self, form):
        # Missing/empty property counts as 0.
        value = int(form.xpath(self.property_path) or 0)
        # Transform is only applied to nonzero values.
        if value and self.transform:
            value = self.transform(value)
        return value
class FilteredFormPropertyCalculator(fluff.Calculator):
    """
    Enables filtering forms by xmlns and (optionally) property == value.

    Let's you easily define indicators such as:
     - all adult registration forms
     - all child registration forms with foo.bar == baz
     - all newborn followups with bippity != bop

    By default just emits a single "total" value for anything matching the
    filter, though additional fields can be added by subclassing.  When an
    indicator_calculator is supplied, the emitted value is
    [date, indicator_calculator(form)] so the computed number is summed
    instead of counting 1 per form.

    These can also be chained using logic operators for fun and profit.
    """

    xmlns = None
    property_path = None
    property_value = None
    indicator_calculator = None
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        # BUG FIX: this condition was inverted ("if self.indicator_calculator"),
        # which emitted a bare date when a calculator WAS configured and
        # crashed (None is not callable) when one was not.
        if not self.indicator_calculator:
            yield default_date(form)
        else:
            yield [default_date(form), self.indicator_calculator(form)]

    def __init__(self, xmlns=None, property_path=None, property_value=None,
                 operator=EQUAL, indicator_calculator=None, window=None):
        def _conditional_setattr(key, value):
            # Only shadow the class-level default when a value was given.
            if value:
                setattr(self, key, value)

        _conditional_setattr('xmlns', xmlns)
        assert self.xmlns is not None
        _conditional_setattr('property_path', property_path)
        _conditional_setattr('property_value', property_value)
        # ANY only tests truthiness, so it does not need a reference value.
        if self.property_path is not None and operator != ANY:
            assert self.property_value is not None
        self.operator = operator
        _conditional_setattr('indicator_calculator', indicator_calculator)
        super(FilteredFormPropertyCalculator, self).__init__(window)

    def filter(self, form):
        # filter
        return (
            form.xmlns == self.xmlns and (
                self.property_path is None or
                self.operator(form.xpath(self.property_path), self.property_value)
            )
        )
# These thin wrappers are slightly redundant but convenient.
class FormANDCalculator(ANDCalculator):
    # Daily aggregation window.
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        # Emit the form's received date once, so matching forms count as 1.
        yield default_date(form)
class FormORCalculator(ORCalculator):
    # Daily aggregation window.
    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        # Emit the form's received date once, so matching forms count as 1.
        yield default_date(form)
class FormSUMCalculator(ORCalculator):
    """Re-emits every value produced by each chained calculator that
    accepts the form, so their contributions are summed together."""

    window = timedelta(days=1)

    @fluff.date_emitter
    def total(self, form):
        for calculator in self.calculators:
            if not calculator.passes_filter(form):
                continue
            for emitted in calculator.total(form):
                yield emitted
| Python | 0.000001 |
5206a15d59bc8881629c48bb4136bb1a9cb7b4d0 | Create ms_old_identifiers.py | identifiers/ms_old_identifiers.py | identifiers/ms_old_identifiers.py | from identifier import *
import collections
# Metadata for one recognizable file type: display name, human-readable
# description, and the raw byte signature to search for.
# BUG FIX: the field was misspelled 'descripion' while CfbResolver.identify()
# reads filepat.description, which raised AttributeError on every match.
CFBInfo = collections.namedtuple('CFBInfo', ['name', 'description', 'pattern'])
# Magic-number pattern(s) registered with the host framework in load();
# this is the well-known OLE2 / Compound File Binary header signature.
OFFICE_PATTERNS = [
    'D0 CF 11 E0 A1 B1 1A E1'
]
# Content signatures used by CfbResolver to narrow a CFB container to a
# concrete Office format.
# NOTE(review): several bytes that are 0x00 in the commonly published
# DOC/XLS signatures appear here as 0x20 -- verify these constants.
# NOTE(review): identify() scans only the first 128 bytes of the stream,
# while these content signatures typically occur past the 512-byte CFB
# header -- confirm matching actually succeeds on real files.
FILE_PATTERNS = [
    CFBInfo('DOC', 'Microsoft Word 97-2003', bytes.fromhex('EC A5 C1 20')),
    CFBInfo('XLS', 'Microsoft Excel 97-2003', bytes.fromhex('09 08 10 20 20 06 05 20 A6 45 CD 07')),
]
class CfbResolver:
    """Narrows a matched Compound File Binary container to a specific
    Office format by looking for known content signatures."""

    def identify(self, stream):
        # Inspect only the leading bytes of the stream.
        header = stream.read(128)
        for info in FILE_PATTERNS:
            if info.pattern in header:
                return Result(info.name, info.description)
        # No specific signature found: report the generic container type.
        return Result('CFB')
def load(hound):
    # Plugin entry point: register the CFB magic pattern(s) with this resolver.
    # NOTE(review): the pattern format expected by hound.add_matches (hex
    # string with spaces) is defined by the host framework -- confirm.
    hound.add_matches(OFFICE_PATTERNS, CfbResolver())
| Python | 0.000111 | |
835a7b9bea1b006b5a096665d706b64b778d45ab | fix default param | python/federatedml/ensemble/test/hack_encrypter.py | python/federatedml/ensemble/test/hack_encrypter.py | class HackDecrypter():
def encrypt(self, val):
    # Identity "encryption": returns the input unchanged (test stand-in).
    return val
def decrypt(self, val):
    # Identity "decryption": returns the input unchanged, mirroring encrypt.
    # NOTE(review): the trailing "| Python | ..." tokens on the next line are
    # dataset-dump residue fused onto the source line, not valid Python.
    return val | Python | 0.000002 | |
aeb671484bc8e68a8aba3eaa80523ae153b8e9c9 | Add files via upload | youtube_list.py | youtube_list.py | from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import pafy
# SECURITY NOTE(review): hard-coded API key committed to source control --
# it should be revoked and loaded from an environment variable or config.
DEVELOPER_KEY = "AIzaSyCsrKjMf7_mHYrT6rIJ-oaA6KL5IYg389A"
# YouTube Data API service identifier and version passed to build().
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(options):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
# Call the search.list method to retrieve results matching the specified
# query term.
search_response = youtube.search().list(
q="Never Give",
part="id,snippet"
).execute()
videos = []
channels = []
playlists = []
# Add each result to the appropriate list, and then display the lists of
# matching videos, channels, and playlists.
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append("%s" % (search_result["id"]["videoId"]))
print videos[0],"/n"
#Papy audio stream URL
audio = pafy.new(videos[0])
print audio.audiostreams[0].url
if __name__ == "__main__":
    # CLI flags: --q is the search term, --max-results caps the result count.
    argparser.add_argument("--q", help="Search term", default="Google")
    argparser.add_argument("--max-results", help="Max results", default=25)
    args = argparser.parse_args()
    try:
        youtube_search(args)
    # Python 2 except syntax; reports errors returned by the YouTube API.
    except HttpError, e:
        print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
| Python | 0 | |
0770fab7c4985704e2793ab98150c9f1a2729e01 | Create easy_17_ArrayAdditionI.py | easy_17_ArrayAdditionI.py | easy_17_ArrayAdditionI.py | import itertools
#################################################
# This function will see if there is any #
# possible combination of the numbers in #
# the array that will give the largest number #
#################################################
def ArrayAdditionI(arr):
    """Return "true" if any combination of two or more of the smaller
    numbers in arr sums to the largest number, otherwise "false".

    Returns "false" for an empty input (the original raised IndexError).
    """
    if not arr:
        return "false"
    # Work on a sorted copy so the caller's list is not mutated
    # (the original sorted arr in place as a side effect).
    ordered = sorted(arr)
    largest = ordered[-1]
    rest = ordered[:-1]
    # Try every combination size; return immediately on the first match.
    # (The original's `break` only exited the inner loop, so it kept
    # scanning remaining sizes after already finding an answer.)
    for size in range(2, len(rest) + 1):
        for combo in itertools.combinations(rest, size):
            if sum(combo) == largest:
                return "true"
    return "false"
# NOTE(review): raw_input() returns a string, while ArrayAdditionI indexes
# and combines numeric elements -- the input presumably needs parsing into a
# list of numbers; confirm how the challenge harness invokes this (Python 2).
print ArrayAdditionI(raw_input())
| Python | 0.000034 | |
1cc15f3ae9a0b7fa5b2dae4bcdd9f0f3c061ce4d | Fix relate_name on Bug model | reclama/sprints/migrations/0002_auto_20150130_1751.py | reclama/sprints/migrations/0002_auto_20150130_1751.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Alters the Bug.event many-to-many field to set related_name='bugs',
    # i.e. the reverse accessor on Event becomes event.bugs.

    # Must run after the app's initial schema migration.
    dependencies = [
        ('sprints', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bug',
            name='event',
            field=models.ManyToManyField(related_name='bugs', to='sprints.Event'),
            preserve_default=True,
        ),
    ]
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.