commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
b83c4ddb14c9ba555d187125838a5189dfb3530c | Remove six as an explicit dependency. | setup.py | setup.py | import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=open('README.md').read(),
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.42',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=open('README.md').read(),
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.42',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'six >= 1.9',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python | 0 |
540bf48cdca59744baf043cbfa5056b07e493429 | fix sage script to work generally over a list of account ids to produce lists of journals | portality/scripts/journals_in_doaj_by_account.py | portality/scripts/journals_in_doaj_by_account.py | from portality import models
from portality.core import app
from portality.core import es_connection
import esprit
import csv
import json
from portality.util import ipt_prefix
class JournalQuery(object):
def __init__(self, owner):
self.owner = owner
def query(self):
return {
"query":{
"filtered":{
"filter":{
"bool":{
"must":[
{"term":{"admin.owner.exact": self.owner}},
{"term" : {"admin.in_doaj" : True}}
]
}
},
"query":{
"match_all":{}
}
}
}
}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="input account list")
parser.add_argument("-o", "--out", help="output file path")
args = parser.parse_args()
if not args.out:
print("Please specify an output file path with the -o option")
parser.print_help()
exit()
if not args.input:
print("Please specify an input file path with the -i option")
parser.print_help()
exit()
# conn = esprit.raw.make_connection(None, app.config["ELASTIC_SEARCH_HOST"], None, app.config["ELASTIC_SEARCH_DB"])
conn = es_connection
with open(args.out, "w", encoding="utf-8") as f, open(args.input, "r") as g:
reader = csv.reader(g)
writer = csv.writer(f)
writer.writerow(["Name", "Account", "ID", "Title"])
for row in reader:
query = JournalQuery(row[1])
print(json.dumps(query.query()))
count = 0
for j in esprit.tasks.scroll(conn, ipt_prefix(models.Journal.__type__), q=query.query(), limit=800, keepalive='5m'):
journal = models.Journal(_source=j)
bibjson = journal.bibjson()
writer.writerow([row[0], row[1], journal.id, bibjson.title])
count += 1
print(count) | Python | 0 | |
4d139c6d2b9ea368bfc5189537d9af67cea582f6 | Create demo_Take_Photo_when_PIR_high.py | demo_Take_Photo_when_PIR_high.py | demo_Take_Photo_when_PIR_high.py | import time
import picamera
import datetime
import RPi.GPIO as GPIO
def CheckPIR():
# dependencies are RPi.GPIO and time
# returns whats_here with "NOTHING HERE" or "SOMETHING HERE"
time.sleep(1)
#don't rush the PIR!
GPIO.setmode(GPIO.BOARD)
# set numbering system for GPIO PINs are BOARD
GPIO.setup(7, GPIO.IN)
# set up number 7 PIN for input from the PIR
# need to adjust if you connected PIR to another GPIO PIN
try:
val = GPIO.input(7)
if (val == True):
PIR_IS = 1
#PIR returned HIGH to GPIO PIN, so something here!
if (val == False):
PIR_IS = 0
#PIR returned LOW to GPIO PIN, so something here!
GPIO.cleanup()
except:
GPIO.cleanup()
return PIR_IS
PIR = 1
count = 0
while True:
PIR = 0
#Now to check the PIR and send what it returns to PIR
PIR = CheckPIR()
if PIR == 0:
print("Nothing has been detected by PIR")
elif PIR == 1:
print("Something has been seen! Time to photograph it!")
i = 0
with picamera.PiCamera() as camera:
while i < 5:
i = i+1
print(i)
camera.start_preview()
time.sleep(1)
utc_datetime = datetime.datetime.utcnow()
utc_datetime.strftime("%Y-%m-%d-%H%MZ")
#get date and time so we can append it to the image filename
camera.capture('image_'+str(utc_datetime)+'.jpg')
camera.stop_preview()
time.sleep(1)
if i == 5:
break
| Python | 0.000001 | |
feeb386efe01fb3dd4e70e216337c8a4b476cb9a | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
VERSION = "1.0.0"
LONG_DESCRIPTION = """Behavioural Analysis involves the expressing the general
expectation of the state of the system while targeting a single or set of heuristics.
This is particularly helpful when there are large number of factors that can change
the behaviour of the system and testing all permutations of these input parameters
is impossible. In such a scenario an assertion of the final expectation can be
useful in managing performance and regression.
The Behavioural Analysis and Regression Toolkit is based on TRAPpy. The primary goal is
to assert behaviours using the FTrace output from the kernel
"""
REQUIRES = [
"TRAPpy==1.0.0",
]
setup(name='BART',
version=VERSION,
license="Apache v2",
author="ARM-BART",
author_email="bart@arm.com",
description="Behavioural Analysis and Regression Toolkit",
long_description=LONG_DESCRIPTION,
url="http://arm-software.github.io/bart",
packages=find_packages(),
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
# As we depend on trace data from the Linux Kernel/FTrace
"Topic :: System :: Operating System Kernels :: Linux",
"Topic :: Scientific/Engineering :: Visualization"
],
install_requires=REQUIRES
)
| Python | 0 | |
4a4231976f2f084c1233e3efe27f5d18b486f146 | Create setup.py | setup.py | setup.py | from setuptools import setup
import re
name = 'gcdb'
version = ''
with open('{0}/__init__.py'.format(name), 'rb') as f:
match_object = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE)
version = match_object.group(1)
setup(
name=name,
version=version,
packages=[name],
entry_points={'console_scripts': ['gcdb = gcdb:main']},
)
| Python | 0.000001 | |
3c314d006fb1726b671d0223f08fe16f0944cd82 | test call started sla | cla_backend/apps/reports/tests/test_mi_sla_report.py | cla_backend/apps/reports/tests/test_mi_sla_report.py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import datetime
from django.test import TestCase
from legalaid.forms import get_sla_time
import mock
from core.tests.mommy_utils import make_recipe, make_user
from cla_eventlog import event_registry
from cla_eventlog.models import Log
from reports.forms import MICB1Extract
@contextmanager
def patch_field(cls, field_name, dt):
field = cls._meta.get_field(field_name)
mock_now = lambda: dt
with mock.patch.object(field, 'default', new=mock_now):
yield
class MiSlaTestCase(TestCase):
def test_call_started_sla(self):
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 0, 0)):
case = make_recipe('legalaid.case')
user = make_user()
make_recipe('call_centre.operator', user=user)
event = event_registry.get_event('call_me_back')()
_dt = datetime.datetime(2015, 1, 2, 9, 1, 0)
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 1, 0)):
event.get_log_code(case=case)
event.process(
case, created_by=user,
notes='',
context={
'requires_action_at': _dt,
'sla_15': get_sla_time(_dt, 15),
'sla_30': get_sla_time(_dt, 30),
'sla_120': get_sla_time(_dt, 120),
'sla_480': get_sla_time(_dt, 480)
},
)
case.requires_action_at = datetime.datetime(2015, 1, 2, 9, 1, 0)
case.save()
event = event_registry.get_event('case')()
with patch_field(Log, 'created', datetime.datetime(2015, 1, 2, 9, 30, 0)):
event.process(
case, status='call_started', created_by=user,
notes='Call started'
)
date_range = (
datetime.datetime(2015, 1, 1),
datetime.datetime(2015, 2, 1)
)
with mock.patch('reports.forms.MICB1Extract.date_range', date_range):
report = MICB1Extract()
qs = report.get_queryset()
self.assertFalse(qs[0][28])
| Python | 0.000001 | |
0e45b8fcf1978f560713864e18a270719d7d4872 | Make sure the handle dict values are string. Looks like dbus-python get confused if they are dbus.String. | sugar/activity/activityhandle.py | sugar/activity/activityhandle.py | # Copyright (C) 2006-2007 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from sugar.presence import presenceservice
class ActivityHandle(object):
"""Data structure storing simple activity metadata"""
def __init__(
self, activity_id=None, pservice_id=None,
object_id=None, uri=None
):
"""Initialise the handle from activity_id
activity_id -- unique id for the activity to be
created
pservice_id -- identity of the sharing service
for this activity in the PresenceService
object_id -- identity of the journal object
associated with the activity. It was used by
the journal prototype implementation, might
change when we do the real one.
When you resume an activity from the journal
the object_id will be passed in. It's optional
since new activities does not have an
associated object (yet).
XXX Not clear how this relates to the activity
id yet, i.e. not sure we really need both. TBF
uri -- URI associated with the activity. Used when
opening an external file or resource in the
activity, rather than a journal object
(downloads stored on the file system for
example or web pages)
"""
self.activity_id = activity_id
self.pservice_id = pservice_id
self.object_id = object_id
self.uri = uri
def get_shared_activity(self):
"""Retrieve the shared instance of this activity
Uses the PresenceService to find any existing dbus
service which provides sharing mechanisms for this
activity.
"""
if self.pservice_id:
pservice = presenceservice.get_instance()
return pservice.get_activity(self.pservice_id)
else:
return None
def get_dict(self):
"""Retrieve our settings as a dictionary"""
result = { }
if self.activity_id:
result['activity_id'] = str(self.activity_id)
if self.pservice_id:
result['pservice_id'] = str(self.pservice_id)
if self.object_id:
result['object_id'] = str(self.object_id)
if self.uri:
result['uri'] = str(self.uri)
return result
def create_from_dict(handle_dict):
"""Create a handle from a dictionary of parameters"""
result = ActivityHandle(
handle_dict['activity_id'],
pservice_id = handle_dict.get( 'pservice_id' ),
object_id = handle_dict.get('object_id'),
uri = handle_dict.get('uri'),
)
return result
| # Copyright (C) 2006-2007 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from sugar.presence import presenceservice
class ActivityHandle(object):
"""Data structure storing simple activity metadata"""
def __init__(
self, activity_id=None, pservice_id=None,
object_id=None, uri=None
):
"""Initialise the handle from activity_id
activity_id -- unique id for the activity to be
created
pservice_id -- identity of the sharing service
for this activity in the PresenceService
object_id -- identity of the journal object
associated with the activity. It was used by
the journal prototype implementation, might
change when we do the real one.
When you resume an activity from the journal
the object_id will be passed in. It's optional
since new activities does not have an
associated object (yet).
XXX Not clear how this relates to the activity
id yet, i.e. not sure we really need both. TBF
uri -- URI associated with the activity. Used when
opening an external file or resource in the
activity, rather than a journal object
(downloads stored on the file system for
example or web pages)
"""
self.activity_id = activity_id
self.pservice_id = pservice_id
self.object_id = object_id
self.uri = uri
def get_shared_activity(self):
"""Retrieve the shared instance of this activity
Uses the PresenceService to find any existing dbus
service which provides sharing mechanisms for this
activity.
"""
if self.pservice_id:
pservice = presenceservice.get_instance()
return pservice.get_activity(self.pservice_id)
else:
return None
def get_dict(self):
"""Retrieve our settings as a dictionary"""
result = { 'activity_id' : self.activity_id }
if self.pservice_id:
result['pservice_id'] = self.pservice_id
if self.object_id:
result['object_id'] = self.object_id
if self.uri:
result['uri'] = self.uri
return result
def create_from_dict(handle_dict):
"""Create a handle from a dictionary of parameters"""
result = ActivityHandle(
handle_dict['activity_id'],
pservice_id = handle_dict.get( 'pservice_id' ),
object_id = handle_dict.get('object_id'),
uri = handle_dict.get('uri'),
)
return result
| Python | 0 |
26cbfe83f0047c8ce66a21237db8ae484736a085 | Add TensorboardLogs class for use as a proxy to tensorboard data. | helpers/tensorboard.py | helpers/tensorboard.py | import glob
import numpy as np
import os
from tensorflow.tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from . import get_first_existing_path, get_nth_matching_path
from ..experiments import Experiment
class TensorboardLogs(object):
def __init__(self, path):
self.path = path
self.ea = EventAccumulator(self.path)
self.ea.Reload()
def get_scalars(self, name):
events = self.ea.Scalars(name)
scalars = np.array([(event.wall_time, event.step, event.value) for event in events])
return (scalars[:,0], scalars[:,1].astype('int'), scalars[:,2])
def find_log_path(config, main_path=None):
config.define('path.result.main.base', 'path.result.base', default='')
config.define('path.result.main.relative', 'path.result.relative', default='')
config.define('path.result.tensorboard.base', 'path.result.base.tensorboard', default='')
config.define('path.result.tensorboard.relative', 'path.result.relative.tensorboard', default='')
candidates = [os.path.join(config('path.result.tensorboard.base'), config('path.result.tensorboard.relative')),
os.path.join(config('path.result.main.base').replace('experiment', 'experiment-tb'), config('path.result.tensorboard.relative')),
os.path.join(Experiment.DEFAULT_TENSORBOARD_ROOT, config('path.result.tensorboard.relative')),
get_nth_matching_path(os.path.join(config('path.result.tensorboard.base'), config('path.result.main.relative')) + '@*', -1, ''),
get_nth_matching_path(os.path.join(config('path.result.main.base').replace('experiment', 'experiment-tb'), config('path.result.main.relative')) + '@*', -1, ''),
get_nth_matching_path(os.path.join(Experiment.DEFAULT_TENSORBOARD_ROOT, config('path.result.main.relative')) + '@*', -1, '')]
if main_path:
candidates.append(get_nth_matching_path(glob.escape(main_path.replace('experiment','experiment-tb')) + '@*', -1, ''))
path = get_first_existing_path(*candidates)
if not path:
raise FileNotFoundError('Tensorboard log directory is not found.')
return path | Python | 0 | |
c184e79b91a63299c249e207dba1e8cd95a8e5d0 | Add fpocket (#12675) | var/spack/repos/builtin/packages/fpocket/package.py | var/spack/repos/builtin/packages/fpocket/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fpocket(MakefilePackage):
"""fpocket is a very fast open source protein pocket detection algorithm
based on Voronoi tessellation."""
homepage = "https://github.com/Discngine/fpocket"
version('master', branch='master',
git='https://github.com/Discngine/fpocket.git')
depends_on("netcdf")
def setup_environment(self, spack_env, run_env):
if self.compiler.name == 'gcc':
spack_env.set('CXX', 'g++')
def edit(self):
makefile = FileFilter('makefile')
makefile.filter('BINDIR .*', 'BINDIR = %s/bin' % self.prefix)
makefile.filter('MANDIR .*', 'MANDIR = %s/man/man8' % self.prefix)
| Python | 0 | |
fb6eee18b2bf48dd0063623515ced00e980bdf10 | Add a few tests for docparse. | nipype/utils/tests/test_docparse.py | nipype/utils/tests/test_docparse.py | from nipype.testing import *
from nipype.utils.docparse import reverse_opt_map, build_doc
class Foo(object):
opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'}
foo_doc = """Usage: foo infile outfile [opts]
Bunch of options:
-o something about an outline
-f <f> intensity of fun factor
Other stuff:
-v verbose
"""
fmtd_doc = """Parameters
----------
outline :
something about an outline
fun :
<f> intensity of fun factor
Others Parameters
-----------------
-v verbose"""
def test_rev_opt_map():
map = {'-f': 'fun', '-o': 'outline'}
rev_map = reverse_opt_map(Foo.opt_map)
assert_equal(rev_map, map)
def test_build_doc():
opts = reverse_opt_map(Foo.opt_map)
doc = build_doc(foo_doc, opts)
assert_equal(doc, fmtd_doc)
| Python | 0.999642 | |
e71742bc0fc09ebf37532b92458670a4efe8926b | Add setup file | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='django-device-notifications',
version='0.0.1',
description='Generic library for APN & GCM notifications',
author='Johann Heller',
author_email='johann@rover.com',
url='https://github.com/roverdotcom/django-device-notifications',
packages=find_packages(exclude=('tests', 'docs'))
)
| Python | 0 | |
ec54935e169019067f2179a92d0f6e833f133bc9 | add a DataContainer implemented as a subclass of dict | simphony/core/data_container.py | simphony/core/data_container.py | from collections import Mapping
from simphony.core.cuba import CUBA
_ERROR_MESSAGE = "Keys {!r} are not in the approved CUBA keywords"
_CUBA_KEYS = set(CUBA)
class DataContainer(dict):
""" A DataContainer instance
The DataContainer object is implemented as a python dictionary whose keys
are restricted to be members of the CUBA enum class.
"""
# Memory usage optimization.
__slots__ = ()
def __init__(self, *args, **kwards):
""" Contructor.
Initialization follows the behaviour of the python dict class.
"""
self._check_arguments(args, kwards)
if len(args) == 1 and not hasattr(args[0], 'keys'):
super(DataContainer, self).__init__(**kwards)
for key, value in args[0]:
self.__setitem__(key, value)
return
super(DataContainer, self).__init__(*args, **kwards)
def __setitem__(self, key, value):
""" Set/Update the key value only when
"""
if key in _CUBA_KEYS:
super(DataContainer, self).__setitem__(key, value)
else:
message = "Key {!r} is not in the approved CUBA keywords"
raise KeyError(message.format(key))
def update(self, *args, **kwards):
self._check_arguments(args, kwards)
if len(args) == 1 and not hasattr(args[0], 'keys'):
for key, value in argument:
self.__setitem__(key, value)
return
super(DataContainer, self).update(*args, **kwards)
def _check_arguments(self, args, kwards):
""" Check for the right arguments
"""
# See if there are any non CUBA keys in the mapping argument
non_cuba_keys = kwards.viewkeys() - _CUBA_KEYS
if len(non_cuba_keys) > 0:
raise KeyError(_ERROR_MESSAGE.format(non_cuba_keys))
if len(args) == 1:
argument = args[0]
if isinstance(argument, DataContainer):
# This is already a DataContainer so we are sure that
# it only contains CUBA keys.
return
if isinstance(argument, Mapping):
# See if there any non CUBA keys in the mapping argument
non_cuba_keys = set(argument.keys()) - _CUBA_KEYS
if len(non_cuba_keys) > 0:
raise KeyError(_ERROR_MESSAGE.format(non_cuba_keys))
| Python | 0 | |
4912bac4ab534ca942393c36f71dd7df4182eb94 | add test_dot.py | sympy/printing/tests/test_dot.py | sympy/printing/tests/test_dot.py | from sympy.printing.dot import (purestr, styleof, attrprint, dotnode,
dotedges, dotprint)
from sympy import Symbol, Integer, Basic, Expr
from sympy.abc import x
def test_purestr():
assert purestr(Symbol('x')) == "Symbol(x)"
assert purestr(Basic(1, 2)) == "Basic(1, 2)"
def test_styleof():
styles = [(Basic, {'color': 'blue', 'shape': 'ellipse'}),
(Expr, {'color': 'black'})]
assert styleof(Basic(1), styles) == {'color': 'blue', 'shape': 'ellipse'}
x = Symbol('x')
assert styleof(x + 1, styles) == {'color': 'black', 'shape': 'ellipse'}
def test_attrprint():
assert attrprint({'color': 'blue', 'shape': 'ellipse'}) == \
'"color"="blue", "shape"="ellipse"'
def test_dotnode():
assert dotnode(x) ==\
'"Symbol(x)" ["color"="black", "label"="x", "shape"="ellipse"];'
assert dotnode(x+2) == \
'"Add(Integer(2), Symbol(x))" ["color"="black", "label"="Add", "shape"="ellipse"];'
def test_dotedges():
assert sorted(dotedges(x+2)) == [
'"Add(Integer(2), Symbol(x))" -> "Integer(2)";',
'"Add(Integer(2), Symbol(x))" -> "Symbol(x)";'
]
def test_dotprint():
text = dotprint(x+2)
assert all(e in text for e in dotedges(x+2))
assert all(n in text for n in map(dotnode, (x, Integer(2), x+2)))
assert 'digraph' in text
| Python | 0.00008 | |
4567a9810b8c9abdb450a442c892dbdb4eecf0e0 | Add test.py to test gsutil in pantheon | vm_server/accept/test.py | vm_server/accept/test.py | from google.cloud import storage
bucket_name = "automation-interns"
destination_file_name = ("./text.txt")
source_blob_name = "test/text_file.txt"
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name) | Python | 0.000001 | |
a43acda7271c3fc48a82552721aec1332e9892d6 | Create OpticalDensityInv.py | OpticalDensityInv.py | OpticalDensityInv.py | import numpy
def OpticalDensityInv( I ):
'''
Transforms input RGB image "I" into optical density space for color deconvolution.
*Inputs:
I (rgbimage) - a floating-point image of optical density values obtained
from OpticalDensityFwd.
*Outputs:
Out (rgbimage) - a floating-point multi-channel intensity image with
values in range 0-255.
*Related functions:
OpticalDensityFwd, ColorDeconvolution, ColorConvolution
'''
return numpy.exp(-(I - 255)*numpy.log(255)/255);
| Python | 0.000001 | |
fa049b79c24f8213fa9335a31a34c354faf67459 | Add exmaple about proving equivalence of exprs | src/examples/python/proving_equivalence.py | src/examples/python/proving_equivalence.py | #!/usr/bin/env python
## -*- coding: utf-8 -*-
##
## $ python ./proving equivalence.py
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
## True
##
import sys
from triton import *
def prove(ctx, n):
ast = ctx.getAstContext()
if ctx.isSat(ast.lnot(n)) == True:
return False
return True
if __name__ == '__main__':
ctx = TritonContext(ARCH.X86_64)
ast = ctx.getAstContext()
ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
x = ast.variable(ctx.newSymbolicVariable(8, 'x'))
y = ast.variable(ctx.newSymbolicVariable(8, 'y'))
# MBA coming from VMProtect https://whereisr0da.github.io/blog/posts/2021-02-16-vmp-3/
# To detect their equivalence you can synthesize them (see synthesizing_obfuscated_expressions.py)
# Then you can confirm the synthesized output with this example
print(prove(ctx, x ^ y == (~(~(x) & ~(y)) & ~(~(~(x)) & ~(~(y))))))
print(prove(ctx, x + y == ((~(~(x)) & ~(~(y))) + (~(~(x)) | ~(~(y))))))
print(prove(ctx, x + y == ((~(~(y)) | ~(~(x))) + ~(~(x)) - (~(~(x)) & ~(~(~(y)))))))
print(prove(ctx, x + y == ((~(~(x)) | ~(~(y))) + (~(~(~(x))) | ~(~(y))) - (~(~(~(x)))))))
print(prove(ctx, x + y == ((~(~(x)) | ~(~(y))) + ~(~(y)) - (~(~(~(x))) & ~(~(y))))))
print(prove(ctx, x + y == (~(~(y)) + (~(~(x)) & ~(~(~(y)))) + (~(~(x)) & ~(~(y))))))
print(prove(ctx, x - y == (~(~(x) + y))))
print(prove(ctx, ~((x | y) - x) == (~(((~(~(x)) | y) - (~(~(x))))))))
print(prove(ctx, x - y == (~((~(x) & ~(x)) + y) & ~((~(x) & ~(x)) + y))))
print(prove(ctx, x & y == ((~(~(x)) | y) - (~(~(~(x))) & y) - (~(~(x)) & ~y))))
print(prove(ctx, x & y == ((~(~(~(x))) | y) - (~(~(~(x)))))))
print(prove(ctx, x | y == ((~(~(x)) & ~(y)) + y)))
print(prove(ctx, x | y == (((~(~(x)) & ~(y)) & y) + ((~(~(x)) & ~(y)) | y))))
print(prove(ctx, x + y == ((~(~(x)) & ~(~(y))) + (~(~(x)) | ~(~(y))))))
sys.exit(0)
| Python | 0.000119 | |
3c997e3a9eb92c3053c521f6c2fff6cfdf99c126 | add setup.py | setup.py | setup.py | # noqa: D100
import os
import re
from setuptools import setup
requirements_txt = open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).read()
requirements = re.findall(r'^([^\s#]+)', requirements_txt, re.M)
setup(name='assignment_dashboard',
packages=['assignment_dashboard'],
include_package_data=True,
version='0.1',
description="A web app that inspects forks of an GitHub assignment repo",
long_description="Display the a GitHub repo's forks, by file, and collate Jupyter notebooks",
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3'
'Programming Language :: Python :: 3.5'
],
url='http://github.com/osteele/assignment-dashboard',
author='Oliver Steele',
author_email='steele@osteele.com',
license='MIT',
install_requires=requirements
)
| Python | 0.000001 | |
11cf7dd63f8fe7453057ef0846d4e645fa05f124 | Add setuptools setup.py | setup.py | setup.py | from setuptools import setup
setup(name='pybeam',
version='0.1',
description='Python module to parse Erlang BEAM files',
url='http://github.com/matwey/pybeam',
author='Matwey V. Kornilov',
author_email='matwey.kornilov@gmail.com',
license='MIT',
packages=['pybeam'],
install_requires=['construct'],
zip_safe=False)
| Python | 0.000001 | |
555dac76a8810cfeaae96f8de04e9eb3362a3314 | Remove old notification status column | migrations/versions/0109_rem_old_noti_status.py | migrations/versions/0109_rem_old_noti_status.py | """
Revision ID: 0109_rem_old_noti_status
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-10 14:25:15.712055
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0109_rem_old_noti_status'
down_revision = '0108_change_logo_not_nullable'
def upgrade():
op.drop_column('notification_history', 'status')
op.drop_column('notifications', 'status')
def downgrade():
op.add_column(
'notifications',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
op.add_column(
'notification_history',
sa.Column(
'status',
postgresql.ENUM(
'created', 'sending', 'delivered', 'pending', 'failed', 'technical-failure',
'temporary-failure', 'permanent-failure', 'sent', name='notify_status_type'
),
autoincrement=False,
nullable=True
)
)
| Python | 0.000001 | |
21a67556b83b7905134439d55afe33c35e4b3422 | Add an index on notifications for (service_id, created_at) to improve the performance of the notification queries. We've already performed this update on production since you need to create the index concurrently, which is not allowed from the alembic script. For that reason we are checking if the index exists. | migrations/versions/0246_notifications_index.py | migrations/versions/0246_notifications_index.py | """
Revision ID: 0246_notifications_index
Revises: 0245_archived_flag_jobs
Create Date: 2018-12-12 12:00:09.770775
"""
from alembic import op
revision = '0246_notifications_index'
down_revision = '0245_archived_flag_jobs'
def upgrade():
conn = op.get_bind()
conn.execute(
"CREATE INDEX IF NOT EXISTS ix_notifications_service_created_at ON notifications (service_id, created_at)"
)
def downgrade():
conn = op.get_bind()
conn.execute(
"DROP INDEX IF EXISTS ix_notifications_service_created_at"
)
| Python | 0 | |
1337c19df3ccecf5739c58a719742d970c7faa14 | Calculate LDA | build_topics.py | build_topics.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import os
from gensim import corpora
from gensim.models.ldamulticore import LdaMulticore
from common import SimpleTokenizer
def parse_args():
description = '''
Finds topics from reviews
'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('prefix')
parser.add_argument('--no_below', type=int, default=5)
parser.add_argument('--no_above', type=float, default=0.95)
parser.add_argument('--num_topics', type=int, default=64)
parser.add_argument('--workers')
return parser.parse_args()
class ReviewCorpus(object):
    """Streaming gensim corpus over a newline-delimited JSON review file.

    Each line of *filename* is one JSON-encoded review; iterating the corpus
    yields the bag-of-words representation of each review, so the whole file
    never has to be held in memory.
    """
    def __init__(self, filename, dictionary):
        # filename: path to review.json (one JSON object per line).
        # dictionary: gensim corpora.Dictionary used for doc2bow conversion.
        self.filename = filename
        self.dictionary = dictionary
        self.tokenizer = SimpleTokenizer()
    def __iter__(self):
        # Re-opens the file on each pass so the corpus can be iterated more
        # than once (gensim training typically makes several passes).
        with open(self.filename) as f:
            for line in f:
                review = json.loads(line)
                # NOTE(review): assumes SimpleTokenizer.tokenize accepts the
                # decoded JSON object directly -- confirm against common.py.
                tokens = self.tokenizer.tokenize(review)
                yield self.dictionary.doc2bow(tokens)
def main():
    """Train and persist an LDA topic model from preprocessed review data.

    Expects <prefix>/review.dict (a saved gensim Dictionary) and
    <prefix>/review.json (one JSON review per line); writes the fitted
    model to <prefix>/review.ldamodel.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    args = parse_args()
    dictionary = corpora.Dictionary.load(os.path.join(args.prefix, 'review.dict'))
    logging.info('Pruning dictionary')
    # Drop very rare and near-ubiquitous tokens: both extremes carry little
    # topical signal and inflate the vocabulary.
    dictionary.filter_extremes(no_below=args.no_below,
                               no_above=args.no_above)
    corpus = ReviewCorpus(os.path.join(args.prefix, 'review.json'),
                          dictionary)
    logging.info('Computing LDA model')
    lda = LdaMulticore(corpus, num_topics=args.num_topics, id2word=dictionary,
                       workers=args.workers)
    logging.info('Persisting LDA model')
    lda.save(os.path.join(args.prefix, 'review.ldamodel'))
if __name__ == '__main__':
main()
| Python | 0.998653 | |
86b2f32bd212a14e904b9823fbf543b321f46ca7 | Add very basic setup.py | setup.py | setup.py | from distutils.core import setup
setup(name='astcheck',
version='0.1',
py_modules=['astcheck'],
) | Python | 0.000001 | |
523a6fe005149bf0a8a91cd81c9f692f5aaaf1c9 | fix description | farmer/models.py | farmer/models.py | #coding=utf8
import os
import sys
import time
from psutil import Process
from datetime import datetime
from threading import Thread
from ansible.runner import Runner
from ansible.inventory import Inventory
from django.db import models
from farmer.settings import WORKER_TIMEOUT, ANSIBLE_FORKS
class Task(models.Model):
    """An ansible shell command fanned out over an inventory pattern.

    A Task owns one :class:`Job` per matched host; ``rc`` summarizes the
    run (0 = all jobs succeeded, 1 = any job failed or timed out).
    """
    # hosts, like web_servers:host1 .
    inventory = models.TextField(null = False, blank = False)
    # 0, do not use sudo; 1, use sudo .
    sudo = models.BooleanField(default = True)
    # for example: ansible web_servers -m shell -a 'du -sh /tmp'
    # the 'du -sh /tmp' is cmd here
    cmd = models.TextField(null = False, blank = False)
    # return code of this job
    rc = models.IntegerField(null = True)
    # submitter
    farmer = models.TextField(null = False, blank = False)
    start = models.DateTimeField(null = True, blank = False)
    end = models.DateTimeField(null = True, blank = False)
    def run(self):
        """Execute the task asynchronously in a daemon thread."""
        t = Thread(target = self._run)
        t.setDaemon(True)
        t.start()
    def _run(self):
        """Worker body: launch the ansible run and collect per-host results.

        Creates one Job per inventory host, polls the async runner until it
        completes or WORKER_TIMEOUT elapses, then marks still-unfinished
        jobs as timed out and derives the overall task return code.
        """
        self.start = datetime.now()
        self.save()
        # initial jobs
        for host in map(lambda i: i.name, Inventory().get_hosts(pattern = self.inventory)):
            self.job_set.add(Job(host = host, cmd = self.cmd, start = datetime.now()))
        self.save()
        runner = Runner(module_name = 'shell', module_args = self.cmd, \
            pattern = self.inventory, sudo = self.sudo, forks = ANSIBLE_FORKS)
        _, poller = runner.run_async(time_limit = WORKER_TIMEOUT)
        now = time.time()
        while True:
            if poller.completed:
                break
            if time.time() - now > WORKER_TIMEOUT: # TIMEOUT
                break
            # Partial results arrive incrementally; persist each host's
            # outcome as soon as it is reported.
            results = poller.poll()
            results = results.get('contacted')
            if results:
                for host, result in results.items():
                    job = self.job_set.get(host = host)
                    job.end = result.get('end')
                    job.rc = result.get('rc')
                    job.stdout = result.get('stdout')
                    job.stderr = result.get('stderr')
                    job.save()
            time.sleep(1)
        # Jobs with rc still None never reported back -> treat as timed out.
        # NOTE(review): this relies on Python 2's list-returning filter();
        # under Python 3 the first iteration would exhaust jobs_timeout
        # before the truthiness test below -- confirm target interpreter.
        jobs_timeout = filter(lambda job: job.rc is None, self.job_set.all())
        jobs_failed = filter(lambda job: job.rc, self.job_set.all())
        for job in jobs_timeout:
            job.rc = 1
            job.stderr = 'JOB TIMEOUT' # marked as 'TIMEOUT'
            job.save()
        self.rc = (jobs_timeout or jobs_failed) and 1 or 0
        self.end = datetime.now()
        self.save()
        self.done()
    def done(self):
        """Kill any child processes this worker spawned (best effort)."""
        try:
            myself = Process(os.getpid())
            for child in myself.get_children():
                child.kill()
        except Exception as e:
            # Best-effort cleanup: report but never propagate.
            sys.stderr.write(str(e) + '\n')
    def __unicode__(self):
        return self.inventory + ' -> ' + self.cmd
class Job(models.Model):
    """Per-host execution record belonging to a :class:`Task`."""
    task = models.ForeignKey(Task)
    # target host name, as resolved from the task's inventory pattern
    host = models.TextField(null = False, blank = False)
    cmd = models.TextField(null = False, blank = False)
    start = models.DateTimeField(null = True, blank = False)
    end = models.DateTimeField(null = True, blank = False)
    # shell return code; None while the job has not reported back
    rc = models.IntegerField(null = True)
    stdout = models.TextField(null = True)
    stderr = models.TextField(null = True)
    def __unicode__(self):
        return self.host + ' : ' + self.cmd
| #coding=utf8
import os
import sys
import time
from psutil import Process
from datetime import datetime
from threading import Thread
from ansible.runner import Runner
from ansible.inventory import Inventory
from django.db import models
from farmer.settings import WORKER_TIMEOUT, ANSIBLE_FORKS
class Task(models.Model):
# hosts, like web_servers:host1 .
inventory = models.TextField(null = False, blank = False)
# 0, do not use sudo; 1, use sudo .
sudo = models.BooleanField(default = True)
# for example: ansible web_servers -m shell -a 'du -sh /tmp'
# the 'du -sh /tmp' is cmd here
cmd = models.TextField(null = False, blank = False)
# return code of this job
rc = models.IntegerField(null = True)
# submitter
farmer = models.TextField(null = False, blank = False)
start = models.DateTimeField(null = True, blank = False)
end = models.DateTimeField(null = True, blank = False)
def run(self):
t = Thread(target = self._run)
t.setDaemon(True)
t.start()
def _run(self):
self.start = datetime.now()
self.save()
# initial jobs
for host in map(lambda i: i.name, Inventory().get_hosts(pattern = self.inventory)):
self.job_set.add(Job(host = host, cmd = self.cmd, start = datetime.now()))
self.save()
runner = Runner(module_name = 'shell', module_args = self.cmd, \
pattern = self.inventory, sudo = self.sudo, forks = ANSIBLE_FORKS)
_, poller = runner.run_async(time_limit = WORKER_TIMEOUT)
now = time.time()
while True:
if poller.completed:
break
if time.time() - now > WORKER_TIMEOUT: # TIMEOUT
break
results = poller.poll()
results = results.get('contacted')
if results:
for host, result in results.items():
job = self.job_set.get(host = host)
job.end = result.get('end')
job.rc = result.get('rc')
job.stdout = result.get('stdout')
job.stderr = result.get('stderr')
job.save()
time.sleep(1)
jobs_timeout = filter(lambda job: job.rc is None, self.job_set.all())
jobs_failed = filter(lambda job: job.rc, self.job_set.all())
for job in jobs_timeout:
job.rc = 1
job.stderr = 'TIMEOUT' # marked as 'TIMEOUT'
job.save()
self.rc = (jobs_timeout or jobs_failed) and 1 or 0
self.end = datetime.now()
self.save()
self.done()
def done(self):
try:
myself = Process(os.getpid())
for child in myself.get_children():
child.kill()
except Exception as e:
sys.stderr.write(str(e) + '\n')
def __unicode__(self):
return self.inventory + ' -> ' + self.cmd
class Job(models.Model):
task = models.ForeignKey(Task)
host = models.TextField(null = False, blank = False)
cmd = models.TextField(null = False, blank = False)
start = models.DateTimeField(null = True, blank = False)
end = models.DateTimeField(null = True, blank = False)
rc = models.IntegerField(null = True)
stdout = models.TextField(null = True)
stderr = models.TextField(null = True)
def __unicode__(self):
return self.host + ' : ' + self.cmd
| Python | 0.020455 |
5acc7d50cbe199af49aece28b95ea97484ae31c7 | Add solution class for Ghia et al. (1982) | snake/solutions/ghiaEtAl1982.py | snake/solutions/ghiaEtAl1982.py | """
Implementation of the class `GhiaEtAl1982` that reads the centerline velocities
reported in Ghia et al. (1982).
_References:_
* Ghia, U. K. N. G., Ghia, K. N., & Shin, C. T. (1982).
High-Re solutions for incompressible flow using the Navier-Stokes equations
and a multigrid method.
Journal of computational physics, 48(3), 387-411.
"""
import os
import numpy
class GhiaEtAl1982(object):
  """
  Container to store results from Ghia et al. (1982).

  Attributes ``y``/``u`` hold the u-velocity profile along the vertical
  centerline; ``x``/``v`` hold the v-velocity profile along the
  horizontal centerline of the lid-driven cavity.
  """
  def __init__(self, Re=None, file_path=None):
    """
    Initialization.

    Parameters
    ----------
    Re: float, optional
      Desired Reynolds number;
      default: None.
    file_path: string, optional
      Path of the file containing the validation data;
      default: None.
    """
    self.y, self.u = None, None
    self.x, self.v = None, None
    if Re:
      self.read_centerline_velocities(Re, file_path=file_path)
  def read_centerline_velocities(self, Re, file_path=None):
    """
    Reads the centerline velocities from file and for a given Reynolds number.

    Parameters
    ----------
    Re: float
      Desired Reynolds number.
    file_path: string, optional
      Path of the file containing the validation data;
      default: None (will be read the file located in `resources` directory of
      the `snake` package).
    """
    if not file_path:
      # Default to the data file shipped with the snake package; requires
      # the SNAKE environment variable to point at the package root.
      file_path = os.path.join(os.environ['SNAKE'],
                               'resources',
                               'validationData',
                               'ghia_et_al_1982_lid_driven_cavity.dat')
    # Normalize Re to the integer-string keys used by the data file.
    Re = str(int(round(Re)))
    # column indices in file with experimental results
    cols = {'100': {'u': 1, 'v': 7},
            '1000': {'u': 2, 'v': 8},
            '3200': {'u': 3, 'v': 9},
            '5000': {'u': 4, 'v': 10},
            '10000': {'u': 5, 'v': 11}}
    # NOTE(review): only the five tabulated Reynolds numbers are available;
    # any other value raises KeyError below.
    with open(file_path, 'r') as infile:
      y, u, x, v = numpy.loadtxt(infile,
                                 dtype=float,
                                 usecols=(0, cols[Re]['u'], 6, cols[Re]['v']),
                                 unpack=True)
    self.y, self.u = y, u
    self.x, self.v = x, v
| Python | 0 | |
a893a8f9375164cbbec4e276ae73f181f74fd9ae | create image,py | src/image.py | src/image.py | #
# image.py
# Created by pira on 2017/07/28.
#
#coding: utf-8 | Python | 0.000001 | |
14068a2e3ca445c02895aed38420baf846338aae | Add smile detection example script. | scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py | scripts/examples/25-Machine-Learning/nn_haar_smile_detection.py | # Simle detection using Haar Cascade + CNN.
import sensor, time, image, os, nn
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to RGB565
sensor.set_framesize(sensor.QQVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load smile detection network
net = nn.load('/smile.network')
# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
# Find faces.
objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
# Detect smiles
for r in objects:
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0]/127 > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
| Python | 0 | |
b31e7a3471daefb79b1d63a433c480cf51b75745 | Create __init__.py | FireModules/FileDownloads/AccountBruting/__init__.py | FireModules/FileDownloads/AccountBruting/__init__.py | Python | 0.000429 | ||
11504d8087e963e4683d5dd3a0101772832e0c81 | Increase unit test coverage for Package Base API. | murano/tests/unit/packages/test_package_base.py | murano/tests/unit/packages/test_package_base.py | # Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import random
import semantic_version
import shutil
import string
import tempfile
from murano.packages import exceptions
from murano.packages import package_base
import murano.tests.unit.base as test_base
class TestPackageBase(test_base.MuranoTestCase):
@classmethod
def setUpClass(cls):
super(TestPackageBase, cls).setUpClass()
package_base.PackageBase.__abstractmethods__ = set()
cls.source_directory = tempfile.mkdtemp(dir=tempfile.tempdir)
cls.version = semantic_version.Version.coerce('1.2.3')
cls.mock_manifest = {
'Name': 'mock_display_name',
'FullName': 'mock_full_name',
'Type': 'Application',
'Version': '1.2.3',
'Description': 'test_description',
'Author': 'test_author',
'Supplier': 'test_supplier',
'Tags': ['tag1', 'tag2', 'tag3'],
'Logo': None
}
cls.package_base = package_base.PackageBase('test_format',
'test_runtime_version',
cls.source_directory,
cls.mock_manifest)
@classmethod
def tearDownClass(cls):
if os.path.isdir(cls.source_directory):
shutil.rmtree(cls.source_directory)
def test_create_package_base_without_full_name(self):
with self.assertRaisesRegexp(exceptions.PackageFormatError,
'FullName is not specified'):
package_base.PackageBase('test_format',
'test_runtime_version',
'test_source_directory',
manifest={'FullName': None})
def test_create_package_base_with_invalid_full_name(self):
full_names = ['.invalid_name_1', 'invalid..name..2', 'invalid name 3']
for full_name in full_names:
expected_error_message = 'Invalid FullName {0}'.format(full_name)
with self.assertRaisesRegexp(exceptions.PackageFormatError,
expected_error_message):
package_base.PackageBase('test_format',
'test_runtime_version',
'test_source_directory',
manifest={'FullName': full_name})
def test_create_package_base_with_invalid_type(self):
package_type = 'Invalid'
with self.assertRaisesRegexp(exceptions.PackageFormatError,
'Invalid package Type {0}'
.format(package_type)):
package_base.PackageBase('test_format',
'test_runtime_version',
'test_source_directory',
manifest={'FullName': 'mock_full_name',
'Type': package_type})
def test_requirements_negative(self):
with self.assertRaisesRegexp(NotImplementedError, None):
self.package_base.requirements
def test_classes_negative(self):
with self.assertRaisesRegexp(NotImplementedError, None):
self.package_base.classes
def test_get_class_negative(self):
with self.assertRaisesRegexp(NotImplementedError, None):
self.package_base.get_class(None)
def test_ui_negative(self):
with self.assertRaisesRegexp(NotImplementedError, None):
self.package_base.ui
def test_full_name(self):
self.assertEqual(self.mock_manifest['FullName'],
self.package_base.full_name)
def test_source_directory(self):
self.assertEqual(self.source_directory,
self.package_base.source_directory)
def test_version(self):
self.assertEqual(self.version,
self.package_base.version)
def test_package_type(self):
self.assertEqual(self.mock_manifest['Type'],
self.package_base.package_type)
def test_display_name(self):
self.assertEqual(self.mock_manifest['Name'],
self.package_base.display_name)
def test_description(self):
self.assertEqual(self.mock_manifest['Description'],
self.package_base.description)
def test_author(self):
self.assertEqual(self.mock_manifest['Author'],
self.package_base.author)
def test_supplier(self):
self.assertEqual(self.mock_manifest['Supplier'],
self.package_base.supplier)
def test_tags(self):
self.assertEqual(self.mock_manifest['Tags'],
self.package_base.tags)
def test_logo_without_file_name(self):
self.assertIsNone(self.package_base.logo)
def test_logo_with_invalid_logo_path(self):
expected_error_message = 'Unable to load logo'
self.package_base._logo = ''.join(random.choice(string.ascii_letters)
for _ in range(10))
with self.assertRaisesRegexp(exceptions.PackageLoadError,
expected_error_message):
self.package_base.logo
self.package_base._logo = self.mock_manifest['Logo']
@mock.patch('murano.packages.package_base.imghdr',
what=mock.MagicMock(return_value='xyz'))
def test_load_image_with_invalid_extension(self, mock_imghdr):
expected_error_message = 'Unsupported Format.'
with self.assertRaisesRegexp(exceptions.PackageLoadError,
expected_error_message):
self.package_base._load_image('logo.xyz', 'logo.xyz', 'logo')
full_path = os.path.join(self.package_base._source_directory,
'logo.xyz')
mock_imghdr.what.assert_called_once_with(full_path)
@mock.patch('murano.packages.package_base.imghdr',
what=mock.MagicMock(return_value='png'))
@mock.patch('murano.packages.package_base.os')
def test_load_image_with_oversized_image(self, mock_os, mock_imghdr):
mock_os.stat.return_value = mock.MagicMock(st_size=5000 * 1024)
mock_os.isfile = mock.MagicMock(return_value=True)
expected_error_message = 'Max allowed size is {0}'.format(500 * 1024)
with self.assertRaisesRegexp(exceptions.PackageLoadError,
expected_error_message):
self.package_base._load_image('logo.xyz', 'logo.xyz', 'logo')
def test_meta(self):
self.assertIsNone(self.package_base.meta)
def test_get_resource(self):
test_name = 'test_resource_name'
expected_dir = os.path.join(self.source_directory, 'Resources',
test_name)
self.assertEqual(expected_dir, self.package_base.get_resource(
test_name))
| Python | 0.000003 | |
7a4df9d8c385ed53e29e5171c115939920a271b3 | Add a setup.py script | setup.py | setup.py | # Use the setuptools package if it is available. It's preferred
# because it creates an exe file on Windows for Python scripts.
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(name='csv_util',
entry_points={'console_scripts': [
# 'EXECUTABLE_NAME = csv_util.scripts.script_module_name:entry_function_name'
]
}) | Python | 0.000001 | |
1e7548a5b237f18c3bf5918a2254d04125492372 | Add setup script | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='rapidtest',
version='0.1',
author='Simon Zhang',
license='MIT',
packages=find_packages(),
install_requires=[])
| Python | 0.000001 | |
61fcca809b31372bb5e793359df243cff5ee23cf | Add the setup.py file | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='fedmsg_fasclient',
version='0.1',
description='A fedmsg consumer that runs the fasClient based on fedmsg FAS messages',
license="LGPLv2+",
author='Janez Nemanič, Ralph Bean and Pierre-Yves Chibon',
author_email='admin@fedoraproject.org',
url='https://github.com/fedora-infra/fedmsg-fasclient',
install_requires=["fedmsg"],
packages=[],
py_modules=['fedmsg_fasclient'],
entry_points="""
[moksha.consumer]
fedmsg_fasclient = fedmsg_fasclient:FasClientConsumer
""",
)
| Python | 0.000002 | |
139123ddb81eec12d0f932ff6ff73aadb4b418cc | Add decorator to make a Node class from a regular function | ocradmin/lib/nodetree/decorators.py | ocradmin/lib/nodetree/decorators.py | """
Nodetree decorators.
"""
import inspect
import textwrap
import node
def underscore_to_camelcase(value):
    """Convert an underscore_separated name to lowerCamelCase.

    The first segment is lower-cased and every following segment is
    capitalized; an empty segment (from a doubled or trailing underscore)
    is emitted as a literal '_' without consuming a case transform.

    >>> underscore_to_camelcase("some_node_name")
    'someNodeName'
    """
    def camelcase():
        # First segment stays lower-case; all later segments capitalize.
        yield str.lower
        while True:
            yield str.capitalize
    c = camelcase()
    # next(c) instead of c.next(): the builtin works on Python 2.6+ as well
    # as Python 3, where generators no longer have a .next() method.
    return "".join(next(c)(x) if x else '_' for x in value.split("_"))
def upper_camelcase(value):
    """Convert an underscore_separated name to UpperCamelCase."""
    camel = underscore_to_camelcase(value)
    head, tail = camel[0], camel[1:]
    return head.capitalize() + tail
class makenode(object):
    """Decorator for constructing a nodetree Node class out
    of a single function.

    Applied as ``@makenode([InType, ...], OutType, **extra)``, it returns
    an *instance* of a dynamically created ``<FuncName>Node`` subclass of
    ``node.Node`` whose ``_eval`` calls the wrapped function with its
    evaluated inputs.
    """
    def __init__(self, intypes, outtype, **kwargs):
        # intypes/outtype: input/output type descriptors for the node;
        # extra keyword arguments are copied verbatim onto the class dict.
        self.intypes = intypes
        self.outtype = outtype
        self.kwargs = kwargs
    def __call__(self, fun):
        argspec = inspect.getargspec(fun)
        def _eval(self):
            # One evaluated input per positional argument of the wrapped
            # function, in declaration order.
            args = [self.eval_input(i) for i in range(len(argspec.args))]
            return fun(*args)
        doc = fun.__doc__ if not fun.__doc__ is None \
                else "No description provided"
        clsname = upper_camelcase(fun.__name__)
        # Namespace label comes from the last component of the module path.
        ns = upper_camelcase(fun.__module__.split(".")[-1])
        clsdict = dict(
            __module__ = fun.__module__,
            __doc__ = doc,
            _eval = _eval,
            arity = len(self.intypes),
            intypes = self.intypes,
            outtype = self.outtype,
            # NOTE(review): dedent(fun.__doc__) raises TypeError when the
            # wrapped function has no docstring, despite the fallback
            # above -- confirm decorated functions always carry one.
            description = textwrap.dedent(fun.__doc__),
            name = "%s::%s" % (ns, clsname),
        )
        clsdict.update(self.kwargs)
        return type(clsname + "Node", (node.Node,), clsdict)()
| Python | 0.000001 | |
fe7f07cbd9ff9844efa2b191a900f6efb9de576e | add db model file | model/db.py | model/db.py | # db model - all db handlers
| Python | 0 | |
8ec524a7a64c55f0759e18ea4b70c63c9c83f99a | Add admin for the various models | pombola/interests_register/admin.py | pombola/interests_register/admin.py | from django.contrib import admin
from . import models
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'sort_order']
search_fields = ['name']
class ReleaseAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['slug', 'name', 'date']
search_fields = ['name']
date_hierarchy = 'date'
class LineItemInlineAdmin(admin.TabularInline):
model = models.EntryLineItem
# extra = 2
fields = [ 'key', 'value' ]
class EntryAdmin(admin.ModelAdmin):
inlines = [LineItemInlineAdmin]
list_display = ['id', 'person', 'category', 'release', 'sort_order']
list_filter = [ 'release', 'category' ]
search_fields = ['person__legal_name']
# Add these to the admin
admin.site.register( models.Category, CategoryAdmin)
admin.site.register( models.Release, ReleaseAdmin)
admin.site.register( models.Entry, EntryAdmin)
| Python | 0 | |
a4f49b988a10afc160c217d32da46ea854059e8c | Add migration file | ureport/polls/migrations/0060_populate_category_displayed.py | ureport/polls/migrations/0060_populate_category_displayed.py | # Generated by Django 2.2.10 on 2020-05-05 15:01
from django.db import migrations
def noop(apps, schema_editor):  # pragma: no cover
    """Reverse-migration no-op: populated values are simply left in place."""
    pass
def populate_category_displayed(apps, schema_editor):  # pragma: no cover
    """Copy each PollResponseCategory.category into category_displayed.

    Rows whose category is None are skipped; queryset .update() is used per
    row so model save() logic and signals are not triggered.
    """
    PollResponseCategory = apps.get_model("polls", "PollResponseCategory")
    updated = 0
    for obj in PollResponseCategory.objects.all().exclude(category=None):
        PollResponseCategory.objects.filter(id=obj.id).update(category_displayed=obj.category)
        updated += 1
    if updated > 0:
        print(f"populated {updated} poll response categories")
class Migration(migrations.Migration):
    """Data migration backfilling category_displayed from category."""
    dependencies = [
        ("polls", "0059_pollresponsecategory_category_displayed"),
    ]
    # Forward pass copies the values; reverse is a no-op (data is kept).
    operations = [migrations.RunPython(populate_category_displayed, noop)]
| Python | 0.000001 | |
b7cd3081585c0a4695db4f85b7db8e346a525e23 | add to pypi | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name="libraw.py",
version="1.0",
description="python bindings using ctypes for libraw",
url="https://github.com/paroj/libraw.py",
author="Pavel Rojtberg",
license="LGPLv2",
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
py_modules=["libraw"]
)
| Python | 0 | |
88cb2155d55100d9b00dca1ecf4f9a01dec7c3f5 | Add missing 'import os' for integrationtest/vm/basic/suite_setup.py | integrationtest/vm/basic/suite_setup.py | integrationtest/vm/basic/suite_setup.py | '''
@author: Frank
'''
import os
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
def test():
setup = setup_actions.SetupAction()
setup.plan = test_lib.all_config
setup.run()
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
deploy_operations.deploy_initial_database(test_lib.deploy_config)
test_util.test_pass('Suite Setup Success')
| '''
@author: Frank
'''
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
def test():
setup = setup_actions.SetupAction()
setup.plan = test_lib.all_config
setup.run()
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
deploy_operations.deploy_initial_database(test_lib.deploy_config)
test_util.test_pass('Suite Setup Success')
| Python | 0.000011 |
f0205534cd1c812db94921b29ebef4207039e56b | work in progress (kind of) | hamster_sun.py | hamster_sun.py | #!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2010 Toms Bauģis <toms.baugis at gmail.com>
"""Base template"""
import gtk
from lib import graphics
import math
import hamster.client
import datetime as dt
from collections import defaultdict
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self)
storage = hamster.client.Storage()
self.facts = storage.get_facts(dt.date(2007,1,1), dt.date.today())
self.day_counts = {}
activities, categories = defaultdict(int), defaultdict(int)
print len(self.facts)
for fact in self.facts:
self.day_counts.setdefault(fact['start_time'].date(), defaultdict(list))
self.day_counts[fact['start_time'].date()][fact['category']].append(fact)
activities[fact['name']] += 1
categories[fact['category']] += 1
if fact['end_time'] and fact['start_time'].date() != fact['end_time'].date():
self.day_counts.setdefault(fact['end_time'].date(), defaultdict(list))
self.day_counts[fact['end_time'].date()][fact['category']].append(fact)
self.activities = [activity[0] for activity in sorted(activities.items(), key=lambda item:item[1], reverse=True)]
self.categories = categories.keys()
self.connect("on-enter-frame", self.on_enter_frame)
def on_enter_frame(self, scene, context):
g = graphics.Graphics(context)
step = (360.0 / 365) * math.pi / 180.0
g.set_color("#999")
g.set_line_style(width = 1)
"""
for i in range(365):
g.move_to(self.width / 2, self.height / 2)
g.rel_line_to(math.cos(step * i) * 300,
math.sin(step * i) * 300)
g.stroke()
"""
colors = ("#ff0000", "#00ff00", "#0000ff", "#aaa000")
for day in self.day_counts:
year_day = day.timetuple().tm_yday
angle = year_day * step
for j, category in enumerate(self.day_counts[day]):
distance = 20 * (day.year - 2005) + self.categories.index(category) * 60 + 30
color = colors[self.categories.index(category)]
delta = dt.timedelta()
for fact in self.day_counts[day][category]:
delta += fact['delta']
hours = delta.seconds / 60 / 60
height = hours / 16.0 * 20
g.set_color(color)
#bar per category
g.move_to(math.cos(angle) * distance + self.width / 2,
math.sin(angle) * distance + self.height / 2)
g.line_to(math.cos(angle) * (distance + height) + self.width / 2 ,
math.sin(angle) * (distance + height) + self.height / 2)
g.line_to(math.cos(angle+step) * (distance + height) + self.width / 2 ,
math.sin(angle+step) * (distance + height) + self.height / 2)
g.line_to(math.cos(angle+step) * distance + self.width / 2,
math.sin(angle+step) * distance + self.height / 2)
g.close_path()
#g.fill_preserve()
g.stroke()
g.fill("#aaa")
for i, color in enumerate(colors):
g.move_to(0, i * 20)
g.set_color(color)
g.show_text(self.categories[i])
class BasicWindow:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_size_request(600, 500)
window.connect("delete_event", lambda *args: gtk.main_quit())
window.add(Scene())
window.show_all()
example = BasicWindow()
gtk.main()
| Python | 0 | |
e3cbc79cc60e21978fe682b73413e9de19b71543 | add a print hello world function | helloAlyssa.py | helloAlyssa.py | #This is my hello world program
print ('Hello World')
| Python | 0.999999 | |
9339307b6bd42ad014e528d337fc9f195c632245 | Add tick class | zaifbot/exchange/tick.py | zaifbot/exchange/tick.py | class Tick:
def __init__(self, currency_pair):
self.size = currency_pair.info['aux_unit_step']
self._decimal_digits = currency_pair.info['aux_unit_point']
def truncate_price(self, price):
remainder = price % self.size
truncated_price = price - remainder
if self._decimal_digits == 0:
return int(truncated_price)
return truncated_price
| Python | 0.000001 | |
d9d84083a488ad1b4643298d7a75b54b4e0e34be | add OptionChainConsistencyRegressionAlgorithm | Algorithm.Python/OptionChainConsistencyRegressionAlgorithm.py | Algorithm.Python/OptionChainConsistencyRegressionAlgorithm.py | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Indicators import *
from datetime import datetime, timedelta
### <summary>
### This regression algorithm checks if all the option chain data coming to the algo is consistent with current securities manager state
### </summary>
### <meta name="tag" content="regression test" />
### <meta name="tag" content="options" />
### <meta name="tag" content="using data" />
### <meta name="tag" content="filter selection" />
class OptionChainConsistencyRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetCash(10000)
self.SetStartDate(2015,12,24)
self.SetEndDate(2015,12,24)
equity = self.AddEquity("GOOG")
option = self.AddOption("GOOG")
# set our strike/expiry filter for this option chain
option.SetFilter(self.UniverseFunc)
self.SetBenchmark(equity.Symbol)
self.OptionSymbol = option.Symbol
equity.SetDataNormalizationMode(DataNormalizationMode.Raw)
def OnData(self, slice):
if self.Portfolio.Invested: return
for kvp in slice.OptionChains:
chain = kvp.Value
for o in chain:
if not self.Securities.ContainsKey(o.Symbol):
# inconsistency found: option chains contains contract information that is not available in securities manager and not available for trading
self.Log("inconsistency found: option chains contains contract {0} that is not available in securities manager and not available for trading".format(o.Symbol.Value))
contracts = filter(lambda x: x.Expiry.date() == self.Time.date() and
x.Strike < chain.Underlying.Price and
x.Right == OptionRight.Call, chain)
sorted_contracts = sorted(contracts, key = lambda x: x.Strike, reverse = True)
if len(sorted_contracts) > 2:
self.MarketOrder(sorted_contracts[2].Symbol, 1)
self.MarketOnCloseOrder(sorted_contracts[2].Symbol, -1)
# set our strike/expiry filter for this option chain
def UniverseFunc(self, universe):
return universe.IncludeWeeklys().Strikes(-2, 2).Expiration(timedelta(0), timedelta(10))
def OnOrderEvent(self, orderEvent):
self.Log(str(orderEvent)) | using System;
namespace QuantConnect.Algorithm.Python
{
public class OptionChainConsistencyRegressionAlgorithm
{
public OptionChainConsistencyRegressionAlgorithm()
{
}
}
}
| Python | 0.000001 |
aafb77596ae0cb6c27b2564434367d2b4d5debd1 | Add tests | Orange/widgets/visualize/tests/test_owscatterplot.py | Orange/widgets/visualize/tests/test_owscatterplot.py | import numpy as np
from Orange.data import Table
from Orange.widgets.tests.base import WidgetTest
from Orange.widgets.visualize.owscatterplot import OWScatterPlot
class TestOWScatterPlot(WidgetTest):
    """Input-handling tests for the OWScatterPlot widget.

    Each test feeds the widget's data / subset-data inputs and checks that
    only the targeted attribute changes.
    """
    def setUp(self):
        self.widget = self.create_widget(OWScatterPlot)
        self.data = Table("iris")
    def test_set_data(self):
        # Main data input is stored; subset input stays untouched.
        self.widget.set_data(self.data)
        self.assertEqual(self.widget.data, self.data)
        self.assertEqual(self.widget.subset_data, None)
    def test_subset_data(self):
        # Subset input is stored independently of the main data input.
        self.widget.set_subset_data(self.data[:30])
        self.assertEqual(len(self.widget.subset_data), 30)
        self.assertEqual(self.widget.data, None)
        np.testing.assert_array_equal(self.widget.subset_data, self.data[:30])
    def test_set_data_none(self):
        # Clearing the main data leaves both attributes None.
        self.widget.set_data(None)
        self.assertEqual(self.widget.data, None)
        self.assertEqual(self.widget.subset_data, None)
    def test_subset_data_none(self):
        # Clearing the subset leaves both attributes None.
        self.widget.set_subset_data(None)
        self.assertEqual(self.widget.subset_data, None)
        self.assertEqual(self.widget.data, None)
| Python | 0 | |
47ad7f4d3b69315e25ae96099fe73b4d9cd7666e | Use file extension to select config file parser | dotbot/config.py | dotbot/config.py | import yaml
import json
import os.path
from .util import string
class ConfigReader(object):
    """Loads a dotbot configuration file, parsed as JSON or YAML."""
    def __init__(self, config_file_path):
        self._config = self._read(config_file_path)
    def _read(self, config_file_path):
        """Parse the config file, dispatching on its extension.

        Files ending in '.json' are parsed with the json module; everything
        else is treated as YAML.  Any failure is wrapped in ReadingError
        carrying an indented copy of the underlying message.
        """
        try:
            _, ext = os.path.splitext(config_file_path)
            with open(config_file_path) as fin:
                # (leftover debug "print ext" removed: library code must not
                # write to stdout, and the Py2 print statement breaks Py3)
                if ext == '.json':
                    data = json.load(fin)
                else:
                    data = yaml.safe_load(fin)
            return data
        except Exception as e:
            msg = string.indent_lines(str(e))
            raise ReadingError('Could not read config file:\n%s' % msg)
    def get_config(self):
        """Return the parsed configuration data."""
        return self._config
class ReadingError(Exception):
pass
| import yaml
import json
from .util import string
class ConfigReader(object):
def __init__(self, config_file_path):
self._config = self._read(config_file_path)
def _read(self, config_file_path):
try:
with open(config_file_path) as fin:
try:
data = yaml.safe_load(fin)
except Exception as e:
# try falling back to JSON, but return original exception
# if that fails too
try:
fin.seek(0)
data = json.load(fin)
except Exception:
raise e
return data
except Exception as e:
msg = string.indent_lines(str(e))
raise ReadingError('Could not read config file:\n%s' % msg)
def get_config(self):
return self._config
class ReadingError(Exception):
pass
| Python | 0 |
fec74a5401f925755484955a1b38dd3044824eb3 | Create npy2ckpt.py | npy2ckpt.py | npy2ckpt.py | """Conversion of the .npy weights into the .ckpt ones.
This script converts the weights of the DeepLab-ResNet model
from the numpy format into the TensorFlow one.
"""
from __future__ import print_function
import argparse
import os
import tensorflow as tf
import numpy as np
from deeplab_resnet import DeepLabResNetModel
SAVE_DIR = './'
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="NPY to CKPT converter.")
parser.add_argument("npy_path", type=str,
help="Path to the .npy file, which contains the weights.")
parser.add_argument("--save_dir", type=str, default=SAVE_DIR,
help="Where to save the converted .ckpt file.")
return parser.parse_args()
def save(saver, sess, logdir):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, write_meta_graph=False)
print('The weights have been converted to {}.'.format(checkpoint_path))
def main():
"""Create the model and start the training."""
args = get_arguments()
# Default image.
image_batch = tf.constant(0, tf.float32, shape=[1, 321, 321, 3])
# Create network.
net = DeepLabResNetModel({'data': image_batch})
var_list = tf.trainable_variables()
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
init = tf.initialize_all_variables()
sess.run(init)
# Loading .npy weights.
net.load(args.npy_path, sess)
# Saver for converting the loaded weights into .ckpt.
saver = tf.train.Saver(var_list=var_list)
save(saver, sess, args.save_dir)
if __name__ == '__main__':
main()
| Python | 0.000006 | |
8b5bf433b304895f04813c64d556316c48c046fe | add setup.py for distribute | setup.py | setup.py | #!/usr/bin/env python
import os, os.path
from distutils.core import setup, Extension
import distutils.msvccompiler
source_files = ["Engine.cpp", "Wrapper.cpp", "PyV8.cpp"]
macros = [("BOOST_PYTHON_STATIC_LIB", None)]
third_party_libraries = ["python", "boost", "v8"]
include_dirs = os.environ["INCLUDE"].split(';') + [os.path.join("lib", lib, "inc") for lib in third_party_libraries]
library_dirs = os.environ["LIB"].split(';') + [os.path.join("lib", lib, "lib") for lib in third_party_libraries]
libraries = ["winmm"]
pyv8 = Extension(name = "_PyV8",
sources = [os.path.join("src", file) for file in source_files],
define_macros = macros,
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = ["/O2", "/GL", "/MT", "/EHsc", "/Gy", "/Zi"],
extra_link_args = ["/DLL", "/OPT:REF", "/OPT:ICF", "/MACHINE:X86"],
)
setup(name='PyV8',
version='0.1',
description='Python Wrapper for Google V8 Engine',
author='Flier Lu',
author_email='flier.lu@gmail.com',
url='http://code.google.com/p/pyv8/',
license="Apache 2.0",
py_modules=['PyV8'],
ext_modules=[pyv8]
) | Python | 0.000001 | |
e24a354ae65db5874f51305b839a7ce553d44d78 | Build Sticks | GeoHat_V10/BuildSticks.py | GeoHat_V10/BuildSticks.py | #---------------------------------------------------------------------------------
# BuildSticks.py
#
# Description: Create sticks (lines between connected patches, with appropriate weights),
# from edge list csv file
#
# Requires: NetworkX to be stored in script folder (or installed)
# Create Edge List tool must be run first
#
# Inputs: <edge list> <Patch raster> <scratch directory>
# Output: <Patch connected attribute table (CSV format)>
#
# August 4, 2016
# Nathan Walker
# Building on code from John Fay
#
#---------------------------------------------------------------------------------
# Import modules
import sys, os, arcpy
import arcpy.sa as sa
##---FUNCTIONS---
# Message management
def msg(txt): print msg; arcpy.AddMessage(txt); return
# Input variables
edgeList = arcpy.GetParameterAsText(0)
patchRaster = arcpy.GetParameterAsText(1)
sticks = arcpy.GetParameterAsText(3)
# Output variables
outdir = arcpy.GetParameterAsText(2)
# set overwrite to true
arcpy.env.overwriteOutput = True
##---PROCESSES---
msg("Converting table to dbf")
# Convert csv to format that is editable and includes OID
edgeListDBF = arcpy.CopyRows_management(in_rows=edgeList, out_table=outdir + "/edgeList.dbf", config_keyword="")
# Add edge ID field
arcpy.AddField_management(in_table=edgeListDBF, field_name="EdgeID", field_type="LONG", field_precision="", field_scale="", field_length="", field_alias="", field_is_nullable="NULLABLE", field_is_required="NON_REQUIRED", field_domain="")
arcpy.CalculateField_management(edgeListDBF, "EdgeID", "!OID!", "PYTHON_9.3", "")
msg("Converting patch raster to polygon")
# Convert Raster to Polygon
patch_RtoP = arcpy.RasterToPolygon_conversion(patchRaster, "in_memory/Patch_RtoP", "NO_SIMPLIFY", "Value")
# Add X and Y fields to polygons, representing patch centroid locations
arcpy.AddField_management(patch_RtoP, "X", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management(patch_RtoP, "Y", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(patch_RtoP, "X", "!Shape.Centroid.X!", "PYTHON_9.3", "")
arcpy.CalculateField_management(patch_RtoP, "Y", "!Shape.Centroid.Y!", "PYTHON_9.3", "")
msg("Joining patch centroids to edge list")
# Join FromID to patch
arcpy.JoinField_management(edgeListDBF, "FromID", patch_RtoP, "GRIDCODE", "")
# Join ToID to patch
arcpy.JoinField_management(edgeListDBF, "ToID", patch_RtoP, "GRIDCODE", "")
msg("Convert X/Y start/end points to line")
# Create line from coordinates of From and To patches
arcpy.XYToLine_management(in_table=edgeListDBF, out_featureclass=sticks, startx_field="X", starty_field="Y", endx_field="X_1", endy_field="Y_1", line_type="GEODESIC", id_field="EdgeID", spatial_reference="PROJCS['WGS_1984_UTM_Zone_18S',GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',10000000.0],PARAMETER['Central_Meridian',-75.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]];-5120900 1900 10000;-100000 10000;-100000 10000;0.001;0.001;0.001;IsHighPrecision")
msg("Adding cost information to line")
# Join back cost information from edge list
arcpy.JoinField_management(sticks, "EdgeID", edgeListDBF, "EdgeID", "")
msg("Cleaning up")
# Delete extra fields
arcpy.DeleteField_management(in_table=sticks, drop_field="X;Y;X_1;Y_1;EdgeID_1;ID;GRIDCODE;X_12;Y_12;ID_1;GRIDCODE_1;X_12_13;Y_12_13")
# Delete temporary file
arcpy.Delete_management(in_data=outdir + "/edgeList.dbf", data_type="DbaseTable")
| Python | 0.000019 | |
56915ed7d290fff6e37859181781687590a2e974 | Remove early_stopping.py from estimator/contrib in favor of estimator/python/estimator/early_stopping.py. And the test. | tensorflow/contrib/estimator/python/estimator/early_stopping.py | tensorflow/contrib/estimator/python/estimator/early_stopping.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""early_stopping python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import early_stopping
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
early_stopping.__all__ = [
s for s in dir(early_stopping) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.early_stopping import *
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""early_stopping python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.contrib.estimator.python.estimator import early_stopping
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
early_stopping.__all__ = [
s for s in dir(early_stopping) if not s.startswith('__')
]
from tensorflow_estimator.contrib.estimator.python.estimator.early_stopping import *
| Python | 0.000131 |
1ee1d0daab4b8e123bc04996019fb12cc65b8888 | Add tISM SDB module (#36957) | salt/sdb/tism.py | salt/sdb/tism.py | # -*- coding: utf-8 -*-
'''
tISM - the Immutalbe Secrets Manager SDB Module
:maintainer: tISM
:maturity: New
:platform: all
.. versionadded:: TBD
This module will decrypt PGP encrypted secrets against a tISM server.
.. code::
sdb://<profile>/<encrypted secret>
sdb://tism/hQEMAzJ+GfdAB3KqAQf9E3cyvrPEWR1sf1tMvH0nrJ0bZa9kDFLPxvtwAOqlRiNp0F7IpiiVRF+h+sW5Mb4ffB1TElMzQ+/G5ptd6CjmgBfBsuGeajWmvLEi4lC6/9v1rYGjjLeOCCcN4Dl5AHlxUUaSrxB8akTDvSAnPvGhtRTZqDlltl5UEHsyYXM8RaeCrBw5Or1yvC9Ctx2saVp3xmALQvyhzkUv5pTb1mH0I9Z7E0ian07ZUOD+pVacDAf1oQcPpqkeNVTQQ15EP0fDuvnW+a0vxeLhkbFLfnwqhqEsvFxVFLHVLcs2ffE5cceeOMtVo7DS9fCtkdZr5hR7a+86n4hdKfwDMFXiBwSIPMkmY980N/H30L/r50+CBkuI/u4M2pXDcMYsvvt4ajCbJn91qaQ7BDI=
A profile must be setup in the minion configuration or pillar. If you want to use sdb in a runner or pillar you must also place a profile in the master configuration.
.. code-block:: yaml
tism:
driver: tism
url: https://my.tismd:8080/decrypt
token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhZG1pbiI6MSwiZXhwIjoxNTg1MTExNDYwLCJqdGkiOiI3NnA5cWNiMWdtdmw4Iiwia2V5cyI6WyJBTEwiXX0.RtAhG6Uorf5xnSf4Ya_GwJnoHkCsql4r1_hiOeDSLzo
'''
import logging
import json
import salt.utils.http as http
from salt.exceptions import SaltConfigurationError
log = logging.getLogger(__name__)
__virtualname__ = "tism"
def __virtual__():
'''
This module has no other system dependencies
'''
return __virtualname__
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a decrypted secret from the tISMd API
'''
if not profile.get('url') or not profile.get('token'):
raise SaltConfigurationError("url and/or token missing from the tism sdb profile")
request = {"token": profile['token'], "encsecret": key}
result = http.query(
profile['url'],
method='POST',
data=json.dumps(request),
)
decrypted = result.get('body')
if not decrypted:
log.warning('tism.get sdb decryption request failed with error {0}'.format(result.get('error', 'unknown')))
return "ERROR"+str(result.get('status', 'unknown'))
return decrypted
| Python | 0 | |
8dad8cf8c83eba037b29d3243b29b985dc4004a1 | add setup.py | setup.py | setup.py | #!/usr/bin/python
from distutils.core import setup
setup(
name='telepathy-python',
version='0.0.1',
packages=['telepathy'],
)
| Python | 0 | |
b5c2986ccf3c70b9cb52d0374c53bc8232719554 | Add dbm_metrics.py script where the AIS method will be stored | pylearn2/scripts/dbm/dbm_metrics.py | pylearn2/scripts/dbm/dbm_metrics.py | #!/usr/bin/env python
import argparse
if __name__ == '__main__':
# Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("metric", help="the desired metric",
choices=["ais"])
parser.add_argument("model_path", help="path to the pickled DBM model")
args = parser.parse_args()
metric = args.metric
model_path = args.model_path
| Python | 0 | |
a8b079b8be1e9559770dd0f701385b2361158e24 | Add tests_require to setup.py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
import sys
if sys.version_info < (2, 6):
raise ImportError("smart_open requires python >= 2.6")
# TODO add ez_setup?
from setuptools import setup, find_packages
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
setup(
name = 'smart_open',
version = '1.3.4',
description = 'Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description = read('README.rst'),
packages=find_packages(),
author = u'Radim Řehůřek',
author_email = 'me@radimrehurek.com',
maintainer = u'Radim Řehůřek',
maintainer_email = 'me@radimrehurek.com',
url = 'https://github.com/piskvorky/smart_open',
download_url = 'http://pypi.python.org/pypi/smart_open',
keywords = 'file streaming, s3, hdfs',
license = 'MIT',
platforms = 'any',
install_requires=[
'boto >= 2.32',
'bz2file',
'requests',
],
tests_require=[
'mock',
'moto',
'responses',
],
test_suite="smart_open.tests",
classifiers = [ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
import sys
if sys.version_info < (2, 6):
raise ImportError("smart_open requires python >= 2.6")
# TODO add ez_setup?
from setuptools import setup, find_packages
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
setup(
name = 'smart_open',
version = '1.3.4',
description = 'Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description = read('README.rst'),
packages=find_packages(),
author = u'Radim Řehůřek',
author_email = 'me@radimrehurek.com',
maintainer = u'Radim Řehůřek',
maintainer_email = 'me@radimrehurek.com',
url = 'https://github.com/piskvorky/smart_open',
download_url = 'http://pypi.python.org/pypi/smart_open',
keywords = 'file streaming, s3, hdfs',
license = 'MIT',
platforms = 'any',
install_requires=[
'boto >= 2.32',
'bz2file',
'requests',
],
test_suite="smart_open.tests",
classifiers = [ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
| Python | 0.000001 |
c230fc69e2509c79190e53589457f161accd1626 | Change long_description in setup.py. | setup.py | setup.py | import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=description,
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.45',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('mycli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for MySQL Database. With auto-completion and syntax highlighting.'
setup(
name='mycli',
author='Amjith Ramanujam',
author_email='amjith[dot]r[at]gmail.com',
version=version,
license='LICENSE.txt',
url='http://mycli.net',
packages=find_packages(),
package_data={'mycli': ['myclirc', '../AUTHORS', '../SPONSORS']},
description=description,
long_description=open('README.md').read(),
install_requires=[
'click >= 4.1',
'Pygments >= 2.0', # Pygments has to be Capitalcased. WTF?
'prompt_toolkit==0.45',
'PyMySQL >= 0.6.6',
'sqlparse == 0.1.14',
'configobj >= 5.0.6',
],
entry_points='''
[console_scripts]
mycli=mycli.main:cli
''',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python | 0 |
2a331f0165b2e3874243fcfecc3e3deab2760ff4 | Add python setup filie | setup.py | setup.py | from setuptools import setup
setup(name='bitevery',
version='0.0.1.b2',
description='BitEvery Python API',
url='https://www.bitevery.com',
author='BitEvery',
author_email='support@bitevery.com',
license='MIT',
packages=['bitevery'],
zip_safe=False) | Python | 0.000002 | |
57bfd23957bdd535b5ae21ed1df3ff25dd75a8bd | Add setup.py | setup.py | setup.py | from setuptools import setup
setup(
name='pirx',
version='0.1',
author='Piotr Wasilewski',
author_email='wasilewski.piotrek@gmail.com',
description='Django settings builder',
license='MIT',
keywords='django settings build builder',
url='https://github.com/piotrekw/pirx',
scripts=['scripts/pirx-build.py'],
packages=['pirx']
)
| Python | 0.000001 | |
76a8834243cc70f3065b686dd09004f1dc3ffdb0 | Create rapideye_remover_bordas_catalogo.py | rapideye_remover_bordas_catalogo.py | rapideye_remover_bordas_catalogo.py | from osgeo import ogr
import os
from osgeo import osr
from qgis.core import *
shapefile = "C:/Users/pedro.mendes/Desktop/Brasil_00_2016.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapefile, 0)
layer = dataSource.GetLayer()
proj=layer.GetSpatialRef()
outputMergefn = "C:/Users/pedro.mendes/Desktop/Brasil_01_2016.shp"
driverName = 'ESRI Shapefile'
geometryType = ogr.wkbPolygon
out_driver = ogr.GetDriverByName( driverName )
if os.path.exists(outputMergefn):
out_driver.DeleteDataSource(outputMergefn)
out_ds = out_driver.CreateDataSource(outputMergefn)
out_layer = out_ds.CreateLayer(outputMergefn, geom_type=geometryType, srs=proj)
juntaDefn=layer.GetLayerDefn()
juntaFeat=ogr.Geometry(3)
c=0
for feature in layer:
geom = feature.GetGeometryRef()
geom2 = geom.Difference(juntaFeat)
juntaFeat= juntaFeat.Union(geom)
out_feat = ogr.Feature(out_layer.GetLayerDefn())
out_feat.SetGeometry(geom2)
out_layer.CreateFeature(out_feat)
out_layer.SyncToDisk()
c+=1
#break
layer = None
dataSource=None
print "total de feicoes: %i " %( c)
| Python | 0.000038 | |
737dadd2e447c9f03de80ea808e137dcc1206c9b | Create Nvidia_GPU_Temperature.py | Nvidia_GPU_Temperature.py | Nvidia_GPU_Temperature.py | import time
from BlinkyTape import BlinkyTape
import subprocess
import os
import re
#bb = BlinkyTape('/dev/tty.usbmodemfa131')
bb = BlinkyTape('COM8')
while True:
output = subprocess.check_output(["C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe", "-a"], shell=True)
#os.popen('C:\\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe')
#output=os.popen("C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe").read()
#print("====" + str(output) + "=====")
temp = re.search("GPU Current.*",output).group()[30:33]
temp_baseline = 60
temp_multiplier = 5
color_temp = (int(temp) - temp_baseline ) * temp_multiplier
green = 100 - color_temp
red = 0 + color_temp
blue = 0
print "Current GPU Temp: %s RGB: %s %s %s" % (temp, red, green, blue)
for x in range(60):
bb.sendPixel(red, green, blue)
bb.show()
#time.sleep(1)
#for x in range(60):
# bb.sendPixel(100, 0, 0)
#bb.show()
time.sleep(1)
| Python | 0.000028 | |
b39af3af2104875919577f769701e7bde73967fd | clean file initialized | genetic_music.py | genetic_music.py | print('hola chio') | Python | 0.000003 | |
fbc780c7beb94d73b2a4ea110e733f8c87763741 | Add location name lookup for ajax_select. | geoip/lookups.py | geoip/lookups.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
## Author: Orcun Avsar <orc.avs@gmail.com>
##
## Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
"""Module for ajax autocomplete lookups for locations.
"""
from ajax_select import LookupChannel
from geoip.models import Location
from geoip.models import LocationNamesAggregation
class LocationLookup(LookupChannel):
model = Location
def get_query(self,q,request):
words = q.replace(',',' ').replace('-', ' ').split()
query = Location.objects.all()
queries = []
for word in words:
query = Location.objects.filter(name__icontains=word)[:20]
queries.append(query)
entities = []
for query in queries:
for entity in query:
entities.append(entity)
return entities
def format_match(self,obj):
obj.name | Python | 0 | |
af3ba846a8074132c64568c420ecb9b6ade9c6ea | Work on defining RegEx to find and format molecular geometries in Gaussian output files. | geomRegexTest.py | geomRegexTest.py | __author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findgeoms(filename):
"""A function that takes a file name and returns a list of
geometries."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
geoms = geomregex.search(file.read())
print(geoms.group(1))
mlgeoms = geoms.group(1)
for line in mlgeoms.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
print(xyzformat.format(*xyzelemnum))
findgeoms(filename) | Python | 0 | |
df9a6ab91eedfe91343ceb103156fe08cd965614 | test script form new Keras 2x API model config | app/backend-test/keras_2x_api/run01_print_keras_model_json.py | app/backend-test/keras_2x_api/run01_print_keras_model_json.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
if __name__ == '__main__':
pass | Python | 0 | |
1498e786201c1c1e2127da7d23db142559ad68a8 | Add support for Assembla | services/assembla.py | services/assembla.py | import foauth.providers
class Assembla(foauth.providers.OAuth2):
# General info about the provider
provider_url = 'https://www.assembla.com/'
docs_url = 'http://api-doc.assembla.com/content/api_reference.html'
category = 'Code'
# URLs to interact with the API
authorize_url = 'https://api.assembla.com/authorization'
access_token_url = 'https://api.assembla.com/token'
api_domain = 'api.assembla.com'
available_permissions = [
(None, 'read, write and manage your projects'),
]
def __init__(self, *args, **kwargs):
super(Assembla, self).__init__(*args, **kwargs)
self.auth = (self.client_id, self.client_secret)
def get_user_id(self, key):
r = self.api(key, self.api_domain, u'/v1/user')
return unicode(r.json()[u'id'])
| Python | 0 | |
10c7e718488a6daad5bcea97e00aece24179168e | Add regression test for bug #1937084 | nova/tests/functional/regressions/test_bug_1937084.py | nova/tests/functional/regressions/test_bug_1937084.py | # Copyright 2021, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import exception
from nova import objects
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
class TestDetachAttachmentNotFound(integrated_helpers._IntegratedTestBase):
"""Regression test for the Nova portion of bug 1937084
This regression test asserts the behaviour of Nova when Cinder raises a 404
during a DELETE request against an attachment.
In the context of bug 1937084 this could happen if a caller attempted to
DELETE a volume attachment through Nova's os-volume_attachments API and
then made a separate DELETE request against the underlying volume in Cinder
when it was marked as available.
"""
microversion = 'latest'
def test_delete_attachment_volume_not_found(self):
# Create a server and attach a single volume
server = self._create_server(networks='none')
server_id = server['id']
self.api.post_server_volume(
server_id,
{
'volumeAttachment': {
'volumeId': self.cinder.IMAGE_BACKED_VOL
}
}
)
self._wait_for_volume_attach(server_id, self.cinder.IMAGE_BACKED_VOL)
# Assert that we have an active bdm for the attachment before we detach
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context.get_admin_context(),
self.cinder.IMAGE_BACKED_VOL,
server_id)
with mock.patch(
'nova.volume.cinder.API.attachment_delete',
side_effect=exception.VolumeAttachmentNotFound(
attachment_id=bdm.attachment_id)
) as (
mock_attachment_delete
):
# DELETE /servers/{server_id}/os-volume_attachments/{volume_id} is
# async but as we are using CastAsCall it's sync in our func tests
ex = self.assertRaises(
client.OpenStackApiException,
self.api.delete_server_volume,
server_id,
self.cinder.IMAGE_BACKED_VOL)
self.assertEqual(500, ex.response.status_code)
mock_attachment_delete.assert_called_once()
# FIXME(lyarwood): This is the Nova portion of bug #1937084 where
# the original caller hasn't polled os-volume_attachments and sent
# a seperate DELETE request to c-api for the volume as soon as it
# has become available but before n-cpu has finished the original
# call. This leads to the sync request to c-api to delete the
# attachment returning a 404 that Nova translates into
# VolumeAttachmentNotFound.
#
# Replace this with the following once the exception is ignored:
#
# self.assertRaises(
# exception.VolumeBDMNotFound,
# objects.BlockDeviceMapping.get_by_volume_and_instance,
# context.get_admin_context(),
# self.cinder.IMAGE_BACKED_VOL,
# server_id)
#
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context.get_admin_context(),
self.cinder.IMAGE_BACKED_VOL,
server_id)
| Python | 0.000008 | |
e67abde6228feaa231b2b3bfc97d6ca1f2cf8276 | Use match argument in calls to pytest.raises when testing pin | tests/unit_tests/test_pin.py | tests/unit_tests/test_pin.py | """
Tests for constructing Pin universes
"""
import numpy
import pytest
import openmc
from openmc.model import Pin
@pytest.fixture
def pin_mats():
fuel = openmc.Material(name="UO2")
clad = openmc.Material(name="zirc")
water = openmc.Material(name="water")
return fuel, clad, water
@pytest.fixture
def good_radii():
return (0.4, 0.42)
def test_failure(pin_mats, good_radii):
"""Check for various failure modes"""
# Bad material type
with pytest.raises(TypeError):
Pin.from_radii(good_radii, [mat.name for mat in pin_mats])
# Incorrect lengths
with pytest.raises(ValueError, match="length") as exec_info:
Pin.from_radii(good_radii[: len(pin_mats) - 2], pin_mats)
# Non-positive radii
rad = (-0.1,) + good_radii[1:]
with pytest.raises(ValueError, match="index 0") as exec_info:
Pin.from_radii(rad, pin_mats)
# Non-increasing radii
rad = tuple(reversed(good_radii))
with pytest.raises(ValueError, match="index 1") as exec_info:
Pin.from_radii(rad, pin_mats)
# Bad orientation
with pytest.raises(ValueError, match="Orientation") as exec_info:
Pin.from_radii(good_radii, pin_mats, orientation="fail")
def test_from_radii(pin_mats, good_radii):
name = "test pin"
p = Pin.from_radii(good_radii, pin_mats, name=name)
assert len(p.cells) == len(pin_mats)
assert p.name == name
assert p.radii == good_radii
def test_subdivide(pin_mats, good_radii):
surfs = [openmc.ZCylinder(r=r) for r in good_radii]
pin = Pin(surfs, pin_mats)
assert pin.radii == good_radii
assert len(pin.cells) == len(pin_mats)
# subdivide inner region
N = 5
pin.subdivide_ring(0, N)
assert len(pin.radii) == len(good_radii) + N - 1
assert len(pin.cells) == len(pin_mats) + N - 1
# check volumes of new rings
bounds = (0,) + pin.radii[:N]
sqrs = numpy.square(bounds)
assert sqrs[1:] - sqrs[:-1] == pytest.approx(good_radii[0] ** 2 / N)
# subdivide non-inner most region
new_pin = Pin.from_radii(good_radii, pin_mats)
new_pin.subdivide_ring(1, N)
assert len(new_pin.radii) == len(good_radii) + N - 1
assert len(new_pin.cells) == len(pin_mats) + N - 1
# check volumes of new rings
bounds = new_pin.radii[:N + 1]
sqrs = numpy.square(bounds)
assert sqrs[1:] - sqrs[:-1] == pytest.approx(
(good_radii[1] ** 2 - good_radii[0] ** 2) / N)
| """
Tests for constructing Pin universes
"""
import numpy
import pytest
import openmc
from openmc.model import Pin
@pytest.fixture
def pin_mats():
fuel = openmc.Material(name="UO2")
clad = openmc.Material(name="zirc")
water = openmc.Material(name="water")
return fuel, clad, water
@pytest.fixture
def good_radii():
return (0.4, 0.42)
def test_failure(pin_mats, good_radii):
"""Check for various failure modes"""
# Bad material type
with pytest.raises(TypeError):
Pin.from_radii(good_radii, [mat.name for mat in pin_mats])
# Incorrect lengths
with pytest.raises(ValueError) as exec_info:
Pin.from_radii(good_radii[: len(pin_mats) - 2], pin_mats)
assert "length" in str(exec_info)
# Non-positive radii
rad = (-0.1,) + good_radii[1:]
with pytest.raises(ValueError) as exec_info:
Pin.from_radii(rad, pin_mats)
assert "index 0" in str(exec_info)
# Non-increasing radii
rad = tuple(reversed(good_radii))
with pytest.raises(ValueError) as exec_info:
Pin.from_radii(rad, pin_mats)
assert "index 1" in str(exec_info)
# Bad orientation
with pytest.raises(ValueError) as exec_info:
Pin.from_radii(good_radii, pin_mats, orientation="fail")
assert "Orientation" in str(exec_info)
def test_from_radii(pin_mats, good_radii):
name = "test pin"
p = Pin.from_radii(good_radii, pin_mats, name=name)
assert len(p.cells) == len(pin_mats)
assert p.name == name
assert p.radii == good_radii
def test_subdivide(pin_mats, good_radii):
surfs = [openmc.ZCylinder(r=r) for r in good_radii]
pin = Pin(surfs, pin_mats)
assert pin.radii == good_radii
assert len(pin.cells) == len(pin_mats)
# subdivide inner region
N = 5
pin.subdivide_ring(0, N)
assert len(pin.radii) == len(good_radii) + N - 1
assert len(pin.cells) == len(pin_mats) + N - 1
# check volumes of new rings
bounds = (0,) + pin.radii[:N]
sqrs = numpy.square(bounds)
assert sqrs[1:] - sqrs[:-1] == pytest.approx(good_radii[0] ** 2 / N)
# subdivide non-inner most region
new_pin = Pin.from_radii(good_radii, pin_mats)
new_pin.subdivide_ring(1, N)
assert len(new_pin.radii) == len(good_radii) + N - 1
assert len(new_pin.cells) == len(pin_mats) + N - 1
# check volumes of new rings
bounds = new_pin.radii[:N + 1]
sqrs = numpy.square(bounds)
assert sqrs[1:] - sqrs[:-1] == pytest.approx((good_radii[1] ** 2 - good_radii[0] ** 2) / N)
| Python | 0 |
0fc46c92f8682879591d9fc473be34116c9106be | add migration | custom/ilsgateway/migrations/0010_auto_20160830_1923.py | custom/ilsgateway/migrations/0010_auto_20160830_1923.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('ilsgateway', '0009_auto_20160413_1311'),
]
operations = [
migrations.AlterField(
model_name='deliverygroupreport',
name='report_date',
field=models.DateTimeField(default=datetime.datetime.utcnow),
preserve_default=True,
),
migrations.AlterField(
model_name='slabconfig',
name='sql_location',
field=models.OneToOneField(to='locations.SQLLocation'),
preserve_default=True,
),
migrations.AlterField(
model_name='supplypointstatus',
name='status_type',
field=models.CharField(max_length=50, choices=[(b'rr_fac', b'rr_fac'), (b'trans_fac', b'trans_fac'), (b'soh_fac', b'soh_fac'), (b'super_fac', b'super_fac'), (b'rr_dist', b'rr_dist'), (b'del_del', b'del_del'), (b'la_fac', b'la_fac'), (b'del_dist', b'del_dist'), (b'del_fac', b'del_fac')]),
preserve_default=True,
),
]
| Python | 0.000001 | |
fdd9ac1da19d37ca482d770bb0c8f159fb7d4752 | optimize to not instantiate Fortune. | flask/app.py | flask/app.py | #!/usr/bin/env python
from flask import Flask, jsonify, request, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from random import randint
from operator import attrgetter
try:
import MySQLdb
mysql_schema = "mysql:"
except ImportError:
mysql_schema = "mysql+pymysql:"
# setup
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = mysql_schema + '//benchmarkdbuser:benchmarkdbpass@DBHOSTNAME:3306/hello_world?charset=utf8'
db = SQLAlchemy(app)
dbraw_engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
# models
class World(db.Model):
__tablename__ = "World"
id = db.Column(db.Integer, primary_key=True)
randomNumber = db.Column(db.Integer)
# http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'id' : self.id,
'randomNumber': self.randomNumber
}
class Fortune(db.Model):
__tablename__ = "Fortune"
id = db.Column(db.Integer, primary_key=True)
message = db.Column(db.String)
# views
@app.route("/json")
def hello():
resp = {"message": "Hello, World!"}
return jsonify(resp)
@app.route("/db")
def get_random_world():
num_queries = request.args.get("queries", 1)
worlds = []
for i in range(int(num_queries)):
wid = randint(1, 10000)
worlds.append(World.query.get(wid).serialize)
return jsonify(worlds=worlds)
@app.route("/dbs")
def get_random_world_single():
wid = randint(1, 10000)
worlds = [World.query.get(wid).serialize]
return jsonify(worlds=worlds)
@app.route("/dbraw")
def get_random_world_raw():
connection = dbraw_engine.connect()
num_queries = request.args.get("queries", 1)
worlds = []
for i in range(int(num_queries)):
wid = randint(1, 10000)
result = connection.execute("SELECT * FROM world WHERE id = " + str(wid)).fetchone()
worlds.append({'id': result[0], 'randomNumber': result[1]})
connection.close()
return jsonify(worlds=worlds)
@app.route("/dbsraw")
def get_random_world_single_raw():
connection = dbraw_engine.connect()
wid = randint(1, 10000)
result = connection.execute("SELECT * FROM world WHERE id = " + str(wid)).fetchone()
worlds = [{'id': result[0], 'randomNumber': result[1]}]
connection.close()
return jsonify(worlds=worlds)
@app.route("/fortunes")
def get_fortunes():
fortunes = list(Fortune.query.all())
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return render_template('fortunes.html', fortunes=fortunes)
@app.route("/fortunesraw")
def get_forutens_raw():
fortunes = list(dbraw_engine.execute("SELECT * FROM Fortune"))
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return render_template('fortunes.html', fortunes=fortunes)
# entry point for debugging
if __name__ == "__main__":
app.run(debug=True)
| #!/usr/bin/env python
from flask import Flask, jsonify, request, render_template
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from random import randint
from operator import attrgetter
try:
import MySQLdb
mysql_schema = "mysql:"
except ImportError:
mysql_schema = "mysql+pymysql:"
# setup
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = mysql_schema + '//benchmarkdbuser:benchmarkdbpass@DBHOSTNAME:3306/hello_world?charset=utf8'
db = SQLAlchemy(app)
dbraw_engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
# models
class World(db.Model):
__tablename__ = "World"
id = db.Column(db.Integer, primary_key=True)
randomNumber = db.Column(db.Integer)
# http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {
'id' : self.id,
'randomNumber': self.randomNumber
}
class Fortune(db.Model):
__tablename__ = "Fortune"
id = db.Column(db.Integer, primary_key=True)
message = db.Column(db.String)
# views
@app.route("/json")
def hello():
resp = {"message": "Hello, World!"}
return jsonify(resp)
@app.route("/db")
def get_random_world():
num_queries = request.args.get("queries", 1)
worlds = []
for i in range(int(num_queries)):
wid = randint(1, 10000)
worlds.append(World.query.get(wid).serialize)
return jsonify(worlds=worlds)
@app.route("/dbs")
def get_random_world_single():
wid = randint(1, 10000)
worlds = [World.query.get(wid).serialize]
return jsonify(worlds=worlds)
@app.route("/dbraw")
def get_random_world_raw():
connection = dbraw_engine.connect()
num_queries = request.args.get("queries", 1)
worlds = []
for i in range(int(num_queries)):
wid = randint(1, 10000)
result = connection.execute("SELECT * FROM world WHERE id = " + str(wid)).fetchone()
worlds.append({'id': result[0], 'randomNumber': result[1]})
connection.close()
return jsonify(worlds=worlds)
@app.route("/dbsraw")
def get_random_world_single_raw():
connection = dbraw_engine.connect()
wid = randint(1, 10000)
result = connection.execute("SELECT * FROM world WHERE id = " + str(wid)).fetchone()
worlds = [{'id': result[0], 'randomNumber': result[1]}]
connection.close()
return jsonify(worlds=worlds)
@app.route("/fortunes")
def get_fortunes():
fortunes = list(Fortune.query.all())
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return render_template('fortunes.html', fortunes=fortunes)
@app.route("/fortunesraw")
def get_forutens_raw():
fortunes = []
for row in dbraw_engine.execute("SELECT * FROM Fortune"):
fortunes.append(Fortune(id=row.id, message=row.message))
fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
fortunes.sort(key=attrgetter('message'))
return render_template('fortunes.html', fortunes=fortunes)
# entry point for debugging
if __name__ == "__main__":
app.run(debug=True)
| Python | 0 |
3dfa8bb2d428f86c5156a974e84e0756cc6d792f | Create headache.py | headache.py | headache.py | # This is... Headache! One more very simple Brainfuck interpreter! #
# by Sidnei Diniz - sidneidiniz@gmail.com - http://bitworm.com.br #
# GitHub: http://github.com/scdiniz/headache
# Date: 29-12-2015 #
import sys
# Interpreter kernel
class Headache():
# Constructor
def __init__(self):
self.cells = bytearray([0] * 30000)
self.commands = []
# Load code file
def load(self, file):
code = open(file, "r")
for line in code:
for c in line:
if c in ("<", ">", "+", "-", ".", ",", "[", "]"):
self.commands.append(c)
code.close()
# Verify loop for errors
def validateLoop(self):
countStart = 0
countEnd = 0
for cmd in self.commands:
if cmd == "[":
countStart += 1
if cmd == "]":
countEnd += 1
return countStart == countEnd
# Make loop dictionary
def setLoopDict(self):
if self.validateLoop():
self.loopDict = {}
tmp = []
i = 0
while i < len(self.commands):
if self.commands[i] == "[":
tmp.append(i)
if self.commands[i] == "]":
if len(tmp) > 0:
value = tmp.pop()
self.loopDict[value] = i
self.loopDict[i] = value
else:
return False
i += 1
return True
else:
return False
# Run interpreter
def run(self, file):
self.load(file)
# Make loop dictionary
if self.setLoopDict():
cell = 0
i = 0
# Execute command by command
while i < len(self.commands):
if self.commands[i] == "<":
cell -= 1
if self.commands[i] == ">":
cell += 1
if self.commands[i] == "+":
if self.cells[cell] < 255:
self.cells[cell] += 1
else:
self.cells[cell] = 0
if self.commands[i] == "-":
if self.cells[cell] > 0:
self.cells[cell] -= 1
else:
self.cells[cell] = 255
if self.commands[i] == "]":
if self.cells[cell] > 0:
i = self.loopDict[i]
if self.commands[i] == "[":
if self.cells[cell] == 0:
i = self.loopDict[i]
if self.commands[i] == ",":
self.cells[cell] = ord(input()[0])#ord(input()[0])
if self.commands[i] == ".":
try:
print(chr(self.cells[cell]), end = "", flush = True)
except:
None
i += 1
else:
# Error on loop dictionary
print("My head hurts! Verify your loop instructions '[' ']'")
# Start
count = 0
file = ""
# Reading sys arguments
for arg in sys.argv:
count += 1
if count == 2:
file = arg
break
# Verify if file name was insert
if count < 2:
print("My head hurts! Come on, tell me brainfuck file name!")
else:
# Launch interpreter
Headache().run(file)
| Python | 0.001409 | |
c2b69a51faac56689edc88e747a00b60cf08cc04 | Add default ordering of progress outcome groups | dthm4kaiako/poet/migrations/0003_auto_20190731_1912.py | dthm4kaiako/poet/migrations/0003_auto_20190731_1912.py | # Generated by Django 2.1.5 on 2019-07-31 07:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('poet', '0002_progressoutcomegroup'),
]
operations = [
migrations.AlterModelOptions(
name='progressoutcomegroup',
options={'ordering': ['name']},
),
]
| Python | 0 | |
32a79573b38c6d2ea7f5b81363610a5d9332ed4e | Add python script to parse JSON output | src/main/resources/jsonformat.py | src/main/resources/jsonformat.py | #!/usr/bin/python2.7
import json
import socket
import sys
def readOutput(host, port):
data = None
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, int(port)))
except socket.error as msg:
s = None
print msg
if s is None:
return None
try:
data = s.recv(1024)
except socket.error as msg:
print msg
if s is not None:
s.close
return data
def parseData(jsonData, metric, key):
data = json.loads(jsonData)
for x in data:
if not 'name' in x:
continue
if x['name'] == metric:
if not 'datapoint' in x:
continue
monitorData = x['datapoint']
for k in monitorData:
if k == key:
return monitorData[k]
return 'Metric [%s:%s] not found'%(metric,key)
if __name__ == '__main__':
    # Usage: jsonformat.py host port metric:key [metric:key ...]
    if len(sys.argv) < 4:
        print 'Usage python jsonformat.py host port metric:key ...'
        print 'The output like:'
        print '[value1,value2,...]'
    else:
        jsonData = readOutput(sys.argv[1], sys.argv[2])
        if jsonData is None:
            print 'Read JSON data error'
        else:
            l = []
            for x in sys.argv[3:]:
                # each selector must be "metric:key"; anything else is skipped
                args = x.split(':')
                if len(args) != 2:
                    continue
                value = parseData(jsonData, args[0], args[1])
                l.append(value)
            print l
| Python | 0.000006 | |
699469342179fdc4319b5f39ea201015860ef09d | Add migration for CI fix | infrastructure/migrations/0020_auto_20210922_0929.py | infrastructure/migrations/0020_auto_20210922_0929.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-09-22 07:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0019_project_latest_implementation_year'),
]
operations = [
migrations.AlterField(
model_name='project',
name='latest_implementation_year',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='infrastructure.FinancialYear'),
),
]
| Python | 0 | |
b19ba3f63240db6c710e8ef7388d7a5b9a2834c2 | bring back tecplot reader | tests/problems/tecplot_reader.py | tests/problems/tecplot_reader.py | # -*- coding: utf-8 -*-
#
'''
Read Tecplot data.
'''
import numpy
import re
def read(filename):
# Read the meta data.
# It is principally structured like
#
# NAME = content
#
# Items aren't separated by anything else than whitespace. Hence, the only
# character that provides structure here is the equality sign "=".
c = {}
previous_key = None
f = open(filename, 'r')
line = f.readline()
while line:
out_key_value = re.match('([A-Z][A-Z ]*)\s*=\s*"([^"]*)"\s*', line)
out_value = re.match('\s*"([^"]*)"\s*', line)
if out_key_value:
key = out_key_value.group(1).strip()
value = out_key_value.group(2)
if re.match('ZONE.*', key):
# Special treatment for zones.
c[key] = _read_zone(f, c['VARIABLES'])
c[key]['title'] = value
else:
c[key] = value
previous_key = key
elif out_value:
# Only a value present in this line. It must belong to the previous
# key.
value = out_value.group(1)
try:
c[previous_key].append(value)
except RuntimeError:
# Convert previous key-value to key-listofvalues.
previous_value = c[previous_key]
c[previous_key] = [previous_value, value]
# Read next line.
line = f.readline()
f.close()
return c
def _read_zone(f, variable_names):
'''Read ZONE data from a Tecplot file.
'''
zone = {}
print('Reading zone header...')
line = f.readline()
while line: # zone_header and line:
# Read the zone header.
# Break up the line at commas and read individually.
all_units_success = True
units = line.split(',')
for unit in units:
re_key_value = '\s*([A-Z][A-Za-z]*)=(.*)'
out = re.match(re_key_value, unit)
if out:
key = out.group(1)
value = out.group(2)
if key == 'STRANDID' or key == 'Nodes' or key == 'Elements':
value = int(value)
elif key == 'SOLUTIONTIME':
value = float(value)
zone[key] = value
else:
all_units_success = False
break
if not all_units_success:
# we must be in the numerical data section already
break
line = f.readline()
print('Reading zone data...')
# Fill in the numerical data into an array.
num_nodes = zone['Nodes']
# data = numpy.empty((num_nodes, num_colums))
# We're in a ZONE and the pattern doesn't match KEY=value. This must mean
# we're dealing with numerical values now. Check out what DT says and
# build the appropriate regex.
dt = zone['DT']
# Strip leading and trailing brackets.
dt = dt.strip('() ')
# Convert the Tecplot DT (data type) to an array of numpy data types.
data = {}
tp_datatypes = dt.split()
num_columns = len(tp_datatypes)
assert(num_columns == len(variable_names))
for l, tp_dt in enumerate(tp_datatypes):
name = variable_names[l]
if tp_dt == 'SINGLE':
data[name] = numpy.empty(num_nodes, dtype=float)
else:
raise RuntimeError('Unknown Tecplot data type \'%s\'.' % tp_dt)
# Build the regex for every data line.
SINGLE_regex = '[-+]?[0-9]\.[0-9]+E[-+][0-9][0-9]'
dt = ' ' + dt.replace('SINGLE', '(' + SINGLE_regex + ')')
# Read all node data.
for k in range(num_nodes):
out = re.match(dt, line)
assert(out)
for l in range(num_columns):
name = variable_names[l]
data[name][k] = out.group(l+1)
line = f.readline()
# Copy over the data.
zone['node data'] = data
# Read elements (element connectivity).
num_elements = zone['Elements']
if zone['ZONETYPE'] == 'FELineSeg':
num_nodes_per_element = 2
else:
raise RuntimeError('Invalid ZONETYPE \'%s\'.' % zone['ZONETYPE'])
data = numpy.empty((num_nodes, num_nodes_per_element), dtype=int)
element_regex = ' ([0-9]+)+\s+([0-9]+)'
for k in range(num_elements):
out = re.match(element_regex, line)
assert(out)
for l in range(num_nodes_per_element):
data[k][l] = out.group(l+1)
line = f.readline()
zone['element data'] = data
return zone
def read_with_vtk(filename):
import vtk
reader = vtk.vtkTecplotReader()
reader.SetFileName(filename)
reader.Update()
exit()
return
| Python | 0 | |
2ead746f0e697276e7753c735befbd1a14feba6d | Restrict parquet many cols test to one test dimension. | tests/query_test/test_parquet.py | tests/query_test/test_parquet.py | #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
# Tests specific to parquet.
class TestParquetManyColumns(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquetManyColumns, cls).add_test_dimensions()
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
if cls.exploration_strategy() == 'core':
# Don't run on core. This test is very slow (IMPALA-864) and we are unlikely
# to regress here.
cls.TestMatrix.add_constraint(lambda v: False);
def test_many_columns(self, vector):
NUM_COLS = 2000
TABLE_NAME = "functional_parquet.parquet_many_cols"
self.client.execute("drop table if exists " + TABLE_NAME)
col_descs = ["col" + str(i) + " int" for i in range(NUM_COLS)]
create_stmt = "CREATE TABLE " + TABLE_NAME +\
"(" + ', '.join(col_descs) + ") stored as parquet"
col_vals = [str(i) for i in range(NUM_COLS)]
insert_stmt = "INSERT INTO " + TABLE_NAME + " VALUES(" + ", ".join(col_vals) + ")"
expected_result = "\t".join(col_vals)
self.client.execute(create_stmt)
self.client.execute(insert_stmt)
result = self.client.execute("select count(*) from " + TABLE_NAME)
assert result.data == ["1"]
result = self.client.execute("select * from " + TABLE_NAME)
assert result.data == [expected_result]
| #!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
# Tests specific to parquet.
class TestParquetManyColumns(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquetManyColumns, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
if cls.exploration_strategy() == 'core':
# Don't run on core. This test is very slow (IMPALA-864) and we are unlikely
# to regress here.
cls.TestMatrix.add_constraint(lambda v: False);
def test_many_columns(self, vector):
NUM_COLS = 2000
TABLE_NAME = "functional_parquet.parquet_many_cols"
self.client.execute("drop table if exists " + TABLE_NAME)
col_descs = ["col" + str(i) + " int" for i in range(NUM_COLS)]
create_stmt = "CREATE TABLE " + TABLE_NAME +\
"(" + ', '.join(col_descs) + ") stored as parquet"
col_vals = [str(i) for i in range(NUM_COLS)]
insert_stmt = "INSERT INTO " + TABLE_NAME + " VALUES(" + ", ".join(col_vals) + ")"
expected_result = "\t".join(col_vals)
self.client.execute(create_stmt)
self.client.execute(insert_stmt)
result = self.client.execute("select count(*) from " + TABLE_NAME)
assert result.data == ["1"]
result = self.client.execute("select * from " + TABLE_NAME)
assert result.data == [expected_result]
| Python | 0.999962 |
09d815c6b53c74ae9a2f3831a2eec9c2b266eca7 | add the prototype. | simple_xls_to_xml.py | simple_xls_to_xml.py | # encoding:utf-8
import codecs
import xlrd
import xml.dom.minidom
filter_words = None
def xlsRead():
global filter_words
data = xlrd.open_workbook("filter.xlsx")
table = data.sheets()[0] # 获取第一个sheet
filter_words = table.col_values(0)
def createXML():
if filter_words is None:
return
impl = xml.dom.minidom.getDOMImplementation()
dom = impl.createDocument(None, "filters", None)
root = dom.documentElement
for f in filter_words:
filter = dom.createElement("filter")
filter.setAttribute("word", f)
root.appendChild(filter)
out = codecs.open("filters.xml", "w", "utf-8")
dom.writexml(out, addindent=" ", newl="\n", encoding="utf-8")
out.close()
if __name__ == "__main__":
xlsRead()
createXML() | Python | 0 | |
6a268c69fced2a5b9e97086fa2a9089837376db4 | add subfolder | keras/metrics/empty.py | keras/metrics/empty.py | #
| Python | 0.000005 | |
9524b824e5edb6e88c776d3420b618b6a2d1b7fa | Add files via upload | src/graph_realtimeEdit.py | src/graph_realtimeEdit.py | from pylsl import StreamInlet, resolve_byprop, local_clock, TimeoutError
from pylsl import StreamInfo,StreamOutlet
from random import random as rand
import collections
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui,QtCore
import time
import signal, sys, os, time, csv
import serial
import threading
win = pg.GraphicsWindow()
graph = None
class Graph(object):
def __init__(self,size=(600,350)):
streams = resolve_byprop('name', 'bci', timeout=2.5)
try:
self.inlet = StreamInlet(streams[0])
except IndexError:
raise ValueError('Make sure stream name=bci is opened first.')
self.running = True
self.frequency = 250.0
self.sampleinterval = (1/self.frequency)
self.timewindow = 10
self._bufsize = int(self.timewindow/self.sampleinterval)
self.dataBuffer = collections.deque([0.0]*self._bufsize,self._bufsize)
self.timeBuffer = collections.deque([0.0]*self._bufsize,self._bufsize)
self.x = np.zeros(self._bufsize)
self.y = np.zeros(self._bufsize)
self.app = QtGui.QApplication([])
self.plt = pg.plot(title='Dynamic Plotting with PyQtGraph')
self.plt.resize(*size)
self.plt.showGrid(x=True,y=True)
self.plt.setLabel('left','amplitude','V')
self.plt.setLabel('bottom','time','s')
self.curve = self.plt.plot(self.x,self.y,pen=(255,0,0))
def _graph_lsl(self):
while self.running:
sample, timestamp = self.inlet.pull_sample(timeout=5)
# time correction to sync to local_clock()
try:
if timestamp is not None and sample is not None:
timestamp = timestamp + self.inlet.time_correction(timeout=5)
# TODO Place graphing stuff here
self.dataBuffer.append(sample[0])
self.y[:] = self.dataBuffer
self.timeBuffer.append(timestamp)
self.x[:] = self.timeBuffer
# added
self.sampleNum = self.x
self.timestampIndex = self.y
self.sampleNum = np.roll(self.sampleNum, 1) # scroll data
self.timestampIndex = np.roll(self.timestampIndex, 1)
self.curve.setData(self.sampleNum, self.timestampIndex) # re-plot
self.app.processEvents()
print(sample, timestamp)
except TimeoutError:
pass
print('closing graphing utility')
self.inlet.close_stream()
def start(self):
self.lsl_thread = threading.Thread(target=self._graph_lsl)
self.lsl_thread.start()
def stop(self):
self.running = False
self.lsl_thread.join(5)
# Place any graphing termination or cleanup here
def load(queue):
global graph
graph = Graph()
print('init graph')
def randomData():
info = StreamInfo('bci','randomData',1,150)
outlet = StreamOutlet(info)
print ('now sending data')
while True:
sample = [rand()]
outlet.push_sample(sample)
time.sleep(1)
def start():
graph.start()
graph.app.exec_()
def stop():
graph.stop()
print('Stopping graphing.')
os._exit(0) # dirty, but it's ok because everything is already cleaned up
def sigint_handler(signal, frame):
stop()
def sigterm_handler(signal, frame):
stop()
def main():
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
data_stream = threading.Thread(target=randomData)
data_stream.start()
load(queue=None)
start()
try:
signal.pause()
except AttributeError:
while True:
time.sleep(1)
stop()
def begin(queue, event=None):
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
load(queue)
start()
try:
while True:
signal.pause()
except AttributeError:
# signal.pause() not implemented on windows
while not event.is_set():
time.sleep(1)
print('event was set, stopping')
stop()
if __name__ == '__main__':
main()
| Python | 0 | |
1d1712259a1e6e23b7a6a5541f70573b05619e99 | Create stock.py | stock.py | stock.py | from openerp.osv import fields, osv
class stock_move(osv.Model):
_name = 'stock.move'
_inherit = 'stock.move'
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
loc_dest_id=False, partner_id=False):
res_prod = super(stock_move, self).onchange_product_id(cr, uid, ids, prod_id, loc_id,loc_dest_id, partner_id)
prod_obj = self.pool.get('product.product')
obj = prod_obj.browse(cr, uid, prod_id)
res_prod['value'].update({'image_small': obj.image_small})
return res_prod
_columns = {
'image_small' : fields.binary('Product Image'),
}
stock_move()
class sale_order_line(osv.Model):
_name = 'sale.order.line'
_inherit = 'sale.order.line'
_columns = {
'image_small' : fields.binary('Product Image'),
}
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False,image_small=False, context=None):
context = context or {}
res = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
product_obj = self.pool.get('product.product')
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value'].update({'image_small': product_obj.image_small or False})
return res
sale_order_line()
class sale_order(osv.Model):
_name = 'sale.order'
_inherit = 'sale.order'
def _prepare_order_line_move(self, cr, uid, order, line, picking_id, date_planned, context=None):
res = super(sale_order, self)._prepare_order_line_move(cr, uid, order=order, line=line, picking_id=picking_id, date_planned=date_planned, context=context)
res['image_small'] = line.image_small
return res
sale_order()
| Python | 0.000001 | |
ed09ca11fc3586c9782103269b12240ed6b27911 | complete and tested juliaset, HW4 | juliaset.py | juliaset.py | class JuliaSet(object):
def set_plane(self, _d):
self._d=_d
self._complexplane=[]
x=-2
y=-2
while x<=2:
while y<=2:
self._complexplane.append(complex(x,y))
y+=_d
x+=_d
y=-2
return self._complexplane
def __init__(self, c, n=100):
self.c = c
self.n = n
self._d=0.001
self._complexplane=[]#self.set_plane(self._d)
def juliamap(self, z):
return ((z**2)+self.c)
def iterate(self, z):
m = 0
while True:
m+=1
z=self.juliamap(z)
if abs(z)>2:
return m
elif m>=self.n:
return 0
def set_spacing(self, d):
self._d = d
self._complexplane=self.set_plane(self._d)
def generate(self):
self.set = [self.iterate(z) for z in self._complexplane]
return self.set
| Python | 0 | |
8282cca05b784bb0966ba8246900627286c5d98c | Use invoke as build tool | tasks.py | tasks.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from os.path import join, abspath, dirname, exists
from invoke import run, task
ROOT = abspath(join(dirname(__file__)))
I18N_DOMAIN = 'udata-admin'
def green(text):
return '\033[1;32m{0}\033[0;m'.format(text)
def red(text):
return '\033[1;31m{0}\033[0;m'.format(text)
def cyan(text):
return '\033[1;36m{0}\033[0;m'.format(text)
def lrun(command, *args, **kwargs):
run('cd {0} && {1}'.format(ROOT, command), *args, **kwargs)
def nrun(command, *args, **kwargs):
lrun('node_modules/.bin/{0}'.format(command), *args, **kwargs)
@task
def clean(bower=False, node=False):
'''Cleanup all build artifacts'''
patterns = [
'build', 'dist', 'cover', 'docs/_build',
'**/*.pyc', '*.egg-info', '.tox'
]
if bower:
patterns.append('udata/static/bower')
if node:
patterns.append('node_modules')
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color udata'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=udata'.format(ROOT), pty=True)
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/udata'.format(ROOT))
@task
def serve():
run('cd {0} && python manage.py serve -d -r'.format(ROOT), pty=True)
@task
def work(loglevel='info'):
run('celery -A udata.worker worker --purge --autoreload -l %s' % loglevel)
@task
def beat(loglevel='info'):
run('celery -A udata.worker beat -l %s' % loglevel)
@task
def i18n():
run('python setup.py extract_messages')
run('python setup.py update_catalog')
run('udata i18njs -d udata udata/static')
@task
def i18nc():
run('cd {0} && python setup.py compile_catalog'.format(ROOT))
@task
def build():
print(cyan('Compiling translations'))
lrun('python setup.py compile_catalog')
@task(build)
def dist():
'''Package for distribution'''
print(cyan('Building a distribuable package'))
lrun('python setup.py bdist_wheel', pty=True)
| Python | 0.000001 | |
c63144242d9cf2ecf02d58eb9a93cfe426acc6dc | Add script to send unregister user emails | scripts/send_preprint_unreg_contributor_emails.py | scripts/send_preprint_unreg_contributor_emails.py | # -*- coding: utf-8 -*-
"""Sends an unregistered user claim email for preprints created after 2017-03-14. A hotfix was made on that
date which caused unregistered user claim emails to not be sent. The regression was fixed on 2017-05-05. This
sends the emails that should have been sent during that time period.
NOTE: This script should only be run ONCE.
"""
import sys
import logging
import datetime as dt
import pytz
from framework.auth import Auth
from website.app import init_app
init_app(routes=False)
from website.project import signals as project_signals
from scripts import utils as script_utils
from website.project.views import contributor # flake8: noqa (set up listeners)
from osf.models import PreprintService
logger = logging.getLogger(__name__)
logging.getLogger('website.mails.mails').setLevel(logging.CRITICAL)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/568413a77cc51511a0f7afe081a218676a36ebb6 was committed
START_DATETIME = dt.datetime(2017, 3, 14, 19, 10, tzinfo=pytz.utc)
# datetime at which https://github.com/CenterForOpenScience/osf.io/commit/38513916bb9584eb723c46e35553dc6d2c267e1a was deployed
END_DATETIME = dt.datetime(2017, 5, 5, 5, 48, tzinfo=pytz.utc)
def main():
    """Send the missed unregistered-contributor claim emails.

    Iterates over preprints published inside the affected window and
    re-fires the ``contributor_added`` signal for every unregistered
    (inactive) author, which triggers the claim email. Pass ``--dry`` on
    the command line to log what would be sent without sending.
    """
    dry_run = '--dry' in sys.argv
    # bugfix: this previously read `if not dry:` which raised NameError.
    if not dry_run:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    count = 0
    preprints = PreprintService.objects.filter(
        is_published=True,
        date_published__gte=START_DATETIME,
        date_published__lte=END_DATETIME
    ).order_by('date_published').select_related('node', 'node__creator')
    for preprint in preprints:
        auth = Auth(preprint.node.creator)
        # Inactive contributors are the unregistered ones (asserted below).
        for author in preprint.node.contributors.filter(is_active=False):
            assert not author.is_registered
            logger.info('Sending email to unregistered User {} on PreprintService {}'.format(author._id, preprint._id))
            if not dry_run:
                project_signals.contributor_added.send(
                    preprint.node,
                    contributor=author,
                    auth=auth,
                    email_template='preprint'
                )
            count += 1
    logger.info('Sent an email to {} unregistered users'.format(count))
if __name__ == '__main__':
main()
| Python | 0 | |
fcac525d3f974c7d4a1e90c1adc444c6d6e72018 | Add sed executor #123 | executors/SED.py | executors/SED.py |
from .base_executor import ScriptExecutor
from judgeenv import env
class Executor(ScriptExecutor):
    """Judge executor that runs submissions through GNU sed."""
    ext = '.sed'
    name = 'SED'
    command = env['runtime'].get('sed')
    # Self-test script: rewrite any input line to the expected hello-world
    # output, then quit.
    test_program = '''s/.*/echo: Hello, World!/
q'''
    # Sandbox whitelist of readable filesystem patterns and extra syscalls.
    fs = ['.*\.(so|sed)', '/dev/urandom$', '/proc/self/maps$', '/proc/filesystems$', '/+lib/charset.alias$']
    syscalls = ['getgroups32', 'statfs64']

    def get_cmdline(self):
        # Invoke as `sed -f <submission script>`.
        return [self.get_command(), '-f', self._code]


initialize = Executor.initialize
| Python | 0.000001 | |
1f52ef331a3529fe0f8b1ad5528d4d5cdd5d0b7a | add mnist deep auto like hinton's | rbm/autoencoder/mnist_deep_auto.py | rbm/autoencoder/mnist_deep_auto.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Author : @Jason_wbw
"""
This program pertrains a deep autoencoder for MNIST dataset
You cat set the maxinum number of epochs for pertraining each layer
and you can set the architectrue of the multiplayer nets.
"""
from converter import Converter
from rbm import RBM, RBMLinear
import pickle
class MNISTDeepAuto(object):
    """Greedy layer-wise pretraining of a deep autoencoder on MNIST.

    Trains a stack of RBMs (input-1000-500-250-30, the top layer linear,
    as in Hinton & Salakhutdinov 2006) and dumps each layer's weights and
    biases to pickle files (l<k>_w.pkl / l<k>_hb.pkl / l<k>_vb.pkl).
    """

    def __init__(self, batch_num=100):
        # NOTE(review): batch_num is currently unused (the batch size is
        # hard-coded to 100 in training); kept for interface compatibility.
        self.batch_num = batch_num
        self._load_data()

    def _load_data(self):
        # Converter loads the MNIST images into memory.
        print("begin converting data into memory")
        self.converter = Converter()
        print("converting end\n")

    def _train_layer(self, rbm, data, level, prefix):
        """Train one RBM layer on `data`, dump its parameters and return
        the hidden activation probabilities (input for the next layer)."""
        print("train rbm level %d" % level)
        rbm.train(data, max_epochs=10, batch=100)
        hidden_probs = rbm.hidden_probs
        self.pickle_dumps(rbm.weights, '%s_w.pkl' % prefix)
        self.pickle_dumps(rbm.hidden_bias, '%s_hb.pkl' % prefix)
        self.pickle_dumps(rbm.visible_bias, '%s_vb.pkl' % prefix)
        print("train rbm level %d end\n" % level)
        return hidden_probs

    def train(self):
        # The four previously copy-pasted stages, factored through
        # _train_layer(); the top (code) layer uses a linear RBM.
        data = self.converter.train_images
        data = self._train_layer(RBM(self.converter.dimensionality, 1000), data, 1, 'l1')
        data = self._train_layer(RBM(1000, 500), data, 2, 'l2')
        data = self._train_layer(RBM(500, 250), data, 3, 'l3')
        self._train_layer(RBMLinear(250, 30), data, 4, 'l4')

    def pickle_dumps(self, obj, filename):
        """Pickle `obj` to `filename`."""
        # bugfix: pickle streams are binary; 'w' breaks on Windows and
        # on Python 3. `with` also guarantees the handle is closed.
        with open(filename, 'wb') as f:
            pickle.dump(obj, f)


if __name__ == '__main__':
    auto = MNISTDeepAuto()
    auto.train()
84cf95cde942d91f53959fea4151847902a69d14 | Add a cleanup script. | rl-rc-car/cleanup.py | rl-rc-car/cleanup.py | from rccar import RCCar
car = RCCar()
car.cleanup_gpio()
| Python | 0 | |
b2741a8316ea1ffbf9e88a9fb883ef9e2507be42 | Upgrade libchromiuncontent to 3245ef8 | script/lib/config.py | script/lib/config.py | #!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '3245ef802fbf546f1a1d206990aa9d18be6bfbfe'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
| #!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'f0c3a4546d8e75689c16b9aee1052a72951e58de'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
| Python | 0 |
a03eaddd3e950f628320d1b5b007d87b11906844 | add saveload.py (with error) | converter/saveload.py | converter/saveload.py | #!/usr/local/bin/python
# -*- encoding:utf-8
import sys
import subprocess as sp
import numpy
def load_mp3(filename):
    """Decode `filename` with ffmpeg into a 1-D int16 numpy array.

    The output is 44100 Hz stereo PCM with channels interleaved
    (L, R, L, R, ...); only the first 128000*6 bytes are read.
    """
    command = ['ffmpeg',
               '-i', filename,  # bugfix: was sys.argv[1], ignoring the parameter
               '-f', 's16le',
               '-acodec', 'pcm_s16le',
               '-ar', '44100',  # ouput will have 44100 Hz
               '-ac', '2',      # stereo (set to '1' for mono)
               '-']
    pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
    # bugfix: Popen has no `.proc` attribute; read the pipe's stdout directly.
    raw_audio = pipe.stdout.read(128000*6)
    pipe.stdout.close()
    pipe.wait()
    audio_array = numpy.fromstring(raw_audio, dtype="int16")
    # NOTE(review): reshape(len(...)) is a no-op; a two-column per-channel
    # layout would need reshape((-1, 2)). Kept 1-D because save_mp3()
    # flattens with tofile() anyway -- confirm intended shape.
    audio_array = audio_array.reshape(len(audio_array))
    return audio_array
def save_mp3(filename, audio_array):
    """Encode `audio_array` (int16 PCM, 44100 Hz stereo) to `filename` as MP3
    via ffmpeg."""
    pipe2 = sp.Popen(['ffmpeg',
                      '-y',  # (optional) means overwrite the output file if it already exists.
                      '-f', 's16le',  # means 16bit input
                      '-acodec', 'pcm_s16le',  # means raw 16bit input
                      '-ar', '44100',  # the input will have 44100 Hz
                      '-ac', '2',  # the input will have 2 channels (stereo)
                      '-i', '-',  # means that the input will arrive from the pipe
                      '-vn',  # means "don't expect any video input"
                      '-acodec', 'libmp3lame',  # output audio codec
                      # '-b', "3000k",  # output bitrate (=quality). Here, 3000kb/second
                      filename],
                     stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
    # bugfix: was `self.proc.stdin` -- there is no `self` in this function.
    audio_array.astype("int16").tofile(pipe2.stdin)
    # Close stdin so ffmpeg sees EOF and finishes encoding the file.
    pipe2.stdin.close()
    pipe2.wait()
def main():
    """CLI entry point: decode sys.argv[1] (mp3 in) and re-encode it to
    sys.argv[2] (mp3 out)."""
    ary = load_mp3(sys.argv[1])
    # ary = ary.reshape((ary.shape[0]*2))
    save_mp3(sys.argv[2],ary)


if __name__ == '__main__':
    main()
| Python | 0 | |
4ab3e59b7e9fe339c96042107c3f59bdf1afc46a | add instagram compliance fix | requests_oauthlib/compliance_fixes/instagram.py | requests_oauthlib/compliance_fixes/instagram.py | try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.common import add_params_to_uri
def instagram_compliance_fix(session):
    """Register a 'protected_request' hook on `session` that appends the
    session's ``access_token`` as a query parameter when no token was
    supplied, and return the session."""
    def _non_compliant_param_name(url, headers, data):
        # If the user has already specified the token, either in the URL
        # or in a data dictionary, then there's nothing to do.
        # If the specified token is different from ``session.access_token``,
        # we assume the user intends to override the access token.
        url_query = dict(parse_qs(urlparse(url).query))
        token = url_query.get("token")
        if not token and isinstance(data, dict):
            token = data.get("token")
        if token:
            # Nothing to do, just return.
            return url, headers, data
        token = [('access_token', session.access_token)]
        url = add_params_to_uri(url, token)
        return url, headers, data

    session.register_compliance_hook(
        'protected_request', _non_compliant_param_name)
    return session
| Python | 0 | |
b9b34eb2bca76e76ba4f7399b12daa27ed2ab7f4 | Create uvSetTgl.py | af_scripts/uv/uvSetTgl.py | af_scripts/uv/uvSetTgl.py | # This script will switch UV Set between "map1" and "atlasmap".
# Useage:
# Select meshes and run this script
import maya.cmds as cmds
def uvsetTgl():
    """Toggle the current UV set of every selected mesh between "map1" and
    "atlasUV"; meshes lacking "atlasUV" are forced back to "map1"."""
    shape_node = cmds.ls(sl=True, fl=True, dag=True, type='shape')
    # The first selected shape's current set decides the toggle direction
    # for every shape.
    current_uvset = cmds.polyUVSet(shape_node[0],q=True, currentUVSet=True)
    for shape in shape_node:
        uvsets = cmds.polyUVSet(shape,q=True,auv=True)
        # bugfix: was `if "map1" and "atlasUV" in uvsets`, which only tested
        # "atlasUV" ("map1" was just a truthy operand).
        if "map1" in uvsets and "atlasUV" in uvsets:
            if current_uvset[0] == 'map1':
                cmds.polyUVSet(shape, currentUVSet=True, uvSet="atlasUV")
            elif current_uvset[0] == 'atlasUV':
                cmds.polyUVSet(shape, currentUVSet=True, uvSet="map1")
            else:
                # Some other set is active; fall back to map1.
                cmds.polyUVSet(shape, currentUVSet=True, uvSet="map1")
        elif "map1" in uvsets and "atlasUV" not in uvsets:
            cmds.polyUVSet(shape, currentUVSet=True, uvSet="map1")


uvsetTgl()
| Python | 0.000002 | |
a9b45bf50dae68c9a801ec7942c4f4cc38fa08f5 | Create GenerateUnifiedReports.py | GenerateUnifiedReports.py | GenerateUnifiedReports.py | import argparse
# Read options on which PayPal records to process (year / month) or run on discovery to find the files or discover new files and generate new unified files but preserve the old ones (default)
# load all the Website records based on discovery
# load the PayPal monthly report(s)
# reconsile each record in PayPal records to identify the event and standardize the fields
# save to file the unified records
# GenerateUnifiedReports.py [no options] - this will discover which PayPay files exist wihtout corrisponsiding unified record files and generate the missing unified record files.
# GenerateUnifiedReports.py -f - this will force the generation of all unfied record files even if they already exist
# GenerateUnifiedReports.py -start 2012 01 -end 2013 07 - this will generate the unified record files for the range specified. (start year, start month, end year, end month)
# Command-line interface (see the usage examples above):
#   -force        force regeneration of all unified record files
#   -start Y M    first year/month of the range to generate
#   -end Y M      last year/month of the range to generate
parser = argparse.ArgumentParser(
    description='Process options for generating unified reports')
# bugfix: '-force' was declared with only a metavar, which made it require a
# value; the documented usage ("-f") treats it as a boolean flag.
parser.add_argument('-force', action='store_true',
                    help='force generation of all unified record files')
# bugfix: nargs was the string '2'; argparse requires an integer count.
parser.add_argument('-start', nargs=2, metavar=('YEAR', 'MONTH'),
                    help='start year / month')
parser.add_argument('-end', nargs=2, metavar=('YEAR', 'MONTH'),
                    help='end year / month')

# GenerateUnifiedRecord( paypal_filename, unified_filename )
| Python | 0 | |
24f665e02912a3f79eec9776c86863a9e172d94a | Create HR_pythonPrintFunction.py | HR_pythonPrintFunction.py | HR_pythonPrintFunction.py | import sys
if __name__ == '__main__':
    # Read the upper bound from stdin.
    n = int(input())
    # Unpack range(1, n+1) so print receives 1..n as separate arguments;
    # sep='' concatenates them with no spaces (e.g. n=5 -> "12345").
    # sys.stdout is the default target; it is spelled out here explicitly.
    print(*range(1,n+1), sep='',end='\n', file= sys.stdout)
| Python | 0.000991 | |
ce552a70f77934d4b76b5710b76b22967484d17e | Create folderwatcher.py | folderwatcher.py | folderwatcher.py | import os
import time
import datetime
outold = []
try:
while True:
out = os.listdir()
if outold != out:
ldate= datetime.datetime.now().strftime('%I:%M:%S')
for x in outold:
if x not in out:
print ('Moved: '+ldate+' '+x)
for x in out:
if x not in outold:
print ('New: '+ldate+' '+x)
outold = out
time.sleep(1)
except KeyboardInterrupt:
pass
| Python | 0.000001 | |
ee92a9d89ce2aa9ccbd8ad3de664befeb55ae892 | add API tests for data sources | tempest/api/data_processing/test_data_sources.py | tempest/api/data_processing/test_data_sources.py | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.data_processing import base as dp_base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class DataSourceTest(dp_base.BaseDataProcessingTest):
    """API tests for Sahara data sources: create/list/get/delete for the
    swift, local-HDFS and external-HDFS source types."""

    @classmethod
    def setUpClass(cls):
        super(DataSourceTest, cls).setUpClass()
        # Swift source template including credentials (as sent to the API)...
        cls.swift_data_source_with_creds = {
            'url': 'swift://sahara-container.sahara/input-source',
            'description': 'Test data source',
            'credentials': {
                'user': CONF.identity.username,
                'password': CONF.identity.password
            },
            'type': 'swift'
        }
        # ...and the same body without credentials, used when comparing
        # against API responses below.
        cls.swift_data_source = cls.swift_data_source_with_creds.copy()
        del cls.swift_data_source['credentials']

        cls.local_hdfs_data_source = {
            'url': 'input-source',
            'description': 'Test data source',
            'type': 'hdfs'
        }

        cls.external_hdfs_data_source = {
            'url': 'hdfs://172.18.168.2:8020/usr/hadoop/input-source',
            'description': 'Test data source',
            'type': 'hdfs'
        }

    def _create_data_source(self, source_body, source_name=None):
        """Creates Data Source with optional name specified.

        It creates a link to input-source file (it may not exist) and ensures
        response status and source name. Returns id and name of created source.
        """
        if not source_name:
            # generate random name if it's not specified
            source_name = data_utils.rand_name('sahara-data-source')

        # create data source
        resp, body = self.create_data_source(source_name, **source_body)

        # ensure that source created successfully
        self.assertEqual(202, resp.status)
        self.assertEqual(source_name, body['name'])
        if source_body['type'] == 'swift':
            # compare against the credential-free body; the response does
            # not echo credentials back
            source_body = self.swift_data_source
        self.assertDictContainsSubset(source_body, body)

        return body['id'], source_name

    def _list_data_sources(self, source_info):
        # check for data source in list
        resp, sources = self.client.list_data_sources()
        self.assertEqual(200, resp.status)
        sources_info = [(source['id'], source['name']) for source in sources]
        self.assertIn(source_info, sources_info)

    def _get_data_source(self, source_id, source_name, source_body):
        # check data source fetch by id
        resp, source = self.client.get_data_source(source_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(source_name, source['name'])
        self.assertDictContainsSubset(source_body, source)

    def _delete_data_source(self, source_id):
        # delete the data source by id
        resp = self.client.delete_data_source(source_id)[0]
        self.assertEqual(204, resp.status)

    @test.attr(type='smoke')
    def test_swift_data_source_create(self):
        self._create_data_source(self.swift_data_source_with_creds)

    @test.attr(type='smoke')
    def test_swift_data_source_list(self):
        source_info = self._create_data_source(
            self.swift_data_source_with_creds)
        self._list_data_sources(source_info)

    @test.attr(type='smoke')
    def test_swift_data_source_get(self):
        source_id, source_name = self._create_data_source(
            self.swift_data_source_with_creds)
        self._get_data_source(source_id, source_name, self.swift_data_source)

    @test.attr(type='smoke')
    def test_swift_data_source_delete(self):
        source_id = self._create_data_source(
            self.swift_data_source_with_creds)[0]
        self._delete_data_source(source_id)

    @test.attr(type='smoke')
    def test_local_hdfs_data_source_create(self):
        self._create_data_source(self.local_hdfs_data_source)

    @test.attr(type='smoke')
    def test_local_hdfs_data_source_list(self):
        source_info = self._create_data_source(self.local_hdfs_data_source)
        self._list_data_sources(source_info)

    @test.attr(type='smoke')
    def test_local_hdfs_data_source_get(self):
        source_id, source_name = self._create_data_source(
            self.local_hdfs_data_source)
        self._get_data_source(
            source_id, source_name, self.local_hdfs_data_source)

    @test.attr(type='smoke')
    def test_local_hdfs_data_source_delete(self):
        source_id = self._create_data_source(self.local_hdfs_data_source)[0]
        self._delete_data_source(source_id)

    @test.attr(type='smoke')
    def test_external_hdfs_data_source_create(self):
        self._create_data_source(self.external_hdfs_data_source)

    @test.attr(type='smoke')
    def test_external_hdfs_data_source_list(self):
        source_info = self._create_data_source(self.external_hdfs_data_source)
        self._list_data_sources(source_info)

    @test.attr(type='smoke')
    def test_external_hdfs_data_source_get(self):
        source_id, source_name = self._create_data_source(
            self.external_hdfs_data_source)
        self._get_data_source(
            source_id, source_name, self.external_hdfs_data_source)

    @test.attr(type='smoke')
    def test_external_hdfs_data_source_delete(self):
        source_id = self._create_data_source(self.external_hdfs_data_source)[0]
        self._delete_data_source(source_id)
| Python | 0.000001 | |
41fc87e402aa2864c22adb5c09a713c2b0eacb72 | Add replace test that shutdowns a node and replaces a pod (#806) | frameworks/cassandra/tests/test_recovery_shutdown.py | frameworks/cassandra/tests/test_recovery_shutdown.py | import pytest
from tests.config import *
import sdk_install as install
import sdk_tasks as tasks
import sdk_utils as utils
import json
import shakedown
import time
import sdk_cmd as cmd
def setup_module(module):
    """Install a fresh Cassandra service before any test in this module."""
    install.uninstall(PACKAGE_NAME)
    utils.gc_frameworks()
    # check_suppression=False due to https://jira.mesosphere.com/browse/CASSANDRA-568
    install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT, check_suppression=False)
def setup_function(function):
    """Before each test, wait until all expected tasks are running."""
    tasks.check_running(PACKAGE_NAME, DEFAULT_TASK_COUNT)
def teardown_module(module):
    """Remove the Cassandra service after the module's tests finish."""
    install.uninstall(PACKAGE_NAME)
@pytest.mark.sanity
@pytest.mark.recovery
@pytest.mark.shutdown_node
def test_shutdown_host_test():
    """Permanently shut down the agent hosting one Cassandra pod, replace the
    pod, and verify it lands on a different agent."""
    service_ip = shakedown.get_service_ips(PACKAGE_NAME).pop()
    print('marathon ip = {}'.format(service_ip))

    # Pick a pod whose host differs from the scheduler's host.
    # bugfix: was initialized to 0, which defeated the `is None` check below.
    node_ip = None
    for pod_id in range(0, DEFAULT_TASK_COUNT):
        node_ip = get_pod_host(pod_id)
        # Keep looking when the label is missing (get_pod_host -> None).
        if node_ip is not None and node_ip != service_ip:
            break

    if node_ip is None:
        # bugfix: was `assert Fail, ...`, which raised NameError instead of
        # failing the test with the message.
        assert False, 'could not find a node to shutdown'

    old_agent = get_pod_agent(pod_id)
    print('pod id = {}, node_ip = {}, agent = {}'.format(pod_id, node_ip, old_agent))

    task_ids = tasks.get_task_ids(PACKAGE_NAME, 'node-{}'.format(pod_id))

    # instead of partition/reconnect, we shutdown host permanently
    status, stdout = shakedown.run_command_on_agent(node_ip, 'sudo shutdown -h +1')
    print('shutdown agent {}: [{}] {}'.format(node_ip, status, stdout))
    assert status is True

    # Give the host time to go down (`shutdown -h +1` waits a minute).
    time.sleep(100)
    cmd.run_cli('cassandra pods replace node-{}'.format(pod_id))
    tasks.check_tasks_updated(PACKAGE_NAME, 'node', task_ids)

    # double check all tasks are running
    tasks.check_running(PACKAGE_NAME, DEFAULT_TASK_COUNT)
    new_agent = get_pod_agent(pod_id)
    assert old_agent != new_agent
def get_pod_agent(id):
    """Return the Mesos agent id currently hosting pod node-<id>."""
    pod_info = json.loads(cmd.run_cli('cassandra pods info node-{}'.format(id)))
    return pod_info[0]['info']['slaveId']['value']
def get_pod_label(id):
    """Return the list of labels attached to pod node-<id>."""
    raw = cmd.run_cli('cassandra pods info node-{}'.format(id))
    parsed = json.loads(raw)
    return parsed[0]['info']['labels']['labels']
def get_pod_host(id):
    """Return the 'offer_hostname' label value of pod node-<id>, or None
    when no such label exists."""
    for label in get_pod_label(id):
        if label['key'] == 'offer_hostname':
            return label['value']
    return None
| Python | 0 | |
c11e74d4210c6de8917dfde6cb33d75f6b1b835a | add migration that solves BigAutoField problem | hordak/migrations/0032_check_account_type_big_int.py | hordak/migrations/0032_check_account_type_big_int.py | # Generated by Django 4.0.7 on 2022-09-18 10:33
from django.db import migrations
class Migration(migrations.Migration):
    """Recreate the check_account_type() trigger function.

    NOTE(review): per the migration name this addresses a BigAutoField
    issue; the body still casts NEW.parent_id through ::INT before ::BOOL
    -- confirm the INT cast is safe for big ids.
    """

    dependencies = [
        ("hordak", "0031_alter_account_currencies"),
    ]

    operations = [
        migrations.RunSQL(
            """
            CREATE OR REPLACE FUNCTION check_account_type()
                RETURNS TRIGGER AS
            $$
            BEGIN
                IF NEW.parent_id::INT::BOOL THEN
                    NEW.type = (SELECT type FROM hordak_account WHERE id = NEW.parent_id);
                END IF;
                RETURN NEW;
            END;
            $$
            LANGUAGE plpgsql;
            """,
            "DROP FUNCTION check_account_type()",
        ),
    ]
| Python | 0 | |
d3a11021f8be8e93c5c067b5fcf59bc4f9f92cea | add computation of sts for ISUSM | scripts/dbutil/compute_isusm_sts.py | scripts/dbutil/compute_isusm_sts.py | """
Figure out when the ISUSM data started...
"""
# NOTE: Python 2 script (print statements, dict.has_key).
import psycopg2
import network
import sys
import datetime
import pytz

# NOTE(review): basets is computed but never used below.
basets = datetime.datetime.now()
basets = basets.replace(tzinfo=pytz.timezone("America/Chicago"))

# isuag holds the observations; mesosite holds station metadata.
isuag = psycopg2.connect(database='isuag', host='iemdb')
icursor = isuag.cursor()
mesosite = psycopg2.connect(database='mesosite', host='iemdb')
mcursor = mesosite.cursor()

table = network.Table("ISUSM")

# Earliest/latest observation per station.
icursor.execute("""SELECT station, min(valid), max(valid) from sm_hourly
  GROUP by station ORDER by min ASC""")
for row in icursor:
    station = row[0]
    if not table.sts.has_key(station):
        print 'Whoa station: %s does not exist in metadatabase?' % (station,)
        continue
    # Sync mesosite's archive_begin with the observed minimum.
    if table.sts[station]['archive_begin'] != row[1]:
        print 'Updated %s STS WAS: %s NOW: %s' % (station,
            table.sts[station]['archive_begin'], row[1])
        mcursor.execute("""UPDATE stations SET archive_begin = %s
        WHERE id = %s and network = %s""" , (row[1], station, 'ISUSM') )
    # NOTE(review): this rowcount check runs every iteration, even when no
    # UPDATE was issued above -- confirm it should be nested in the branch.
    if mcursor.rowcount == 0:
        print 'ERROR: No rows updated'

mcursor.close()
mesosite.commit()
mesosite.close()
| Python | 0.000376 | |
133a4311fdb3c96edeb927250e549fcaf4080696 | add silly module | modules/silly.py | modules/silly.py | # -*- coding: ISO-8859-15 -*-
from core.Uusipuu import UusipuuModule
import random, time
from core.tdiff import *
class Module(UusipuuModule):
    def cmd_noppa(self, user, target, params):
        """Roll a six-sided die and announce the result on the channel."""
        # bugfix: restore Finnish characters lost to encoding mangling
        # ('heitetn' -> 'heitetään', 'ykknen' -> 'ykkönen').
        self.log('ok noppaa heitetään!!')
        self.chanmsg('%s!' % random.choice((
            'ykkönen',
            'kakkonen',
            'kolmonen',
            'nelonen',
            'vitonen',
            'kutonen')))
# vim: set et sw=4:
| Python | 0.000001 | |
f340bde6e047d86171385b90a023ac01e8914d0c | Add simple neural network (#6452) | neural_network/simple_neural_network.py | neural_network/simple_neural_network.py | """
Forward propagation explanation:
https://towardsdatascience.com/forward-propagation-in-neural-networks-simplified-math-and-code-version-bbcfef6f9250
"""
import math
import random
# Sigmoid
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the logistic sigmoid of ``value``.

    When ``deriv`` is true, ``value`` is treated as an already-computed
    sigmoid output and the derivative form ``value * (1 - value)`` is
    returned instead.

    >>> sigmoid_function(3.5)
    0.9706877692486436
    >>> sigmoid_function(3.5, True)
    -8.75
    """
    if not deriv:
        return 1 / (1 + math.exp(-value))
    return value * (1 - value)
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single-weight, single-neuron network toward ``expected``.

    Runs ``number_propagations`` rounds of forward propagation followed by
    a gradient-style weight update, then returns the final output scaled
    back to the 0-100 range.

    >>> res = forward_propagation(32, 10000000)
    >>> res > 31 and res < 33
    True

    >>> res = forward_propagation(32, 1000)
    >>> res > 31 and res < 33
    False
    """
    # Start from a random odd weight in [1, 199].
    synapse = float(2 * (random.randint(1, 100)) - 1)

    for _step in range(number_propagations):
        # Forward pass through the single sigmoid unit.
        output = sigmoid_function(INITIAL_VALUE * synapse)
        # How far off the (0-1 scaled) target we are.
        miss = (expected / 100) - output
        # Scale the error by the sigmoid derivative and update the weight.
        adjustment = miss * sigmoid_function(output, True)
        synapse += INITIAL_VALUE * adjustment

    return output * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
expected = int(input("Expected value: "))
number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| Python | 0 | |
4b07d7cdd791a03ef4c7ec7e6e4188b625ffb8dc | Add migration | src/clarityv2/portfolio/migrations/0002_auto_20180228_2055.py | src/clarityv2/portfolio/migrations/0002_auto_20180228_2055.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-28 18:55
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Switch portfolio Entry.description to a CKEditor rich-text field
    (blank allowed)."""

    dependencies = [
        ('portfolio', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='entry',
            name='description',
            field=ckeditor.fields.RichTextField(blank=True),
        ),
    ]
| Python | 0.000002 | |
0bc48c7131e0589e7f2980e16bce6c2dfcdbafda | Fix usage message from tag:file to tag=file | python/utils.py | python/utils.py | '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
Copyright (C) 2011 Ariya Hidayat <ariya.hidayat@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import argparse
version_major = 1
version_minor = 1
version_patch = 0
version = '%d.%d.%d' % (version_major, version_minor, version_patch)
license = '''
PyPhantomJS Version %s
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
''' % version
def argParser():
    """Build and return the command-line argument parser for PyPhantomJS."""
    parser = argparse.ArgumentParser(
        description='Minimalistic headless WebKit-based JavaScript-driven tool',
        usage='%(prog)s [options] script.[js|coffee] [script argument [script argument ...]]',
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument('--load-images', default='yes',
        choices=['yes', 'no'],
        help='Load all inlined images (default: %(default)s)'
    )
    parser.add_argument('--load-plugins', default='no',
        choices=['yes', 'no'],
        help='Load all plugins (i.e. Flash, Silverlight, ...)\n(default: %(default)s)'
    )
    parser.add_argument('--proxy', metavar='address:port',
        help='Set the network proxy'
    )
    # 'tag=file' pairs, e.g. --upload-file photo=/tmp/a.png
    parser.add_argument('--upload-file', nargs='*',
        metavar='tag=file', help='Upload 1 or more files'
    )
    parser.add_argument('script', metavar='script.[js|coffee]', nargs='*',
        help='The script to execute, and any args to pass to it'
    )
    parser.add_argument('--version',
        action='version', version=license,
        help='show this program\'s version and license'
    )
    return parser
| '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
Copyright (C) 2011 Ariya Hidayat <ariya.hidayat@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import argparse
version_major = 1
version_minor = 1
version_patch = 0
version = '%d.%d.%d' % (version_major, version_minor, version_patch)
license = '''
PyPhantomJS Version %s
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
''' % version
def argParser():
parser = argparse.ArgumentParser(
description='Minimalistic headless WebKit-based JavaScript-driven tool',
usage='%(prog)s [options] script.[js|coffee] [script argument [script argument ...]]',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('--load-images', default='yes',
choices=['yes', 'no'],
help='Load all inlined images (default: %(default)s)'
)
parser.add_argument('--load-plugins', default='no',
choices=['yes', 'no'],
help='Load all plugins (i.e. Flash, Silverlight, ...)\n(default: %(default)s)'
)
parser.add_argument('--proxy', metavar='address:port',
help='Set the network proxy'
)
parser.add_argument('--upload-file', nargs='*',
metavar='tag:file', help='Upload 1 or more files'
)
parser.add_argument('script', metavar='script.[js|coffee]', nargs='*',
help='The script to execute, and any args to pass to it'
)
parser.add_argument('--version',
action='version', version=license,
help='show this program\'s version and license'
)
return parser
| Python | 0.000623 |
4065a08ea401e0d95e8d40d9d735edf92edda861 | Add unit tests on cache handler | oslo_policy/tests/test_cache_handler.py | oslo_policy/tests/test_cache_handler.py | # Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the cache handler module"""
import os
import fixtures
from oslotest import base as test_base
from oslo_policy import _cache_handler as _ch
class CacheHandlerTest(test_base.BaseTestCase):
    """Unit tests for oslo_policy._cache_handler.read_cached_file."""

    def setUp(self):
        super().setUp()
        # Fresh per-test temp directory; the fixture handles cleanup.
        self.tmpdir = self.useFixture(fixtures.TempDir())

    def test_read_cached_file(self):
        file_cache = {}

        path = os.path.join(self.tmpdir.path, 'tmpfile')
        with open(path, 'w+') as fp:
            fp.write('test')

        # First read populates the cache.
        reloaded, data = _ch.read_cached_file(file_cache, path)
        self.assertEqual('test', data)
        self.assertTrue(reloaded)

        # Unchanged file: served from the cache, not reloaded.
        reloaded, data = _ch.read_cached_file(file_cache, path)
        self.assertEqual('test', data)
        self.assertFalse(reloaded)

        # force_reload bypasses the cache even when the file is unchanged.
        reloaded, data = _ch.read_cached_file(
            file_cache, path, force_reload=True)
        self.assertEqual('test', data)
        self.assertTrue(reloaded)

    def test_read_cached_file_with_updates(self):
        file_cache = {}

        path = os.path.join(self.tmpdir.path, 'tmpfile')
        with open(path, 'w+') as fp:
            fp.write('test')

        reloaded, data = _ch.read_cached_file(file_cache, path)

        # update the timestamps
        times = (os.stat(path).st_atime + 1, os.stat(path).st_mtime + 1)
        os.utime(path, times)

        # A newer mtime must trigger a reload.
        reloaded, data = _ch.read_cached_file(file_cache, path)
        self.assertTrue(reloaded)
| Python | 0.000002 | |
bcc7dc9bc014dfd17db6fced18de47535a003b60 | add import JSON script for propostions #2 | scripts/import_proposition_json.py | scripts/import_proposition_json.py | import argparse
import json
import logging
import sqlalchemy.orm
import transaction
from eliot import log_call, start_task
logging.basicConfig(level=logging.INFO)
from ekklesia_portal.app import make_wsgi_app
@log_call
def load_proposition_json_file(filepath):
    """Load a proposition JSON file and normalize it into keyword arguments
    for insert_proposition().

    Raises KeyError naming *all* missing required fields at once. Optional
    fields default to None, except 'tags' which defaults to an empty list.
    """
    with open(filepath) as f:
        json_data = json.load(f)

    # TODO: Slugify tag_names!
    required_fields = {
        'title',
        'author',
        'abstract',
        'content'
    }

    optional_fields = {
        'motivation',
        'tags',
        'external_discussion_url'
    }

    # Report every missing required key in one error instead of failing on
    # the first (resolves the old "use sets to find missing keys" TODO).
    missing = required_fields - json_data.keys()
    if missing:
        raise KeyError("malformed wiki json_data JSON, missing keys: {}".format(
            ", ".join(sorted(missing))))

    imported = {key: json_data[key] for key in required_fields}
    for key in optional_fields:
        imported[key] = json_data.get(key)

    # bugfix: with no 'tags' in the input, imported['tags'] was None, which
    # crashed on .append()/list(set(...)) below.
    imported["tags"] = imported["tags"] or []

    if "type" in json_data:
        imported["tags"].append(json_data["type"])
    if "group" in json_data:
        imported["tags"].append(json_data["group"])
    # Deduplicate tags.
    imported["tags"] = list(set(imported["tags"]))
    return imported
@log_call
def insert_proposition(department_name, title, abstract, content, motivation, author, tags, external_discussion_url):
    """Create a Proposition (with ballot, tags and submitting supporter)
    in the department named `department_name`.

    Uses the module-global `session` and the model classes that the
    __main__ block binds before calling this function.  The caller is
    responsible for committing the transaction.

    Raises ValueError if the department has no 'Allgemein' subject area.
    """
    department = session.query(Department).filter_by(name=department_name).one()
    # Imported propositions always land in the department's generic
    # subject area, which must already exist.
    maybe_subject_area = [area for area in department.areas if area.name == "Allgemein"]
    if not maybe_subject_area:
        raise ValueError("Subject area 'Allgemein' not found! Please create it!")
    subject_area = maybe_subject_area[0]
    # Reuse an existing user with this name, or create a placeholder
    # account flagged as coming from an import.
    user = session.query(User).filter_by(name=author).scalar()
    if user is None:
        user = User(name=author, auth_type="import")
    ballot = Ballot(area=subject_area)
    proposition = Proposition(title=title, abstract=abstract, content=content, motivation=motivation,
                              external_discussion_url=external_discussion_url, ballot=ballot)
    # Attach tags, creating any that do not exist yet.
    for tag_name in tags:
        tag = session.query(Tag).filter_by(name=tag_name).scalar()
        if tag is None:
            tag = Tag(name=tag_name)
        proposition.tags.append(tag)
    supporter = Supporter(member=user, proposition=proposition, submitter=True)
    # NOTE(review): only the supporter is added explicitly; user, ballot,
    # proposition and new tags are presumably persisted via relationship
    # cascades -- confirm against the datamodel.
    session.add(supporter)
# Command-line interface; consumed by the __main__ block below.
# (Dropped the pointless f-string prefixes: the help texts contain no
# placeholders.)
parser = argparse.ArgumentParser("Ekklesia Portal import_proposition_json.py")
parser.add_argument("-c", "--config-file", help="path to config file in YAML / JSON format")
parser.add_argument("-d", "--department", help="Choose the department to import to.")
parser.add_argument('filenames', nargs='+', help="proposition JSON files to import")
if __name__ == "__main__":
    logg = logging.getLogger(__name__)
    args = parser.parse_args()
    # The app is created before the database imports below; presumably
    # the app factory configures the engine those modules need, which is
    # why the imports live here rather than at the top of the file --
    # confirm before moving them.
    app = make_wsgi_app(args.config_file)
    from ekklesia_portal.database.datamodel import Ballot, Department, Proposition, User, Supporter, Tag
    from ekklesia_portal.database import Session
    session = Session()
    sqlalchemy.orm.configure_mappers()
    # Import each file inside its own eliot task for traceable logging;
    # commit once after all files have been processed.
    for fp in args.filenames:
        with start_task(action_type="import_proposition"):
            imported_data = load_proposition_json_file(fp)
            insert_proposition(args.department, **imported_data)
    transaction.commit()
| Python | 0 | |
09a413c3d02d177a85872c7957591f18489a9ed0 | Add test for linearmodel functions | seaborn/tests/test_linearmodels.py | seaborn/tests/test_linearmodels.py | import numpy as np
import statsmodels.api as sm
import nose.tools as nt
import numpy.testing as npt
from .. import linearmodels as lm
rs = np.random.RandomState(0)
class TestRegPlot(object):
    """Test internal functions that perform computation for regplot()."""

    # Deterministic fixtures: `rs` is the module-level seeded RandomState.
    x = rs.randn(50)
    # Binary predictor for the point-estimate tests (25 zeros, 25 ones).
    x_discrete = np.repeat([0, 1], 25)
    # NOTE(review): `2 + 1.5 * 2` is a constant, so y does not depend on
    # x; this looks like it was meant to be `2 + 1.5 * x` -- confirm.
    y = 2 + 1.5 * 2 + rs.randn(50)
    # Grid of x positions where the regression estimate is evaluated.
    grid = np.linspace(-3, 3, 30)
    n_boot = 20
    ci = 95
    bins_numeric = 3
    bins_given = [-1, 0, 1]

    def test_regress_fast(self):
        """Validate fast regression fit and bootstrap."""
        # Fit with the "fast" function, which just does linear algebra
        fast = lm._regress_fast(self.grid, self.x, self.y,
                                self.ci, self.n_boot)
        yhat_fast, _ = fast
        # Fit using the statsmodels function with an OLS model
        smod = lm._regress_statsmodels(self.grid, self.x, self.y, sm.OLS,
                                       self.ci, self.n_boot)
        yhat_smod, _ = smod
        # Compare the vector of y_hat values
        npt.assert_array_almost_equal(yhat_fast, yhat_smod)

    def test_regress_poly(self):
        """Validate polyfit-based regression fit and bootstrap."""
        # Fit a first-order polynomial
        poly = lm._regress_poly(self.grid, self.x, self.y, 1,
                                self.ci, self.n_boot)
        yhat_poly, _ = poly
        # Fit using the statsmodels function with an OLS model
        smod = lm._regress_statsmodels(self.grid, self.x, self.y, sm.OLS,
                                       self.ci, self.n_boot)
        yhat_smod, _ = smod
        # Compare the vector of y_hat values
        npt.assert_array_almost_equal(yhat_poly, yhat_smod)

    def test_regress_n_boot(self):
        """Test correct bootstrap size for internal regression functions."""
        args = self.grid, self.x, self.y
        # Fast (linear algebra) version
        fast = lm._regress_fast(*args, ci=self.ci, n_boot=self.n_boot)
        _, boots_fast = fast
        npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
        # Slower (np.polyfit) version
        poly = lm._regress_poly(*args, order=1, ci=self.ci, n_boot=self.n_boot)
        _, boots_poly = poly
        npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
        # Slowest (statsmodels) version
        smod = lm._regress_statsmodels(*args, model=sm.OLS,
                                       ci=self.ci, n_boot=self.n_boot)
        _, boots_smod = smod
        npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))

    def test_regress_noboot(self):
        """Test that regression functions return None if not bootstrapping."""
        args = self.grid, self.x, self.y
        # Fast (linear algebra) version
        fast = lm._regress_fast(*args, ci=None, n_boot=self.n_boot)
        _, boots_fast = fast
        nt.assert_is(boots_fast, None)
        # Slower (np.polyfit) version
        poly = lm._regress_poly(*args, order=1, ci=None, n_boot=self.n_boot)
        _, boots_poly = poly
        nt.assert_is(boots_poly, None)
        # Slowest (statsmodels) version
        smod = lm._regress_statsmodels(*args, model=sm.OLS,
                                       ci=None, n_boot=self.n_boot)
        _, boots_smod = smod
        nt.assert_is(boots_smod, None)

    def test_numeric_bins(self):
        """Test discretizing x into `n` bins."""
        x_binned, bins = lm._bin_predictor(self.x, self.bins_numeric)
        npt.assert_equal(len(bins), self.bins_numeric)
        # Every binned value must be one of the returned bin centers.
        npt.assert_array_equal(np.unique(x_binned), bins)

    def test_provided_bins(self):
        """Test discretizing x into provided bins."""
        x_binned, bins = lm._bin_predictor(self.x, self.bins_given)
        npt.assert_array_equal(np.unique(x_binned), self.bins_given)

    def test_binning(self):
        """Test that the binning actually works."""
        # Every value assigned to a higher bin must exceed every value
        # assigned to the bin below it.
        x_binned, bins = lm._bin_predictor(self.x, self.bins_given)
        nt.assert_greater(self.x[x_binned == 0].min(),
                          self.x[x_binned == -1].max())
        nt.assert_greater(self.x[x_binned == 1].min(),
                          self.x[x_binned == 0].max())

    def test_point_est(self):
        """Test statistic estimation for discrete input data."""
        x_vals, points, cis = lm._point_est(self.x_discrete, self.y, np.mean,
                                            self.ci, self.n_boot)
        # One estimate and one (low, high) CI pair per unique x value.
        npt.assert_array_equal(x_vals, sorted(np.unique(self.x_discrete)))
        nt.assert_equal(len(points), np.unique(self.x_discrete).size)
        nt.assert_equal(np.shape(cis), (np.unique(self.x_discrete).size, 2))

    def test_point_ci(self):
        """Test the confidence interval in the point estimate function."""
        # A 95% interval must be strictly wider than a 15% interval.
        _, _, big_cis = lm._point_est(self.x_discrete, self.y,
                                      np.mean, 95, self.n_boot)
        _, _, wee_cis = lm._point_est(self.x_discrete, self.y,
                                      np.mean, 15, self.n_boot)
        npt.assert_array_less(np.diff(wee_cis), np.diff(big_cis))
| Python | 0.000002 | |
b3a7bca64b256dcc09b8ad49a7491e7a3717e74f | disable automatic deployment of images (needs verification) | planetstack/observer/steps/sync_image_deployments.py | planetstack/observer/steps/sync_image_deployments.py | import os
import base64
from collections import defaultdict
from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.deployment import Deployment
from core.models.image import Image, ImageDeployments
from util.logger import Logger, logging
logger = Logger(level=logging.INFO)
class SyncImageDeployments(OpenStackSyncStep):
    """Sync step that ensures every pending ImageDeployments record has a
    matching image registered in its deployment's glance service.

    ImageDeployments records themselves are no longer auto-created here;
    they are configured through the GUI (see the comment in
    fetch_pending).
    """
    provides=[ImageDeployments]
    requested_interval=0

    def fetch_pending(self):
        """Return ImageDeployments records that were updated since they
        were last enacted, or that were never enacted at all."""
        # smbaker: commented out automatic creation of ImageDeployments
        # as they will now be configured in GUI. Not sure if this is
        # sufficient.
        # # ensure images are available across all deployments
        # image_deployments = ImageDeployments.objects.all()
        # image_deploy_lookup = defaultdict(list)
        # for image_deployment in image_deployments:
        #     image_deploy_lookup[image_deployment.image].append(image_deployment.deployment)
        #
        # all_deployments = Deployment.objects.all()
        # for image in Image.objects.all():
        #     expected_deployments = all_deployments
        #     for expected_deployment in expected_deployments:
        #         if image not in image_deploy_lookup or \
        #           expected_deployment not in image_deploy_lookup[image]:
        #             id = ImageDeployments(image=image, deployment=expected_deployment)
        #             id.save()

        # now we return all images that need to be enacted
        return ImageDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))

    def sync_record(self, image_deployment):
        """Make sure image_deployment.image exists in glance on its
        deployment, uploading it from image.path if necessary, and record
        the resulting glance image id on the ImageDeployments row."""
        logger.info("Working on image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
        driver = self.driver.admin_driver(deployment=image_deployment.deployment.name)
        images = driver.shell.glance.get_images()

        # Reuse an existing glance image with a matching name, if any.
        glance_image = None
        for image in images:
            if image['name'] == image_deployment.image.name:
                glance_image = image
                break

        if glance_image:
            logger.info("Found image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
            image_deployment.glance_image_id = glance_image['id']
        elif image_deployment.image.path:
            # (Removed an `image = {...}` dict that was built here but
            # never used; the create() call passes the values directly.)
            logger.info("Creating image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
            glance_image = driver.shell.glanceclient.images.create(name=image_deployment.image.name,
                                                                   is_public=True,
                                                                   disk_format='raw',
                                                                   container_format='bare')
            glance_image.update(data=open(image_deployment.image.path, 'rb'))

        # While the images returned by driver.shell.glance.get_images()
        # are dicts, the images returned by driver.shell.glanceclient.images.create
        # are not dicts. We have to use getattr() instead of [] operator.
        if not glance_image or not getattr(glance_image, "id", None):
            # Parenthesized raise form works on both Python 2 and 3; the
            # original `raise Exception, "..."` is Python-2-only syntax.
            raise Exception("Add image failed at deployment %s" % image_deployment.deployment.name)
        image_deployment.glance_image_id = getattr(glance_image, "id")
        image_deployment.save()
| import os
import base64
from collections import defaultdict
from django.db.models import F, Q
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.deployment import Deployment
from core.models.image import Image, ImageDeployments
from util.logger import Logger, logging
logger = Logger(level=logging.INFO)
class SyncImageDeployments(OpenStackSyncStep):
    """Sync step that mirrors every Image onto every Deployment, creating
    missing ImageDeployments records and registering the image in each
    deployment's glance service."""
    provides=[ImageDeployments]
    requested_interval=0

    def fetch_pending(self):
        """Create any missing (image, deployment) pairs, then return the
        ImageDeployments records that still need to be enacted."""
        # ensure images are available across all deployments
        image_deployments = ImageDeployments.objects.all()
        image_deploy_lookup = defaultdict(list)
        for image_deployment in image_deployments:
            image_deploy_lookup[image_deployment.image].append(image_deployment.deployment)

        all_deployments = Deployment.objects.all()
        for image in Image.objects.all():
            expected_deployments = all_deployments
            for expected_deployment in expected_deployments:
                if image not in image_deploy_lookup or \
                  expected_deployment not in image_deploy_lookup[image]:
                    # NOTE(review): `id` shadows the builtin of the same name.
                    id = ImageDeployments(image=image, deployment=expected_deployment)
                    id.save()

        # now we return all images that need to be enacted
        return ImageDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))

    def sync_record(self, image_deployment):
        """Ensure image_deployment.image exists in glance on its
        deployment, uploading it from image.path if necessary, and store
        the resulting glance image id."""
        logger.info("Working on image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
        driver = self.driver.admin_driver(deployment=image_deployment.deployment.name)
        images = driver.shell.glance.get_images()
        # Reuse an existing glance image with a matching name, if any.
        glance_image = None
        for image in images:
            if image['name'] == image_deployment.image.name:
                glance_image = image
                break
        if glance_image:
            logger.info("Found image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
            image_deployment.glance_image_id = glance_image['id']
        elif image_deployment.image.path:
            # NOTE(review): this dict is built but never used; the
            # create() call below passes the values directly.
            image = {
                'name': image_deployment.image.name,
                'is_public': True,
                'disk_format': 'raw',
                'container_format': 'bare',
                'file': image_deployment.image.path,
            }
            logger.info("Creating image %s on deployment %s" % (image_deployment.image.name, image_deployment.deployment.name))
            glance_image = driver.shell.glanceclient.images.create(name=image_deployment.image.name,
                                                                   is_public=True,
                                                                   disk_format='raw',
                                                                   container_format='bare')
            glance_image.update(data=open(image_deployment.image.path, 'rb'))
        # While the images returned by driver.shell.glance.get_images()
        # are dicts, the images returned by driver.shell.glanceclient.images.create
        # are not dicts. We have to use getattr() instead of [] operator.
        if not glance_image or not getattr(glance_image,"id",None):
            # NOTE(review): Python-2-only raise syntax.
            raise Exception, "Add image failed at deployment %s" % image_deployment.deployment.name
        image_deployment.glance_image_id = getattr(glance_image, "id")
        image_deployment.save()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.