commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
ad777af05d2995ee43b3d64ce435cc96379fa9a2 | add iostat template | graph_templates/iostat.py | graph_templates/iostat.py | from . import GraphTemplate
class IostatTemplate(GraphTemplate):
'''
corresponds to diamond diskusage plugin
'''
target_types = {
'gauge': {
'match': '^servers\.(?P<server>[^\.]+)\.iostat\.(?P<device>[^\.]+)\.(?P<type>.*)$',
'default_group_by': 'server',
'default_graph_options': {'state': 'stacked'}
},
'rate': {
'match': '^servers\.(?P<server>[^\.]+)\.iostat\.(?P<device>[^\.]+)\.(?P<type>.*)_per_second$',
'default_group_by': 'server',
'default_graph_options': {'state': 'stacked', 'vtitle': 'events/s'}
}
}
# vim: ts=4 et sw=4:
| Python | 0 | |
5a47b4f3b13c2d66a9e226eeb90dfddef048279f | Implement the host and address options in runserver | djangae/management/commands/runserver.py | djangae/management/commands/runserver.py | import os
from django.core.management.commands.runserver import BaseRunserverCommand
from datetime import datetime
class Command(BaseRunserverCommand):
"""
Overrides the default Django runserver command.
Instead of starting the default Django development server this
command fires up a copy of the full fledged App Engine
dev_appserver that emulates the live environment your application
will be deployed to.
"""
def inner_run(self, *args, **options):
import sys
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
from djangae.utils import find_project_root
from djangae.sandbox import _find_sdk_from_python_path
from django.conf import settings
from django.utils import translation
# Check for app.yaml
expected_path = os.path.join(find_project_root(), "app.yaml")
if not os.path.exists(expected_path):
sys.stderr.write("Unable to find app.yaml at '%s'\n" % expected_path)
sys.exit(1)
self.stdout.write("Validating models...\n\n")
self.validate(display_num_errors=True)
self.stdout.write((
"%(started_at)s\n"
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"started_at": datetime.now().strftime('%B %d, %Y - %X'),
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
"port": self.port,
"quit_command": quit_command,
})
sys.stdout.write("\n")
sys.stdout.flush()
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
# Will have been set by setup_paths
sdk_path = _find_sdk_from_python_path()
from google.appengine.tools.devappserver2 import devappserver2
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import python_runtime
from djangae import sandbox
sandbox._OPTIONS.port = int(self.port) if self.port else sandbox._OPTIONS.port
sandbox._OPTIONS.host = self.addr if self.addr else sandbox._OPTIONS.host
class NoConfigDevServer(devappserver2.DevelopmentServer):
@staticmethod
def _create_api_server(request_data, storage_path, options, configuration):
return api_server.APIServer(options.api_host, options.api_port, configuration.app_id)
python_runtime._RUNTIME_PATH = os.path.join(sdk_path, '_python_runtime.py')
python_runtime._RUNTIME_ARGS = [sys.executable, python_runtime._RUNTIME_PATH]
devappserver = NoConfigDevServer()
devappserver.start(sandbox._OPTIONS)
if shutdown_message:
sys.stdout.write(shutdown_message)
return
| import os
from django.core.management.commands.runserver import BaseRunserverCommand
from datetime import datetime
class Command(BaseRunserverCommand):
"""
Overrides the default Django runserver command.
Instead of starting the default Django development server this
command fires up a copy of the full fledged App Engine
dev_appserver that emulates the live environment your application
will be deployed to.
"""
def inner_run(self, *args, **options):
import sys
shutdown_message = options.get('shutdown_message', '')
# We use the old dev appserver if threading is disabled or --old was passed
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
from djangae.utils import find_project_root
from djangae.sandbox import _find_sdk_from_python_path
from django.conf import settings
from django.utils import translation
# Check for app.yaml
expected_path = os.path.join(find_project_root(), "app.yaml")
if not os.path.exists(expected_path):
sys.stderr.write("Unable to find app.yaml at '%s'\n" % expected_path)
sys.exit(1)
self.stdout.write("Validating models...\n\n")
self.validate(display_num_errors=True)
self.stdout.write((
"%(started_at)s\n"
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"started_at": datetime.now().strftime('%B %d, %Y - %X'),
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
"port": self.port,
"quit_command": quit_command,
})
sys.stdout.write("\n")
sys.stdout.flush()
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
# Will have been set by setup_paths
sdk_path = _find_sdk_from_python_path()
from google.appengine.tools.devappserver2 import devappserver2
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import python_runtime
from djangae import sandbox
class NoConfigDevServer(devappserver2.DevelopmentServer):
@staticmethod
def _create_api_server(request_data, storage_path, options, configuration):
return api_server.APIServer(options.api_host, options.api_port, configuration.app_id)
python_runtime._RUNTIME_PATH = os.path.join(sdk_path, '_python_runtime.py')
python_runtime._RUNTIME_ARGS = [sys.executable, python_runtime._RUNTIME_PATH]
devappserver = NoConfigDevServer()
devappserver.start(sandbox._OPTIONS)
if shutdown_message:
sys.stdout.write(shutdown_message)
return
| Python | 0.000001 |
1637b53727f81c9528c47effab172f86a58e8b9a | Add script register_ph_migrate.py to mass register and migrate placeholders remotely | ereuse_devicehub/scripts/register_ph_migrate.py | ereuse_devicehub/scripts/register_ph_migrate.py | import argparse
import requests
from ereuse_devicehub.security.request_auth import Auth
def create_placeholders_and_migrate(base_url, email, password, n_placeholders, origin_db, dest_db, label=None,
comment=None):
"""
Remotely connects to a devicehub, creates n_placeholders placeholders and then migrates them to a dest_db
in the same devicehub.
"""
try:
auth = Auth(base_url, email, password)
snapshot = {
"@type": "devices:Register",
"device": {
"@type": "Device",
"placeholder": True
}
}
devices_id = []
for _ in range(0, n_placeholders):
r = requests.post('{}/{}/events/devices/register'.format(base_url, origin_db), json=snapshot, auth=auth)
r.raise_for_status()
result = r.json()
devices_id.append(result['device'])
migrate = {
"@type": "devices:Migrate",
"label": label,
"to": {
"baseUrl": "https://devicehub.ereuse.org/",
"database": dest_db
},
'devices': devices_id,
"comment": comment
}
r = requests.post('{}/{}/events/devices/migrate'.format(base_url, origin_db), json=migrate, auth=auth)
r.raise_for_status()
except Exception as e:
raise e
if __name__ == '__main__':
desc = 'Creates a number of placeholders and then migrates them to another database. ' \
'This method executes remotely to any DeviceHub on the web.'
epilog = 'Example: python register_ph_migrate.py http://api.foo.bar a@a.a pass 25 db1 db2' \
' -l "Migrate to DB2" -c "This migrate represents..."'
parser = argparse.ArgumentParser(description=desc, epilog=epilog)
parser.add_argument('base_url', help='Ex: https://api.devicetag.io')
parser.add_argument('email')
parser.add_argument('password')
parser.add_argument('n_placeholders', help='Number of placeholders to create and migrate', type=int)
parser.add_argument('origin_db', help='Name of the database where placeholders are Registered and them moved from')
parser.add_argument('dest_db', help='Destination db')
parser.add_argument('-l', '--label')
parser.add_argument('-c', '--comment')
args = vars(parser.parse_args()) # If --help or -h or wrong value this will print message to user and abort
create_placeholders_and_migrate(**args)
| Python | 0 | |
417e8d1b63b4b343de3a81366d2d2ce433f089dd | Add paf (#294) | jcvi/formats/paf.py | jcvi/formats/paf.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# paf.py
# formats
#
# Created by Haibao Tang on 09/03/20
# Copyright © 2020 Haibao Tang. All rights reserved.
#
import sys
import logging
from jcvi.formats.base import must_open
from jcvi.apps.base import OptionParser, ActionDispatcher
class PAFLine:
"""
PAF specification
https://github.com/lh3/miniasm/blob/master/PAF.md
"""
__slots__ = (
"query",
"qsize",
"qstart",
"qstop",
"orientation",
"subject",
"ssize",
"sstart",
"sstop",
"nmatch",
"hitlen",
"mapq",
)
def __init__(self, row):
args = row.split()
self.query = args[0]
self.qsize = int(args[1])
self.qstart = int(args[2]) + 1
self.qstop = int(args[3])
self.orientation = args[4]
self.subject = args[5]
self.ssize = int(args[6])
self.sstart = int(args[7]) + 1
self.sstop = int(args[8])
self.nmatch = int(args[9])
self.hitlen = int(args[10])
self.mapq = int(args[11])
@property
def sbedline(self):
return "\t".join(
str(x)
for x in (
self.subject,
self.sstart - 1,
self.sstop,
self.query,
self.hitlen,
self.orientation,
)
)
@property
def qbedline(self):
return "\t".join(
str(x)
for x in (
self.query,
self.qstart - 1,
self.qstop,
self.subject,
self.hitlen,
self.orientation,
)
)
def bed(args):
"""
%prog bed paffile
Print out BED file based on coordinates in BLAST PAF results. By default,
write out subject positions. Use --swap to write query positions.
"""
from jcvi.formats.bed import sort as sort_bed
p = OptionParser(bed.__doc__)
p.add_option(
"--swap", default=False, action="store_true", help="Write query positions"
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(paffile,) = args
write_qbed = opts.swap
bedfile = "{}.{}.bed".format(
paffile.rsplit(".", 1)[0], "query" if write_qbed else "subject"
)
with must_open(paffile) as fp, open(bedfile, "w") as fw:
for row in fp:
b = PAFLine(row)
if write_qbed:
print(b.qbedline, file=fw)
else:
print(b.sbedline, file=fw)
logging.debug("File written to `{}`".format(bedfile))
sort_bed([bedfile, "-i"])
return bedfile
def main():
actions = (("bed", "get BED file from PAF"),)
p = ActionDispatcher(actions)
p.dispatch(globals())
if __name__ == "__main__":
main()
| Python | 0 | |
54641126cd8d662c6443aff1e6fe238c4bb09932 | Add PowerAnalysers Voltech PMn000 | engineering_project/Instrument/PowerAnalyser.py | engineering_project/Instrument/PowerAnalyser.py | # import time
# import logging
# from scipy.interpolate import UnivariateSpline
# import numpy as np
try:
from Instrument.GenericInstrument import GenericInstrument
from Instrument.IEEE488 import IEEE488
from Instrument.SCPI import SCPI
except ImportError:
from GenericInstrument import GenericInstrument
from IEEE488 import IEEE488
from SCPI import SCPI
class PowerAnalyser(GenericInstrument):
"""Parent class for PowerAnalysers."""
def __init__(self, instrument):
"""."""
super().__init__(instrument)
def __repr__(self):
"""."""
return"{}, {}".format(__class__, self.instrument)
class VoltechPM3000A(PowerAnalyser, IEEE488):
"""Voltech PM3000A.
.. figure:: images/PowerAnalyser/VoltechPM3000A.jpg
"""
class VoltechPM1000P(PowerAnalyser, IEEE488):
"""Voltech PM1000+.
.. figure:: images/PowerAnalyser/VoltechPM1000P.jpg
"""
# REGISTER = {}
| Python | 0 | |
d4a5e1bf3e44a8cfe4bd3a9d1900803613e6da67 | Add C_O_L_R_test.py to check basic compile/decompile of otTables.COLR | Tests/ttLib/tables/C_O_L_R_test.py | Tests/ttLib/tables/C_O_L_R_test.py | from fontTools import ttLib
from fontTools.misc.testTools import getXML, parseXML
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter
import pytest
COLR_DATA = (
b"\x00\x00" # Version
b"\x00\x01" # BaseGlyphRecordCount
b"\x00\x00\x00\x0e" # Offset to BaseGlyphRecordArray
b"\x00\x00\x00\x14" # Offset to LayerRecordArray
b"\x00\x03" # LayerRecordCount
b"\x00\x06" # BaseGlyphRecord[0].BaseGlyph
b"\x00\x00" # BaseGlyphRecord[0].FirstLayerIndex
b"\x00\x03" # BaseGlyphRecord[0].NumLayers
b"\x00\x07" # LayerRecord[0].LayerGlyph
b"\x00\x00" # LayerRecord[0].PaletteIndex
b"\x00\x08" # LayerRecord[1].LayerGlyph
b"\x00\x01" # LayerRecord[1].PaletteIndex
b"\x00\t" # LayerRecord[2].LayerGlyph
b"\x00\x02" # LayerRecord[3].PaletteIndex
)
COLR_XML = [
"<COLR>",
' <Version value="0"/>',
" <!-- BaseGlyphRecordCount=1 -->",
" <BaseGlyphRecordArray>",
' <BaseGlyphRecord index="0">',
' <BaseGlyph value="glyph00006"/>',
' <FirstLayerIndex value="0"/>',
' <NumLayers value="3"/>',
" </BaseGlyphRecord>",
" </BaseGlyphRecordArray>",
" <LayerRecordArray>",
' <LayerRecord index="0">',
' <LayerGlyph value="glyph00007"/>',
' <PaletteIndex value="0"/>',
" </LayerRecord>",
' <LayerRecord index="1">',
' <LayerGlyph value="glyph00008"/>',
' <PaletteIndex value="1"/>',
" </LayerRecord>",
' <LayerRecord index="2">',
' <LayerGlyph value="glyph00009"/>',
' <PaletteIndex value="2"/>',
" </LayerRecord>",
" </LayerRecordArray>",
" <!-- LayerRecordCount=3 -->",
"</COLR>",
]
def dump(table, ttFont=None):
print("\n".join(getXML(table.toXML, ttFont)))
@pytest.fixture
def font():
font = ttLib.TTFont()
font.setGlyphOrder(["glyph%05d" % i for i in range(10)])
return font
def test_decompile_and_compile(font):
colr = ot.COLR()
reader = OTTableReader(COLR_DATA)
colr.decompile(reader, font)
writer = OTTableWriter()
colr.compile(writer, font)
data = writer.getAllData()
assert data == COLR_DATA
def test_decompile_and_dump_xml(font):
colr = ot.COLR()
reader = OTTableReader(COLR_DATA)
colr.decompile(reader, font)
dump(colr, font)
assert getXML(colr.toXML, font) == COLR_XML
| Python | 0 | |
884861de58ddfb12f2f5d15ce35349c74eab0c4e | Create 5009-set_bio_gripper.py | example/wrapper/common/5009-set_bio_gripper.py | example/wrapper/common/5009-set_bio_gripper.py | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Hutton <geweipan@ufactory.cc>
"""
Example: Bio Gripper Control
Please make sure that the gripper is attached to the end.
"""
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from xarm.wrapper import XArmAPI
from configparser import ConfigParser
parser = ConfigParser()
parser.read('../robot.conf')
try:
ip = parser.get('xArm', 'ip')
except:
ip = input('Please input the xArm ip address[192.168.1.194]:')
if not ip:
ip = '192.168.1.194'
arm = XArmAPI(ip)
time.sleep(0.5)
if arm.warn_code != 0:
arm.clean_warn()
if arm.error_code != 0:
arm.clean_error()
arm.motion_enable(enable=True) #gripper enable
time.sleep(2) #Initialize the wait time
arm.set_gripper_position(-10,wait=False,auto_enable=True,speed=900,timeout=10) #gripper open
time.sleep(1)
arm.set_gripper_position(10,wait=False,auto_enable=True,speed=900,timeout=10) #gripper close
| Python | 0.000012 | |
3ebe9ccebede38cc0638ef4adefe54fca306f2e6 | fix path | doc/conf.py | doc/conf.py | __license__ = """
Copyright 2012 DISQUS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from version import version
project = u'pykafka'
copyright = u'2015, Parse.ly'
version = release = version
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
exclude_patterns = ['_build']
html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
html_theme = 'nature'
pygments_style = 'sphinx'
htmlhelp_basename = 'pykafkadoc'
autodoc_default_flags = ['special-members', 'undoc-members', 'private-members',
'show-inheritance']
| __license__ = """
Copyright 2012 DISQUS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from version import version
project = u'pykafka'
copyright = u'2015, Parse.ly'
version = release = version
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
exclude_patterns = ['_build']
html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
html_theme = 'nature'
pygments_style = 'sphinx'
htmlhelp_basename = 'pykafkadoc'
autodoc_default_flags = ['special-members', 'show-inheritance']
| Python | 0.000017 |
965cd743ab1310455fe1ee24c5fa0ae0ae97bd7a | Create MyTestProgram.py | MyTestProgram.py | MyTestProgram.py | #!/usr/bin/python
import RPIO, time
# John Scuteri's Button Board tester program
# This program is for John Jay's Button board
# This program was inspired by programs from the following website
# http://www.savagehomeautomation.com/projects/raspberry-pi-john-jays-8-led-button-breakout-board.html
# This program uses Python to make use of the Raspberry Pi's GPIO
# GPIO.RPI is replaced in this program with RPIO which need to be downloaded
# RPIO adds the resistor value modification
# As I am new at Python the following needs to be noted
# This program does not make use of arrays
# Future versions of this program will attempt to make use of arrays
# Setting resistor values for the switches
RPIO.setup(15, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(17, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(18, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(21, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(22, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(23, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(24, RPIO.IN, pull_up_down=RPIO.PUD_UP)
RPIO.setup(25, RPIO.IN, pull_up_down=RPIO.PUD_UP)
# Starting the LEDs
# LED 1
RPIO.setup(0, RPIO.OUT)
RPIO.output(0, RPIO.HIGH)
# LED 2
RPIO.setup(1, RPIO.OUT)
RPIO.output(1, RPIO.HIGH)
# LED 3
RPIO.setup(4, RPIO.OUT)
RPIO.output(4, RPIO.HIGH)
# LED 4
RPIO.setup(7, RPIO.OUT)
RPIO.output(7, RPIO.HIGH)
# LED 5
RPIO.setup(8, RPIO.OUT)
RPIO.output(8, RPIO.HIGH)
# LED 6
RPIO.setup(9, RPIO.OUT)
RPIO.output(9, RPIO.HIGH)
# LED 7
RPIO.setup(10, RPIO.OUT)
RPIO.output(10, RPIO.HIGH)
# LED 8
RPIO.setup(11, RPIO.OUT)
RPIO.output(11, RPIO.HIGH)
# Seed values
inputValue1 = True
inputValue2 = True
inputValue3 = True
inputValue4 = True
inputValue5 = True
inputValue6 = True
inputValue7 = True
inputValue8 = True
while True:
# Memories of past values
# these will be used for making sure the button push
# only registers once per press
hold1 = inputValue1
hold2 = inputValue2
hold3 = inputValue3
hold4 = inputValue4
hold5 = inputValue5
hold6 = inputValue6
hold7 = inputValue7
hold8 = inputValue8
inputValue1 = RPIO.input(15)
# ^ Gets the input value from Pin 15
if (inputValue1 == False):
if (hold1 == True):
# ^ Tests for previous value to make sure it registers once
print("Button 1 pressed ")
RPIO.output(0, RPIO.LOW)
# ^ Turns the LED off
else:
RPIO.output(0, RPIO.HIGH)
# ^ Turns the LED back on
time.sleep(.3)
# ^ Prevents the Led from blinking really fast
# but creates the problem of the LED not turning
# back on if the button is pressed too fast
inputValue2 = RPIO.input(17)
if (inputValue2 == False):
if (hold2 == True):
print("Button 2 pressed ")
RPIO.output(1, RPIO.LOW)
else:
RPIO.output(1, RPIO.HIGH)
time.sleep(.3)
inputValue3 = RPIO.input(18)
if (inputValue3 == False):
if (hold3 == True):
print("Button 3 pressed ")
RPIO.output(4, RPIO.LOW)
else:
RPIO.output(4, RPIO.HIGH)
time.sleep(.3)
inputValue4 = RPIO.input(21)
if (inputValue4 == False):
if (hold4 == True):
print("Button 4 pressed ")
RPIO.output(7, RPIO.LOW)
else:
RPIO.output(7, RPIO.HIGH)
time.sleep(.3)
inputValue5 = RPIO.input(22)
if (inputValue5 == False):
if (hold5 == True):
print("Button 5 pressed ")
RPIO.output(8, RPIO.LOW)
else:
RPIO.output(8, RPIO.HIGH)
time.sleep(.3)
inputValue6 = RPIO.input(23)
if (inputValue6 == False):
if (hold6 == True):
print("Button 6 pressed ")
RPIO.output(9, RPIO.LOW)
else:
RPIO.output(9, RPIO.HIGH)
time.sleep(.3)
inputValue7 = RPIO.input(24)
if (inputValue7 == False):
if (hold7 == True):
print("Button 7 pressed ")
RPIO.output(10, RPIO.LOW)
else:
RPIO.output(10, RPIO.HIGH)
time.sleep(.3)
inputValue8 = RPIO.input(25)
if (inputValue8 == False):
if (hold8 == True):
print("Button 8 pressed ")
RPIO.output(11, RPIO.LOW)
else:
RPIO.output(11, RPIO.HIGH)
time.sleep(.3)
time.sleep(.01)
| Python | 0 | |
3cfd37f81708e1f3a1b69d6c310c7f93d32eb8ed | add script to generate artificial data | django_db_meter/generate_data.py | django_db_meter/generate_data.py | import random
import threading
from django.contrib.auth.models import User
from models import TestModel
def generate_queries():
u1 = User.objects.filter()
new_name = str(random.randint(0, 2000000))
if u1:
u1.update(first_name=new_name)
else:
u1 = User(username=new_name)
u1.save()
u1 = User.objects.filter(username=new_name)
if u1:
u1.first_name = new_name + 'hello'
u1.save()
users = [User(username=get_random_text()) for i in xrange(100)]
for user in users:
user.save()
t = TestModel.objects.filter(user=u1)
t = list(t)
for i in xrange(100):
t = TestModel.objects.filter()
t = list(t)
for i in xrange(len(users)):
random_user = random.choice(users)
t = TestModel(user=random_user)
t.save()
for i in xrange(100):
k = TestModel.objects.select_related('user')
k = list(k)
def get_random_text():
new_name = str(random.randint(0, 2000000))
return new_name
def main(concurrency=2):
ths = [threading.Thread(target=generate_queries) for i in
xrange(concurrency)]
for th in ths:
th.start()
for th in ths:
th.join()
| Python | 0.000002 | |
af88a37ed87b18941232a98f52fec001bd63b453 | Fix bug in CookieJar where QSettings expected str, but got QByteArray instead (#10) | python/pyphantomjs/cookiejar.py | python/pyphantomjs/cookiejar.py | '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PyQt4.QtCore import QSettings
from PyQt4.QtNetwork import QNetworkCookie, QNetworkCookieJar
class CookieJar(QNetworkCookieJar):
def __init__(self, parent, cookiesFile):
super(CookieJar, self).__init__(parent)
self.m_cookiesFile = cookiesFile
def setCookiesFromUrl(self, cookieList, url):
settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
settings.beginGroup(url.host())
for cookie in cookieList:
settings.setValue(str(cookie.name()), str(cookie.value()))
settings.sync()
return True
def cookiesForUrl(self, url):
settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
cookieList = []
settings.beginGroup(url.host())
for cname in settings.childKeys():
cookieList.append(QNetworkCookie(cname, settings.value(cname)))
return cookieList
| '''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PyQt4.QtCore import QSettings
from PyQt4.QtNetwork import QNetworkCookie, QNetworkCookieJar
class CookieJar(QNetworkCookieJar):
def __init__(self, parent, cookiesFile):
super(CookieJar, self).__init__(parent)
self.m_cookiesFile = cookiesFile
def setCookiesFromUrl(self, cookieList, url):
settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
settings.beginGroup(url.host())
for cookie in cookieList:
settings.setValue(cookie.name(), cookie.value())
settings.sync()
return True
def cookiesForUrl(self, url):
settings = QSettings(self.m_cookiesFile, QSettings.IniFormat)
cookieList = []
settings.beginGroup(url.host())
for cname in settings.childKeys():
cookieList.append(QNetworkCookie(cname, settings.value(cname)))
return cookieList
| Python | 0 |
eb34dd310cac0106070554c440b134d0baad6c8e | add a way to sequence animations into a new animation | vectortween/SequentialAnimation.py | vectortween/SequentialAnimation.py | from vectortween.Animation import Animation
from vectortween.Tween import Tween
from vectortween.Mapping import Mapping
from copy import deepcopy
from itertools import tee
import numpy as np
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def normalize(x):
return x / sum(x)
class SequentialAnimation(Animation):
def __init__(self, ListOfAnimations=[], timeweight=[], tween=['linear']):
super().__init__(None, None)
self.ListOfAnimations = []
self.ListOfAnimationTimeWeight = np.array([])
self.CumulativeNormalizedTimeWeights = np.array([])
self.T = Tween(*tween)
if ListOfAnimations:
if not timeweight:
for a in ListOfAnimations:
self.add(a, 1)
else:
for a,t in zip(ListOfAnimations,timeweight):
self.add(a, t)
def add(self, Anim, timeweight=1):
self.ListOfAnimations.append(deepcopy(Anim))
self.ListOfAnimationTimeWeight = np.append(self.ListOfAnimationTimeWeight, [timeweight])
self.CumulativeNormalizedTimeWeights = np.cumsum(normalize(self.ListOfAnimationTimeWeight))
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe):
if birthframe is None:
birthframe = startframe
if deathframe is None:
deathframe = stopframe
if frame < birthframe:
return None
if frame > deathframe:
return None
if frame < startframe:
return self.ListOfAnimations[0].make_frame(frame, birthframe, startframe, stopframe, deathframe)
if frame > stopframe:
return self.ListOfAnimations[-1].make_frame(frame, birthframe, startframe, stopframe, deathframe)
t = self.T.tween2(frame, startframe, stopframe)
for i, w in enumerate(self.CumulativeNormalizedTimeWeights):
if t <= w:
if i == 0: # reached the end of the cumulative weights
relativestartframe = 0
else:
relativestartframe = self.CumulativeNormalizedTimeWeights[i-1]
relativestopframe = self.CumulativeNormalizedTimeWeights[i]
absstartframe = Mapping.linlin(relativestartframe, 0, 1, startframe, stopframe)
absstopframe = Mapping.linlin(relativestopframe, 0, 1, startframe, stopframe)
return self.ListOfAnimations[i].make_frame(frame, birthframe, absstartframe, absstopframe, deathframe)
| Python | 0.000001 | |
d5b622e9fb855753630cd3a6fae1a315b4be1a08 | Add example using new pytorch backend | examples/dominant_eigenvector_pytorch.py | examples/dominant_eigenvector_pytorch.py | import numpy as np
import numpy.random as rnd
import numpy.linalg as la
import torch
from pymanopt import Problem
from pymanopt.tools import decorators
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
manifold = Sphere(n)
solver = TrustRegions()
A_ = torch.from_numpy(A)
@decorators.pytorch
def cost(x):
return -x.matmul(A_.matmul(x))
problem = Problem(manifold=manifold, cost=cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
| Python | 0 | |
7a9cb703e776d91d4fc3c632b190bd7d318a12a6 | Create primary directions module | flatlib/predictives/primarydirections.py | flatlib/predictives/primarydirections.py | """
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This module implements the Primary Directions
method.
"""
from flatlib import angle
from flatlib import utils
# === Base functions === #
def arc(pRA, pDecl, sRA, sDecl, mcRA, lat):
""" Returns the arc of direction between a Promissor
and Significator. It uses the generic proportional
semi-arc method.
"""
pDArc, pNArc = utils.dnarcs(pDecl, lat)
sDArc, sNArc = utils.dnarcs(sDecl, lat)
# Select meridian and arcs to be used
# Default is MC and Diurnal arcs
mdRA = mcRA
sArc = sDArc
pArc = pDArc
if not utils.isAboveHorizon(sRA, sDecl, mcRA, lat):
# Use IC and Nocturnal arcs
mdRA = angle.norm(mcRA + 180)
sArc = sNArc
pArc = pNArc
# Promissor and Significator distance to meridian
pDist = angle.closestdistance(mdRA, pRA)
sDist = angle.closestdistance(mdRA, sRA)
# Promissor should be after significator (in degrees)
if pDist < sDist:
pDist += 360
# Meridian distances proportional to respective semi-arcs
sPropDist = sDist / (sArc / 2.0)
pPropDist = pDist / (pArc / 2.0)
# The arc is how much of the promissor's semi-arc is
# needed to reach the significator
return (pPropDist - sPropDist) * (pArc / 2.0)
def getArc(prom, sig, mc, pos, zerolat):
    """ Computes the arc of direction from a promissor object to
    a significator object.

    Besides the two objects it receives the MC, the geoposition,
    and a 'zerolat' flag forcing zero ecliptical latitudes
    (True => in Zodiaco, False => in Mundo).

    """
    promRA, promDecl = prom.eqCoords(zerolat)
    sigRA, sigDecl = sig.eqCoords(zerolat)
    mcRA, _ = mc.eqCoords()
    return arc(promRA, promDecl, sigRA, sigDecl, mcRA, pos.lat)
9fbde5b8dd4d2555e03bc0b7915fc4e55f8333d9 | Add test to help module | numba/tests/test_help.py | numba/tests/test_help.py | from __future__ import print_function
import builtins
import types as pytypes
import numpy as np
from numba import types
from .support import TestCase
from numba.help.inspector import inspect_function, inspect_module
class TestInspector(TestCase):
    """Tests for ``numba.help.inspector`` function/module introspection."""

    def check_function_descriptor(self, info, must_be_defined=False):
        """Assert *info* has the descriptor shape produced by
        ``inspect_function``; if *must_be_defined*, a Numba type must exist.
        """
        self.assertIsInstance(info, dict)
        self.assertIn('numba_type', info)
        numba_type = info['numba_type']
        if numba_type is None:
            # No Numba typing available -- only acceptable when the caller
            # did not require one.
            self.assertFalse(must_be_defined)
        else:
            self.assertIsInstance(numba_type, types.Type)
        self.assertIn('explained', info)
        self.assertIsInstance(info['explained'], str)
        self.assertIn('source_infos', info)
        self.assertIsInstance(info['source_infos'], dict)

    def test_inspect_function_on_range(self):
        # ``range`` is a builtin Numba supports, so a type must be reported.
        info = inspect_function(range)
        self.check_function_descriptor(info, must_be_defined=True)

    def test_inspect_function_on_np_all(self):
        # A NumPy function should come with non-empty source information.
        info = inspect_function(np.all)
        self.check_function_descriptor(info, must_be_defined=True)
        source_infos = info['source_infos']
        self.assertGreater(len(source_infos), 0)
        c = 0
        for srcinfo in source_infos.values():
            self.assertIsInstance(srcinfo['kind'], str)
            self.assertIsInstance(srcinfo['name'], str)
            self.assertIsInstance(srcinfo['sig'], str)
            self.assertIsInstance(srcinfo['filename'], str)
            self.assertIsInstance(srcinfo['lines'], tuple)
            self.assertIn('docstring', srcinfo)
            c += 1
        self.assertEqual(c, len(source_infos))

    def test_inspect_module(self):
        # Every entry yielded for the builtins module must be a callable
        # with a well-formed descriptor; at least one entry must exist.
        c = 0
        for it in inspect_module(builtins):
            self.assertIsInstance(it['module'], pytypes.ModuleType)
            self.assertIsInstance(it['name'], str)
            self.assertTrue(callable(it['obj']))
            self.check_function_descriptor(it)
            c += 1
        self.assertGreater(c, 0)
| Python | 0 | |
e8607fce01bfe17c08de0702c4041d98504bc159 | Add migration for changing CONTACTED_CHOICES | reunition/apps/alumni/migrations/0006_auto_20150823_2030.py | reunition/apps/alumni/migrations/0006_auto_20150823_2030.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Widens the choices of Note.contacted to distinguish outgoing
    # ("sent ...") from incoming ("received ...") contact methods.
    # The stored values themselves are unchanged (max_length stays 10).

    dependencies = [
        ('alumni', '0005_note'),
    ]

    operations = [
        migrations.AlterField(
            model_name='note',
            name='contacted',
            field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'', b'No contact made'), (b'', b'---'), (b'email', b'Sent email'), (b'fb', b'Sent Facebook message'), (b'phone', b'Made phone call'), (b'text', b'Sent text message'), (b'other', b'Made other contact'), (b'', b'---'), (b'email-in', b'Received email'), (b'fb-in', b'Received Facebook message'), (b'phone-in', b'Received phone call'), (b'text-in', b'Received text message'), (b'other', b'Received other contact')]),
        ),
    ]
| Python | 0 | |
e280b3d270395fd39ae0164265fdf5960b16a822 | add new example for wexplore2 with gpu mapper | examples/sEH_TPPU/sEH_TPPU_WExplore2_GPU/we.py | examples/sEH_TPPU/sEH_TPPU_WExplore2_GPU/we.py | import pickle
import h5py
import numpy as np
import pandas as pd
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
import scoop.futures
import mdtraj as mdj
from wepy.sim_manager import Manager
from wepy.resampling.wexplore2 import WExplore2Resampler
from wepy.openmm import OpenMMRunner, OpenMMWalker
from wepy.boundary_conditions.unbinding import UnbindingBC
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.hdf5 import TrajHDF5
from wepy.work_mapper.gpu import GpuMapper
def make_initial_minimized_state():
    """Build the solvated sEH/TPPU CHARMM system and return a minimized,
    briefly equilibrated OpenMM starting state.

    Returns a tuple ``(minimized_state, system, psf, pdb)``.
    Reads the PSF/PDB/parameter files from the parent directory.
    """
    psf = omma.CharmmPsfFile('../sEH_TPPU_system.psf')
    # load the coordinates
    pdb = mdj.load_pdb('../sEH_TPPU_system.pdb')
    # to use charmm forcefields get your parameters
    params = omma.CharmmParameterSet('../all36_cgenff.rtf',
                                     '../all36_cgenff.prm',
                                     '../all36_prot.rtf',
                                     '../all36_prot.prm',
                                     '../tppu.str',
                                     '../toppar_water_ions.str')
    # set the box size lengths (Angstroms) and angles (degrees)
    psf.setBox(82.435, 82.435, 82.435, 90, 90, 90)
    # create a system using the topology method giving it a topology and
    # the method for calculation
    system = psf.createSystem(params,
                              nonbondedMethod=omma.CutoffPeriodic,
                              nonbondedCutoff=1.0 * unit.nanometer,
                              constraints=omma.HBonds)
    # NOTE(review): 'topology' is assigned but never used below.
    topology = psf.topology
    print("\nminimizing\n")
    # set up for a short simulation to minimize and prepare
    # instantiate an integrator
    integrator = omm.LangevinIntegrator(300*unit.kelvin,
                                        1/unit.picosecond,
                                        0.002*unit.picoseconds)
    # instantiate a OpenCL platform, in other platforms we can not have multiple simulation context
    platform = omm.Platform.getPlatformByName('OpenCL')
    # instantiate a simulation object
    simulation = omma.Simulation(psf.topology, system, integrator,platform)
    # initialize the positions
    simulation.context.setPositions(pdb.openmm_positions(frame=0))
    # minimize the energy
    simulation.minimizeEnergy()
    # run the simulation for a number of initial time steps
    simulation.step(1000)
    print("done minimizing\n")
    # get the initial state from the context
    minimized_state = simulation.context.getState(getPositions=True,
                                                  getVelocities=True,
                                                  getParameters=True)
    print ('finished initialization')
    return minimized_state, system, psf, pdb
if __name__ == "__main__":
    #### SETUP -----------------------------------------
    # load a json string of the topology
    with open("../sEH_TPPU_system.top.json", mode='r') as rf:
        sEH_TPPU_system_top_json = rf.read()
    # load the pdb: this topology (for now) is needed in the WEXplore2
    # resampler which uses OpenMM to compute RMSDs and distances
    # through periodic boundary conditions
    pdb = mdj.load_pdb('../sEH_TPPU_system.pdb')
    # load the openmm state that is used to set the state of the
    # OpenMMWalker
    with open("../initial_openmm_state.pkl", mode='rb') as rf:
        omm_state = pickle.load(rf)
    # selecting ligand and protein binding site atom indices for
    # resampler and boundary conditions
    pdb = pdb.remove_solvent()
    lig_idxs = pdb.topology.select('resname "2RV"')
    atom_idxs = [atom.index for atom in pdb.topology.atoms]
    protein_idxs = np.delete(atom_idxs, lig_idxs)
    # select protein atoms within 0.8 nm of ligand atoms in the
    # crystal structure as the binding site
    binding_selection_idxs = mdj.compute_neighbors(pdb, 0.8, lig_idxs)
    binding_selection_idxs = np.delete(binding_selection_idxs, lig_idxs)
    # minimize the system
    # NOTE(review): minimized_state is not used below -- the walkers are
    # initialized from the pickled omm_state loaded above; confirm intent.
    minimized_state, system, psf, pdb = make_initial_minimized_state()
    # set the string identifier for the platform to be used by openmm
    platform = 'CUDA'
    #### END SETUP -----------------------------------------------------------------
    # set up the OpenMMRunner with your system
    runner = OpenMMRunner(system, psf.topology, platform=platform)
    # set up parameters for running the simulation
    num_walkers = 3
    # initial weights
    init_weight = 1.0 / num_walkers
    # a list of the initial walkers
    init_walkers = [OpenMMWalker(omm_state, init_weight) for i in range(num_walkers)]
    # set up the WExplore2 Resampler with the parameters
    resampler = WExplore2Resampler(topology=pdb.top,
                                   ligand_idxs=lig_idxs,
                                   binding_site_idxs=binding_selection_idxs,
                                   # algorithm parameters
                                   pmax=0.1)
    # instantiate a wexplore2 UnbindingBC boundary condition object
    # NOTE(review): here binding_site_idxs is the whole protein
    # (protein_idxs) while the resampler uses binding_selection_idxs --
    # confirm this asymmetry is intentional.
    ubc = UnbindingBC(cutoff_distance=1.0,
                      initial_state=init_walkers[0],
                      topology=pdb.topology,
                      ligand_idxs=lig_idxs,
                      binding_site_idxs=protein_idxs)
    # instantiate a reporter for HDF5
    report_path = 'wepy_results.h5'
    reporter = WepyHDF5Reporter(report_path, mode='w',
                                decisions=resampler.DECISION,
                                instruction_dtypes=resampler.INSTRUCTION_DTYPES,
                                resampling_aux_dtypes=None,
                                resampling_aux_shapes=None,
                                warp_dtype=ubc.WARP_INSTRUCT_DTYPE,
                                warp_aux_dtypes=ubc.WARP_AUX_DTYPES,
                                warp_aux_shapes=ubc.WARP_AUX_SHAPES,
                                topology=sEH_TPPU_system_top_json)
    # instantiate the GPU work mapper
    #TODO change the num_workers to the list of GPU indices that are available
    num_workers = 3
    gpumapper = GpuMapper(num_walkers, num_workers)
    # Instantiate a simulation manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=ubc,
                          work_mapper=gpumapper.map,
                          reporter=reporter)
    n_steps = 100
    n_cycles = 2
    # run a simulation with the manager for n_cycles cycles of n_steps each
    steps = [ n_steps for i in range(n_cycles)]
    print("Running simulation")
    sim_manager.run_simulation(n_cycles,
                               steps,
                               debug_prints=True)
    # your data should be in the 'wepy_results.h5'
| Python | 0 | |
5db1a4c8c721a0acffa6e903c5eef9b84ebfd0d3 | rename example to avoid namespace problem | examples/tutorials/scipy2008/traits_example.py | examples/tutorials/scipy2008/traits_example.py |
from numpy import linspace, sin
from enable.api import ColorTrait
from chaco.api import ArrayPlotData, Plot, marker_trait
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Group, Item, View
class ScatterPlotTraits(HasTraits):
    """A Chaco scatter plot whose color, marker and marker size are
    editable live through Traits UI controls.
    """

    plot = Instance(Plot)
    color = ColorTrait("blue")
    marker = marker_trait
    marker_size = Int(4)

    traits_view = View(
        Group(Item('color', label="Color", style="custom"),
              Item('marker', label="Marker"),
              Item('marker_size', label="Size"),
              Item('plot', editor=ComponentEditor(), show_label=False),
              orientation = "vertical"),
        width=800, height=600, resizable=True,
        title="Chaco Plot"
        )

    def __init__(self):
        # Create the data and the PlotData object
        x = linspace(-14, 14, 100)
        y = sin(x) * x**3
        plotdata = ArrayPlotData(x = x, y = y)
        # Create a Plot and associate it with the PlotData
        plot = Plot(plotdata)
        # Keep the renderer so the _*_changed handlers can restyle it
        self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0]
        self.plot = plot

    # Traits static change handlers: fired when the corresponding
    # trait is edited in the UI; they push the new value to the renderer.
    def _color_changed(self):
        self.renderer.color = self.color

    def _marker_changed(self):
        self.renderer.marker = self.marker

    def _marker_size_changed(self):
        self.renderer.marker_size = self.marker_size
#===============================================================================
# demo object that is used by the demo.py application.
#===============================================================================
demo = ScatterPlotTraits()
if __name__ == "__main__":
demo.configure_traits()
| Python | 0 | |
212d19c29a42bd6966965b166cdbb4dd642e5eb4 | Add test-cases for `get_user_membership` | wqflask/tests/unit/wqflask/test_resource_manager.py | wqflask/tests/unit/wqflask/test_resource_manager.py | """Test cases for wqflask/resource_manager.py"""
import unittest
from unittest import mock
from wqflask.resource_manager import get_user_membership
class TestGetUserMembership(unittest.TestCase):
    """Test cases for `get_user_membership`"""

    def setUp(self):
        # Mock a Redis-like connection whose hgetall returns one group
        # (keyed by group id) with distinct admin-only, member-only and
        # "rand" (both) user ids in its JSON payload.
        conn = mock.MagicMock()
        conn.hgetall.return_value = {
            '7fa95d07-0e2d-4bc5-b47c-448fdc1260b2': (
                '{"name": "editors", '
                '"admins": ["8ad942fe-490d-453e-bd37-56f252e41604", "rand"], '
                '"members": ["8ad942fe-490d-453e-bd37-56f252e41603", '
                '"rand"], '
                '"changed_timestamp": "Oct 06 2021 06:39PM", '
                '"created_timestamp": "Oct 06 2021 06:39PM"}')}
        self.conn = conn

    def test_user_is_group_member_only(self):
        """Test that a user is only a group member"""
        self.assertEqual(
            get_user_membership(
                conn=self.conn,
                user_id="8ad942fe-490d-453e-bd37-56f252e41603",
                group_id="7fa95d07-0e2d-4bc5-b47c-448fdc1260b2"),
            {"member": True,
             "admin": False})

    def test_user_is_group_admin_only(self):
        """Test that a user is a group admin only"""
        self.assertEqual(
            get_user_membership(
                conn=self.conn,
                user_id="8ad942fe-490d-453e-bd37-56f252e41604",
                group_id="7fa95d07-0e2d-4bc5-b47c-448fdc1260b2"),
            {"member": False,
             "admin": True})

    def test_user_is_both_group_member_and_admin(self):
        """Test that a user is both an admin and member of a group"""
        self.assertEqual(
            get_user_membership(
                conn=self.conn,
                user_id="rand",
                group_id="7fa95d07-0e2d-4bc5-b47c-448fdc1260b2"),
            {"member": True,
             "admin": True})
| Python | 0.000002 | |
3bdbc33e94a601f5d903bd32caf5ad7698fc025e | Fix zulip.com hardcoding. | zerver/management/commands/initialize_voyager_db.py | zerver/management/commands/initialize_voyager_db.py | from __future__ import absolute_import
from typing import Any, Iterable, Tuple
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from zerver.models import UserProfile, Stream, Recipient, \
Subscription, Realm, get_client, email_to_username
from django.conf import settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.actions import set_default_streams, do_create_realm
from optparse import make_option
from six import text_type
settings.TORNADO_SERVER = None
def create_users(name_list, bot_type=None):
    # type: (Iterable[Tuple[text_type, text_type]], int) -> None
    """Bulk-create active users (or bots) from (full_name, email) pairs
    across every existing realm, keyed by realm domain.
    """
    realms = {realm.domain: realm for realm in Realm.objects.all()}
    user_set = {(email, full_name, email_to_username(email), True)
                for full_name, email in name_list}
    bulk_create_users(realms, user_set, bot_type)
class Command(BaseCommand):
    help = "Populate an initial database for Zulip Voyager"

    option_list = BaseCommand.option_list + (
        make_option('--extra-users',
                    dest='extra_users',
                    type='int',
                    default=0,
                    help='The number of extra users to create'),
        )

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # Create the internal realm that hosts system bots, then the bots
        # themselves, and finally the admin realm with default streams.
        Realm.objects.create(domain=settings.INTERNAL_BOT_DOMAIN)
        names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
        create_users(names, bot_type=UserProfile.DEFAULT_BOT)
        # Ensure the standard Client rows exist.
        get_client("website")
        get_client("API")
        internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
                         for bot in settings.INTERNAL_BOTS]
        create_users(internal_bots, bot_type=UserProfile.DEFAULT_BOT)
        # Set the owners for these bots to the bots themselves
        bots = UserProfile.objects.filter(email__in=[bot_info[1] for bot_info in internal_bots])
        for bot in bots:
            bot.bot_owner = bot
            bot.save()
        # Initialize the email gateway bot as an API Super User
        email_gateway_bot = UserProfile.objects.get(email__iexact=settings.EMAIL_GATEWAY_BOT)
        email_gateway_bot.is_api_super_user = True
        email_gateway_bot.save()
        (admin_realm, _) = do_create_realm(settings.ADMIN_DOMAIN,
                                           settings.ADMIN_DOMAIN, True)
        set_default_streams(admin_realm, settings.DEFAULT_NEW_REALM_STREAMS)
        self.stdout.write("Successfully populated database with initial data.\n")
        # Point the Django Site at the configured external host.
        site = Site.objects.get_current()
        site.domain = settings.EXTERNAL_HOST
        site.save()
| from __future__ import absolute_import
from typing import Any, Iterable, Tuple
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from zerver.models import UserProfile, Stream, Recipient, \
Subscription, Realm, get_client, email_to_username
from django.conf import settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.actions import set_default_streams, do_create_realm
from optparse import make_option
from six import text_type
settings.TORNADO_SERVER = None
def create_users(name_list, bot_type=None):
# type: (Iterable[Tuple[text_type, text_type]], int) -> None
realms = {}
for realm in Realm.objects.all():
realms[realm.domain] = realm
user_set = set()
for full_name, email in name_list:
short_name = email_to_username(email)
user_set.add((email, full_name, short_name, True))
bulk_create_users(realms, user_set, bot_type)
class Command(BaseCommand):
help = "Populate an initial database for Zulip Voyager"
option_list = BaseCommand.option_list + (
make_option('--extra-users',
dest='extra_users',
type='int',
default=0,
help='The number of extra users to create'),
)
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
Realm.objects.create(domain="zulip.com")
names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
create_users(names, bot_type=UserProfile.DEFAULT_BOT)
get_client("website")
get_client("API")
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS]
create_users(internal_bots, bot_type=UserProfile.DEFAULT_BOT)
# Set the owners for these bots to the bots themselves
bots = UserProfile.objects.filter(email__in=[bot_info[1] for bot_info in internal_bots])
for bot in bots:
bot.bot_owner = bot
bot.save()
# Initialize the email gateway bot as an API Super User
email_gateway_bot = UserProfile.objects.get(email__iexact=settings.EMAIL_GATEWAY_BOT)
email_gateway_bot.is_api_super_user = True
email_gateway_bot.save()
(admin_realm, _) = do_create_realm(settings.ADMIN_DOMAIN,
settings.ADMIN_DOMAIN, True)
set_default_streams(admin_realm, settings.DEFAULT_NEW_REALM_STREAMS)
self.stdout.write("Successfully populated database with initial data.\n")
site = Site.objects.get_current()
site.domain = settings.EXTERNAL_HOST
site.save()
| Python | 0 |
b2febfd4b52c1e50be4d8ba614adcbe4d59251d8 | Add blank init file | SDK/__init__.py | SDK/__init__.py | Python | 0 | ||
fc58a85131675672ccef2302038cc55c9e4b0460 | Migrate products | c2corg_api/scripts/migration/documents/products.py | c2corg_api/scripts/migration/documents/products.py | from c2corg_api.models.document import DocumentGeometry, \
ArchiveDocumentGeometry
from c2corg_api.models.waypoint import Waypoint, ArchiveWaypoint, \
WaypointLocale, ArchiveWaypointLocale
from c2corg_api.scripts.migration.documents.document import MigrateDocuments
class MigrateProducts(MigrateDocuments):
    """Migrates 'local product' documents from the legacy
    ``app_products_*`` tables into waypoints of type 'local_product'.
    """

    def get_name(self):
        return 'products'

    def get_model_document(self, locales):
        # Products become waypoints in the new schema.
        return WaypointLocale if locales else Waypoint

    def get_model_archive_document(self, locales):
        return ArchiveWaypointLocale if locales else ArchiveWaypoint

    def get_model_geometry(self):
        return DocumentGeometry

    def get_model_archive_geometry(self):
        return ArchiveDocumentGeometry

    def get_count_query(self):
        return (
            'select count(*) from app_products_archives;'
        )

    def get_query(self):
        return (
            'select '
            '   id, document_archive_id, is_latest_version, elevation, '
            '   is_protected, redirects_to, '
            '   ST_Force2D(ST_SetSRID(geom, 3857)) geom, '
            '   product_type, url '
            'from app_products_archives '
            'order by id, document_archive_id;'
        )

    def get_count_query_locales(self):
        return (
            'select count(*) from app_products_i18n_archives;'
        )

    def get_query_locales(self):
        return (
            'select '
            '   id, document_i18n_archive_id, is_latest_version, culture, '
            '   name, description, hours, access '
            'from app_products_i18n_archives '
            'order by id, document_i18n_archive_id;'
        )

    def get_document(self, document_in, version):
        """Map a legacy product row to a waypoint document dict."""
        return dict(
            document_id=document_in.id,
            version=version,
            waypoint_type='local_product',
            elevation=document_in.elevation,
            product_types=self.convert_types(
                document_in.product_type,
                MigrateProducts.product_types, [0]),
            url=document_in.url
        )

    def get_document_archive(self, document_in, version):
        # Same payload as the live document, but keyed by the archive id.
        doc = self.get_document(document_in, version)
        doc['id'] = document_in.document_archive_id
        return doc

    def get_document_geometry(self, document_in, version):
        return dict(
            document_id=document_in.id,
            id=document_in.id,
            version=version,
            geom=document_in.geom
        )

    def get_document_geometry_archive(self, document_in, version):
        doc = self.get_document_geometry(document_in, version)
        doc['id'] = document_in.document_archive_id
        return doc

    def get_document_locale(self, document_in, version):
        """Map a legacy i18n row to a waypoint locale dict."""
        # TODO extract summary
        return dict(
            document_id=document_in.id,
            id=document_in.document_i18n_archive_id,
            version=version,
            culture=document_in.culture,
            title=document_in.name,
            description=document_in.description,
            access=document_in.access,
            access_period=document_in.hours
        )

    def get_document_locale_archive(self, document_in, version):
        # Archive locales are identical to the live locale payload.
        return self.get_document_locale(document_in, version)

    # Legacy numeric product_type codes -> new waypoint product type names.
    product_types = {
        '1': 'farm_sale',
        '2': 'restaurant',
        '3': 'grocery',
        '4': 'bar',
        '5': 'sport_shop'
    }
| Python | 0.000002 | |
9719189501f8b0fcff186b1bc2130fcef8d21e8d | add movie scraper | scrape_rotten/scrape_rotten/spiders/movie_spider.py | scrape_rotten/scrape_rotten/spiders/movie_spider.py | import scrapy
def get_urls():
    """Load the list of start URLs for the spider, one URL per line.

    The file ('movie_urls.json', despite the extension it is a plain
    line-oriented list -- TODO confirm) may contain blank lines or a
    trailing newline; those are skipped so that scrapy's ``start_urls``
    never receives an empty string, which would abort the crawl with
    'Missing scheme in request url'.
    """
    with open('movie_urls.json') as f:
        return [line.strip() for line in f if line.strip()]
class MovieSpider(scrapy.Spider):
    """Scrapes basic movie metadata (title, poster, description, id,
    year) from Rotten Tomatoes movie pages listed in movie_urls.json.
    """
    name = 'movies'
    start_urls = get_urls()

    def meta_property(self, response, prop):
        """Return the content attribute(s) of <meta property=...> tags
        as a (possibly empty) list of strings.
        """
        return response.xpath("//meta[@property='{}']/@content".format(prop)).extract()

    def parse(self, response):
        """Yield one item dict per movie page; keys are only set when
        the corresponding data was found on the page.
        """
        data = {'url': response.url}
        # Last path segment of the URL is the movie's slug/handle.
        movie_url_handle = response.url.split('/')
        poster_url = response.css('img.posterImage::attr(src)').extract()
        movie_title = self.meta_property(response, 'og:title')
        description = self.meta_property(response, 'og:description')
        rotten_id = self.meta_property(response, 'movieID')
        year = response.css("h1#movie-title").xpath('span/text()').extract()
        if movie_url_handle:
            data['movie_url_handle'] = movie_url_handle[-1]
        if poster_url:
            data['poster_url'] = poster_url[0]
        if movie_title:
            data['movie_title'] = movie_title[0]
        if description:
            data['description'] = description[0]
        if rotten_id:
            data['rt_id'] = rotten_id[0]
        if year:
            # Year appears as "(1999)" -- strip parentheses and whitespace.
            data['year'] = year[0].replace('(', '').replace(')', '').strip()
        yield data
| Python | 0.000009 | |
052832a766e296a3444cb7afd5b5a930013d18d6 | Create z04-convolutional-neural-network.py | skflow-examples/z04-convolutional-neural-network.py | skflow-examples/z04-convolutional-neural-network.py | # http://terrytangyuan.github.io/2016/03/14/scikit-flow-intro/
# Loading MNIST data
mnist = input_data.read_data_sets('MNIST_data')
def max_pool_2x2(tensor_in):
    """2x2 max-pooling with stride 2 and SAME padding; halves the
    spatial dimensions of an NHWC tensor.
    """
    return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
        padding='SAME')
def conv_model(X, y):
    """Two conv+pool layers, a 1024-unit dense layer with dropout, and a
    logistic-regression head for 10-class MNIST classification.

    X: batch of flattened 28x28 grayscale images; y: integer labels.
    """
    # reshape X to 4d tensor with 2nd and 3rd dimensions being image width and height
    # final dimension being the number of color channels
    X = tf.reshape(X, [-1, 28, 28, 1])
    # first conv layer will compute 32 features for each 5x5 patch
    with tf.variable_scope('conv_layer1'):
        h_conv1 = skflow.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
                                    bias=True, activation=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)
    # second conv layer will compute 64 features for each 5x5 patch
    with tf.variable_scope('conv_layer2'):
        h_conv2 = skflow.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
                                    bias=True, activation=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
    # reshape tensor into a batch of vectors (7x7 after two 2x2 pools)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    # densely connected layer with 1024 neurons
    h_fc1 = skflow.ops.dnn(h_pool2_flat, [1024], activation=tf.nn.relu, keep_prob=0.5)
    return skflow.models.logistic_regression(h_fc1, y)
# Training and predicting
classifier = skflow.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
learning_rate=0.001)
| Python | 0.000003 | |
d76572b47d6b7657274617b129abc6890c503e1e | Add RIP packet type | pox/lib/packet/rip.py | pox/lib/packet/rip.py | # Copyright 2012 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# RIP Message Format
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Command | Version | Zero |
# +---------------+---------------+-------------------------------+
# | |
# / RIP Entry (20 bytes) /
# / /
# +---------------------------------------------------------------+
#
#
# RIP Entry
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Address Family | Route Tag * |
# +-------------------------------+-------------------------------+
# | IP Address |
# +---------------------------------------------------------------+
# | Subnet Mask * |
# +---------------------------------------------------------------+
# | Next Hop * |
# +---------------------------------------------------------------+
# | Metric |
# +---------------------------------------------------------------+
#
# * RIP v2 only -- all zeros in RIP v1
#
#======================================================================
import struct
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import *
# RIP v2 multicast address
RIP2_ADDRESS = IPAddr("224.0.0.9")
# RIP v1/v2 UDP port
RIP_PORT = 520
RIP_REQUEST = 1
RIP_RESPONSE = 2
class rip (packet_base):
  """
  RIP Message (header + a list of RIPEntry objects).

  Wire layout: 1 byte command, 1 byte version, 2 zero bytes, then
  zero or more 20-byte route entries (see module header comment).
  Note: this is Python 2 code (uses 'except Exception, e').
  """
  MIN_LEN = 24
  RIP_PORT = RIP_PORT
  RIP2_ADDRESS = RIP2_ADDRESS

  def __init__(self, raw=None, prev=None, **kw):
    packet_base.__init__(self)
    self.prev = prev
    self.entries = []
    self.command = 0
    self.version = 0
    if raw is not None:
      self.parse(raw)
    self._init(kw)

  def hdr (self, payload):
    """Serialize the 4-byte header followed by every entry."""
    s = struct.pack("!BBH", self.command, self.version, 0)
    for e in self.entries:
      s += e.pack()
    return s

  def parse (self, raw):
    """Parse a raw RIP message; on any error, log and leave
    self.parsed unset (falsy)."""
    assert isinstance(raw, bytes)
    self.raw = raw
    dlen = len(raw)
    if dlen < self.MIN_LEN:
      self.msg('RIP packet data too short to parse')
      return None
    self.command, self.version, z = struct.unpack("!BBH", raw[:4])
    if z != 0:
      self.err("Zero field in RIP message not zero!")
      return None
    self.entries = []
    raw = raw[4:]
    # Consume 20-byte entries until fewer than 20 bytes remain.
    while len(raw) >= 20:
      try:
        self.entries.append(RIPEntry(raw=raw[0:20]))
      except Exception, e:
        self.err('Exception parsing RIP entries: ' + str(e))
        return None
      raw = raw[20:]
    if len(raw) != 0:
      self.err('RIP had partial entry? %s bytes left' % (len(raw),))
    self.parsed = True

  def __str__ (self):
    cmd = {RIP_REQUEST:"REQ",RIP_RESPONSE:"RESP"}.get(self.command,
        str(self.command))
    s = "[RIP ver:%i cmd:%s num:%i|" % (self.version,
        cmd, len(self.entries))
    for e in self.entries:
      s += str(e) + "|"
    s = s[:-1] + "]"
    return s
RIPMessage = rip
class RIPEntry (packet_base):
  """
  A single 20-byte RIP route entry (see module header comment).

  Fields: address family, route tag, IP address, subnet mask, next hop
  and metric.  The RIPv2-only fields (route tag, netmask, next hop) are
  simply zero in RIPv1 messages.
  """
  def __init__ (self, raw=None, prev=None, **kw):
    #TODO: netmask initializer?
    packet_base.__init__(self)
    self.address_family = 0
    self.route_tag = 0
    self.ip = None # IPAddr; bad default is to force setting
    self._netmask = 0 # An IPAddr, but netmask property lets you assign a
                      # dotquad string or an integer number of bits.
    self.next_hop = IP_ANY
    self.metric = 0
    if raw is not None:
      self.parse(raw)
    self._init(kw)

  @property
  def netmask (self):
    """The subnet mask (an IPAddr once it has been set)."""
    return self._netmask

  @netmask.setter
  def netmask (self, netmask):
    # Accepts an IPAddr, a CIDR bit count (int), or anything IPAddr()
    # can parse (e.g. a dotted-quad string).
    if isinstance(netmask, int):
      netmask = cidr_to_netmask(netmask)
    elif not isinstance(netmask, IPAddr):  # fixed typo: was 'isintance'
      netmask = IPAddr(netmask)
    self._netmask = netmask

  @property
  def network_bits (self):
    """
    Returns the number of network bits. May raise an exception
    if the netmask is not CIDR-compatible.
    """
    return netmask_to_cidr(self._netmask)

  @network_bits.setter
  def network_bits (self, bits):
    self._netmask = cidr_to_netmask(bits)

  def hdr (self, payload):
    # "!" gives network byte order; the address fields are packed from
    # their signed host-order integer representations.
    s = struct.pack("!HHiiii", self.address_family, self.route_tag,
                    self.ip.toSigned(networkOrder=False),
                    self.netmask.toSigned(networkOrder=False),
                    self.next_hop.toSigned(networkOrder=False),
                    self.metric)
    return s

  def parse (self, raw):
    """Parse exactly 20 bytes into this entry's fields."""
    self.address_family, self.route_tag, ip, netmask, next_hop, self.metric \
        = struct.unpack("!HHiiii", raw)
    self.ip = IPAddr(ip, networkOrder = False)
    self._netmask = IPAddr(netmask, networkOrder = False)
    self.next_hop = IPAddr(next_hop, networkOrder = False)

  def __str__ (self):
    s = "tag:%s ip:%s/%s nh:%s m:%s" % (self.route_tag, self.ip,
        self._netmask, self.next_hop, self.metric)
    return s
| Python | 0.000002 | |
b9ff8fc06f9bd55721332831d4ce23589d93fafb | Create 3Sum.py | leetcode/15.-3Sum/3Sum.py | leetcode/15.-3Sum/3Sum.py | class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
sortnum = sorted(nums)
length = len(sortnum)
# make sure a < b < c
for i in xrange(length-2):
a = sortnum[i]
# remove duplicate a
if i >= 1 and a == sortnum[i-1]:
continue
j = i + 1
k = length - 1
while j < k:
b = sortnum[j]
c = sortnum[k]
if b + c == -a:
res.append([a,b,c])
# remove duplicate b,c
while j < k:
j += 1
k -= 1
if sortnum[j] != b or sortnum[k] != c:
break
elif b + c > -a:
# remove duplicate c
while j < k:
k -= 1
if sortnum[k] != c:
break
else:
# remove duplicate b
while j < k:
j += 1
if sortnum[j] != b:
break
return res
| Python | 0.000002 | |
80bf107b29f51456f778da718ef438fd62545b1b | Add server test file | pi_approach/UI/server.py | pi_approach/UI/server.py | import socket
# Minimal blocking TCP echo/command server demo (Python 2 syntax).
HOST = socket.gethostname() + '.local' # Server IP or Hostname
PORT = 12345 # Pick an open Port (1000+ recommended), must match the client sport
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created'
# Bind the listening address; errors are only reported, not fatal.
# NOTE(review): if bind() fails we still fall through to listen()/accept(),
# which will then raise -- consider exiting on bind failure instead.
try:
    s.bind((HOST, PORT))
    print "Opened"
except socket.error:
    print 'Bind failed '
s.listen(5)
print 'Socket awaiting messages'
(conn, addr) = s.accept()
print 'Connected'
# Handle messages from the single accepted client until it sends 'quit'.
while True:
    data = conn.recv(1024)
    print 'I sent a message back in response to: ' + data
    reply = ''
    # Map the received command to a canned reply.
    if data == 'Hello':
        reply = 'Hi, back!'
    elif data == 'This is important':
        reply = 'OK, I have done the important thing you have asked me!'
    # 'quit' terminates the session (no further reply is sent).
    elif data == 'quit':
        conn.send('Terminating')
        break
    else:
        reply = 'Unknown command'
    # Sending reply
    conn.send(reply)
conn.close() # Close connections
| Python | 0.000001 | |
64577b7eb445a62f4e8348d687fa6ed7ed5401ed | Add migrations to user_profile app. | localtv/user_profile/migrations/0001_initial.py | localtv/user_profile/migrations/0001_initial.py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('localtv', '0008_add_profile'),
)
    def forwards(self, orm):
        # Intentionally a no-op: the schema change is performed by the
        # 'localtv' app's 0008_add_profile migration (see depends_on).
        pass # this is handled by 0008_add_profile
def backwards(self, orm):
pass # this is handled by 0008_add_profile
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'user_profile.profile': {
'Meta': {'object_name': 'Profile', 'db_table': "'localtv_profile'"},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['user_profile']
| Python | 0 | |
691b27a4d97d5c2966f1627ed6cc5870024537c0 | add bouncy example | 06-animation/bouncy.py | 06-animation/bouncy.py | def setup():
size(300,300)
# ball properties
global rad, d, pos, vel, grav
rad = 25 # radius of the ball
pos = PVector( 150, 50 ) # initial position of the ball
vel = PVector( random(-3,3), random(-3,3) ) # velocity of the balll
grav = PVector( 0, 0.9 ) # force on the ball (gravity)
d = 0.97 # how much bounce?
def draw():
    """Advance the ball simulation one step and redraw the scene.

    Called by Processing once per frame.
    """
    global rad, d, pos, vel, grav
    # update the velocity with the force
    vel.add(grav)
    # update the position with the velocity
    pos.add(vel)
    # Wall collisions: clamp the ball back inside the window, reflect the
    # offending velocity component and damp the whole velocity by d so the
    # ball loses energy on each bounce.
    if(pos.y > height-rad):  # floor collision
        pos.y = height-rad
        vel.y = -vel.y
        vel.mult(d)
    if(pos.x < rad):  # left wall collision
        pos.x = rad
        vel.x = -vel.x
        vel.mult(d)
    if(pos.x > width-rad):  # right wall collision
        pos.x = width-rad
        vel.x = -vel.x
        vel.mult(d)
    # draw the scene
    background(150)  # refresh the background
    strokeWeight(2)
    fill(20,160,240)
    ellipse( pos.x, pos.y, rad*2, rad*2)  # draw the ball
def mousePressed():
    """If the ball is clicked, add a random velocity kick."""
    global rad, pos
    # Only react when the click lands inside the ball.
    if( dist(mouseX,mouseY,pos.x,pos.y) < rad ):
        # Kick mostly downward: in this sketch gravity is (0, 0.9) and the
        # floor is at large y, so positive y points down.
        vel.add( PVector(random(-3,3), random(10,20)) )
| Python | 0.000007 | |
1f6e225a1b01e8eb4cd9f1d5da05455d85326064 | Validate ck_user_has_mobile_or_other_auth constraint | migrations/versions/0357_validate_constraint.py | migrations/versions/0357_validate_constraint.py | """
Revision ID: 0357_validate_constraint
Revises: 0356_add_webautn_auth_type
Create Date: 2021-05-13 14:15:25.259991
"""
from alembic import op
# Alembic revision identifiers.
revision = '0357_validate_constraint'
down_revision = '0356_add_webautn_auth_type'


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # VALIDATE CONSTRAINT scans existing rows against a check constraint that
    # was previously created NOT VALID, without blocking concurrent writes the
    # way re-adding the constraint would.
    op.execute('ALTER TABLE users VALIDATE CONSTRAINT "ck_user_has_mobile_or_other_auth"')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Validation cannot be meaningfully undone, so downgrade is a no-op.
    pass
    # ### end Alembic commands ###
| Python | 0 | |
8387b0289e84ededdd9ba3db5ba47f149b918530 | clean up batch submit script | dnanexus/dx_batch.py | dnanexus/dx_batch.py | #!/usr/bin/env python
import argparse
import os
import sys
import subprocess
import dxpy
import requests
from dxencode import dxencode as dxencode
SERVER = 'https://www.encodeproject.org'  # ENCODE portal endpoint
ASSAY_TYPE = 'whole genome bisulfite sequencing'  # human-readable assay name
ASSAY_TERM_ID = 'OBI:0001863'  # ontology term used in the search query below
HEADERS = {'content-type': 'application/json'}  # ask the portal for JSON
def get_args(argv=None):
    '''Parse the input arguments.

    argv: optional list of argument strings. Defaults to None, in which
          case argparse falls back to sys.argv[1:]; accepting an explicit
          list keeps the parser usable from tests and other callers.
    '''
    ap = argparse.ArgumentParser(description='Set up DNA Methylation runs on DNA Nexus')
    ap.add_argument('-t', '--test',
                    help='Use test input folder',
                    action='store_true',
                    required=False)
    ap.add_argument('-n', '--numberjobs',
                    help='Maximum Number of jobs to run',
                    type=int,
                    required=False)
    return ap.parse_args(argv)
def main():
    """Query the ENCODE portal for mouse WGBS experiments and launch one
    DNA Methylation job per replicate (Python 2 script)."""
    cmnd = get_args()

    ## resolve projects
    # NOTE(review): this local SERVER shadows the module-level constant above;
    # confirm which endpoint is intended.
    (AUTHID, AUTHPW, SERVER) = dxencode.processkey('www')

    # All ENCODE3 mouse experiments for this assay that have fastq files,
    # with replicates fully embedded in the response.
    query = '/search/?type=experiment&assay_term_id=%s&award.rfa=ENCODE3&limit=all&files.file_format=fastq&frame=embedded&replicates.library.biosample.donor.organism.name=mouse' % ASSAY_TERM_ID
    res = requests.get(SERVER+query, headers=HEADERS, auth=(AUTHID, AUTHPW),allow_redirects=True, stream=True)
    exps = res.json()['@graph']
    n=0
    for exp in exps:
        acc = exp['accession']
        # Stop once the requested number of launches is reached.
        # NOTE(review): if -n is omitted numberjobs is None, and in Python 2
        # `n >= None` is always True, so nothing would be launched -- confirm
        # whether -n should be required or given a default.
        if n >= cmnd.numberjobs:
            print "Stopping at %s replicates" % n
            break
        for rep in exp.get('replicates', []):
            try:
                # One launcher invocation per (biological, technical) replicate;
                # output is captured in runs/launch<acc>-<br>-<tr>.out.
                runcmd = "./launchDnaMe.py --gzip -e %s --br %s --tr %s > runs/launch%s-%s-%s.out" % (acc, rep['biological_replicate_number'], rep['technical_replicate_number'],acc, rep['biological_replicate_number'], rep['technical_replicate_number'])
                print runcmd
                if not cmnd.test:
                    # NOTE(review): shell command assembled from API data; fine
                    # for a trusted endpoint but not injection-safe in general.
                    os.system(runcmd)
                n+=1
            except KeyError, e:
                print "%s failed: %s" % (acc, e)


if __name__ == '__main__':
    main()
| Python | 0 | |
9e217f0641328e1dfce91cdffdb8b5d77e4fe8fa | Add segcnn | examples/human_sar/segcnn.py | examples/human_sar/segcnn.py | import os
import sys
import cPickle
import theano.tensor as T
homepath = os.path.join('..', '..')
if not homepath in sys.path:
sys.path.insert(0, homepath)
from dlearn.models.layer import FullConnLayer, ConvPoolLayer
from dlearn.models.nnet import NeuralNet
from dlearn.utils import actfuncs, costfuncs
from dlearn.optimization import sgd
def load_data():
    """Load and return the pickled training dataset from ./data.pkl."""
    with open('data.pkl', 'rb') as f:
        dataset = cPickle.load(f)
    return dataset
def load_attr_model():
    """Load and return the pretrained attribute model pickled in ./scpool.pkl."""
    with open('scpool.pkl', 'rb') as f:
        attr_model = cPickle.load(f)
    return attr_model
def train_model(dataset, attr_model):
    """Build and train the segmentation CNN.

    The two convolutional layers are initialised from the pretrained
    attribute model and held fixed during training; only the two fully
    connected layers are learned.

    :param dataset: dataset object consumed by dlearn's sgd.train
    :param attr_model: pretrained NeuralNet whose conv blocks are reused
    :return: the trained NeuralNet
    """
    X = T.tensor4()  # input images, masked below by the segmentation S
    A = T.matrix()   # attribute matrix; accepted as an input but unused in the cost
    S = T.tensor3()  # target segmentation maps

    layers = []
    layers.append(ConvPoolLayer(
        input=X * S.dimshuffle(0, 'x', 1, 2),
        input_shape=(3, 160, 80),
        filter_shape=(32, 3, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
        W=attr_model.blocks[0]._W,
        b=attr_model.blocks[0]._b
    ))

    layers.append(ConvPoolLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=True,
        # Use the *second* pretrained block: blocks[0] holds the (32, 3, 5, 5)
        # filters of the first layer and cannot initialise a (64, 32, 5, 5)
        # convolution. (Assumes attr_model's second block is the matching
        # conv layer -- confirm against the scpool model definition.)
        W=attr_model.blocks[1]._W,
        b=attr_model.blocks[1]._b
    ))

    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=128,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh
    ))

    layers.append(FullConnLayer(
        input=layers[-1].output,
        input_shape=layers[-1].output_shape,
        output_shape=37 * 17,
        dropout_input=layers[-1].dropout_output,
        active_func=actfuncs.sigmoid
    ))

    model = NeuralNet(layers, [X, A], layers[-1].output)
    model.target = S
    # Binary cross-entropy on the flattened segmentation map plus a small
    # L2 penalty on the weights.
    model.cost = costfuncs.binxent(layers[-1].dropout_output, S.flatten(2)) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.binerr(layers[-1].output, S.flatten(2))
    # Freeze the two pretrained conv layers. `layers` is a plain Python list
    # with no .blocks attribute, so the original `layers.blocks[...]` raised
    # AttributeError; the NeuralNet exposes its layers as .blocks (as used
    # on attr_model above).
    model.consts = model.blocks[0].parameters + model.blocks[1].parameters

    sgd.train(model, dataset, lr=1e-2, momentum=0.9,
              batch_size=100, n_epochs=300,
              epoch_waiting=10)

    return model
def save_model(model):
    """Pickle the trained model to ./model_segcnn.pkl."""
    with open('model_segcnn.pkl', 'wb') as f:
        cPickle.dump(model, f, cPickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
    dataset = load_data()
    attr_model = load_attr_model()
    # Forward the pretrained attribute model: it was loaded but never passed,
    # so train_model(dataset) raised a TypeError (the function takes two
    # positional arguments).
    model = train_model(dataset, attr_model)
    save_model(model)
| Python | 0.000084 | |
218df31e0f808aae75310b98dfe2c5cb7d87e7ed | introduce image slicer | postcards/slice_image.py | postcards/slice_image.py | from PIL import Image
import os
from time import gmtime, strftime
import math
import sys
import logging
"""slice_image.py: Slice images into tiles."""
__author__ = "Andrin Bertschi. www.abertschi.ch"
LOGGER_NAME = 'slice_image'
logger = logging.getLogger(LOGGER_NAME)
def make_tiles(image, tile_width, tile_height):
    """
    Slice a PIL image into a grid of equally sized tiles.

    Tiles are taken left-to-right, top-to-bottom; any leftover strip on the
    right or bottom edge that does not fill a whole tile is discarded.

    :param image: PIL image
    :param tile_width: target tile width in pixels
    :param tile_height: target tile height in pixels
    :return: 2d array (list of rows, each a list of PIL images)
    """
    n_cols = math.floor(image.width / tile_width)
    n_rows = math.floor(image.height / tile_height)

    grid = []
    for row in range(n_rows):
        top = row * tile_height
        grid.append([
            image.crop((col * tile_width, top,
                        col * tile_width + tile_width, top + tile_height))
            for col in range(n_cols)
        ])
    return grid
def store_tiles(tiles, directory, basename=None):
    """
    Store generated tiles to disk as <basename>_<row>-<col>.jpg files.

    :param tiles: a 2d array of PIL images, as created by the make_tiles function
    :param directory: directory to store images (created if missing)
    :param basename: basename of image; if none is set, a timestamped
                     default is chosen so repeated runs do not collide
    :return: nothing
    """
    if not basename:
        basename = strftime("cropped_%Y-%m-%d_%H-%M-%S", gmtime())
    if not os.path.exists(directory):
        os.makedirs(directory)
        logger.debug('creating {}'.format(directory))
    # NOTE(review): assumes a non-empty, rectangular 2d array; an empty
    # `tiles` would raise IndexError on tiles[0] -- confirm callers.
    height = len(tiles)
    width = len(tiles[0])
    for h in range(height):
        for w in range(width):
            frame = tiles[h][w]
            filename = basename + '_{}-{}.jpg'.format(h, w)
            filepath = os.path.join(directory, filename)
            logger.debug('storing {}'.format(filepath))
            frame.save(filepath)
def _make_absolute_path(path):
if os.path.isabs(path):
return path
else:
return str(os.path.join(os.getcwd(), path))
if __name__ == '__main__':
    # CLI entry point: python slice_image.py <image_path> <tile_width> <tile_height>
    logging.basicConfig(level=logging.INFO,
                        format='%(name)s (%(levelname)s): %(message)s')
    logging.getLogger(LOGGER_NAME).setLevel(logging.DEBUG)
    if len(sys.argv) < 4:
        logger.error('wrong usage. call script python {} <image_path> <tile_width> <tile_height>'.format(sys.argv[0]))
        exit(1)

    image_path = _make_absolute_path(sys.argv[1])
    tile_height = int(sys.argv[3])
    tile_width = int(sys.argv[2])
    if not os.path.isfile(image_path):
        logger.error('file {} does not exist'.format(image_path))
        exit(1)

    # NOTE: the local name `file` shadows the builtin of the same name.
    file = open(image_path, 'rb')
    with Image.open(file) as image:
        # Tiles go into a fresh timestamped directory under the CWD.
        cwd = os.getcwd()
        basename = strftime("cropped_%Y-%m-%d_%H-%M-%S", gmtime())
        directory = os.path.join(cwd, basename)
        tiles = make_tiles(image, tile_width=tile_width, tile_height=tile_height)
        store_tiles(tiles, directory)
| Python | 0.000003 | |
ef627493f87d60e404008b26fe13e816d492a333 | add a bluetooth test component that simply displays when a device move on the network | python/test_bluetooth.py | python/test_bluetooth.py | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Test bluetooth module that displays informations from
bluetooth and print a message when a bluetooth device
appears or disappears.
:author: Luc Libralesso
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
from pelix.ipopo.decorators import ComponentFactory, Instantiate, Requires, Validate
import logging
import herald.utils
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory("herald-bluetooth-test-factory")
@Requires('_discovery', herald.transports.bluetooth.BLUETOOTH_DISCOVERY_SERVICE)
@Instantiate('herald-bluetooth-test-test')
class BluetoothTest:
    """A test component that prints a message whenever a bluetooth device
    appears on or disappears from the network.

    iPOPO injects the bluetooth discovery service into ``_discovery``
    before ``validate`` is called.

    NOTE(review): only ``herald.utils`` is imported above, yet the
    @Requires decorator dereferences ``herald.transports.bluetooth`` --
    confirm that submodule is importable/bound at decoration time.
    """
    def __init__(self):
        # Injected by the @Requires handler; None until the component is validated.
        self._discovery = None

    @Validate
    def validate(self, _):
        # ask to be notified when a device joins or leaves the bluetooth network
        self._discovery.listen_new(lambda x: print(x+" appears"))
        self._discovery.listen_del(lambda x: print(x+" disappears"))
        print('LISTENING TO THE BLUETOOTH NETWORK !')
| Python | 0 | |
a1e3e275a81ff073fed226619bde23361230cfce | Add tests for packaging.tests.support (#12659). | Lib/packaging/tests/test_support.py | Lib/packaging/tests/test_support.py | import os
import tempfile
from packaging.dist import Distribution
from packaging.tests import support, unittest
class TestingSupportTestCase(unittest.TestCase):
    # Meta-tests for packaging.tests.support: each test builds a throwaway
    # TestCase subclass that uses one of the support helpers, runs it, and
    # checks both the helper's behaviour and its clean-up.

    def test_fake_dec(self):
        # fake_dec must behave as a transparent decorator regardless of the
        # arguments it was "configured" with.
        @support.fake_dec(1, 2, k=3)
        def func(arg0, *args, **kargs):
            return arg0, args, kargs
        self.assertEqual(func(-1, -2, k=-3), (-1, (-2,), {'k': -3}))

    def test_TempdirManager(self):
        # Maps inner test names to the paths they created, so we can verify
        # below that tearDown removed them.
        files = {}

        # The inner methods deliberately take `self2` (the Tester instance)
        # while closing over the outer `self` for assertions.
        class Tester(support.TempdirManager, unittest.TestCase):

            def test_mktempfile(self2):
                tmpfile = self2.mktempfile()
                files['test_mktempfile'] = tmpfile.name
                self.assertTrue(os.path.isfile(tmpfile.name))

            def test_mkdtemp(self2):
                tmpdir = self2.mkdtemp()
                files['test_mkdtemp'] = tmpdir
                self.assertTrue(os.path.isdir(tmpdir))

            def test_write_file(self2):
                tmpdir = self2.mkdtemp()
                files['test_write_file'] = tmpdir
                self2.write_file((tmpdir, 'file1'), 'me file 1')
                file1 = os.path.join(tmpdir, 'file1')
                self.assertTrue(os.path.isfile(file1))
                text = ''
                with open(file1, 'r') as f:
                    text = f.read()
                self.assertEqual(text, 'me file 1')

            def test_create_dist(self2):
                project_dir, dist = self2.create_dist()
                files['test_create_dist'] = project_dir
                self.assertTrue(os.path.isdir(project_dir))
                self.assertIsInstance(dist, Distribution)

            def test_assertIsFile(self2):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                self.addCleanup(support.unlink, fn)
                self2.assertIsFile(fn)
                self.assertRaises(AssertionError, self2.assertIsFile, 'foO')

            def test_assertIsNotFile(self2):
                tmpdir = self2.mkdtemp()
                self2.assertIsNotFile(tmpdir)

        # Run each inner test with its setUp/tearDown, then verify the
        # temporary artefacts it recorded were cleaned up.
        tester = Tester()
        for name in ('test_mktempfile', 'test_mkdtemp', 'test_write_file',
                     'test_create_dist', 'test_assertIsFile',
                     'test_assertIsNotFile'):
            tester.setUp()
            try:
                getattr(tester, name)()
            finally:
                tester.tearDown()
            # check clean-up
            if name in files:
                self.assertFalse(os.path.exists(files[name]))
def test_suite():
    """Return the suite of all tests in this module (unittest convention)."""
    return unittest.makeSuite(TestingSupportTestCase)

if __name__ == "__main__":
    unittest.main(defaultTest="test_suite")
| Python | 0 | |
5a59b5b96e223da782cf683aabbf4e8371c883e1 | Add DHKE protocol | cryptos/dhke.py | cryptos/dhke.py | """
Implementation of the Diffie-Hellman Key Exchange Protocol
Usage:
# Setup
invoker = DHKEInvoker()
other = DHKEParty(invoker.get_param())
# Key exchange phase
other.receive_partial_key(invoker.get_partial_key())
invoker.receive_partial_key(other.get_partial_key())
# Check consistency
assert(invoker.get_key() == other.get_key)
"""
from .numt import randprime, modulo_exp
from random import randint
__author__ = 'Divyanshu Kakwani'
__license__ = 'MIT'
def DHKEparam_gen(primelen=10):
    """
    Generates public parameters for the DHKE Protocol.

    :param primelen: number of decimal digits of the random prime.
    :return: a (prime, alpha) tuple, where alpha is a random base in
             [2, prime-2].

    NOTE(review): alpha is a uniformly random element, not necessarily a
    generator of the multiplicative group -- confirm this is acceptable
    for the intended security level.
    """
    prime = randprime(10**(primelen-1), 10**primelen)
    alpha = randint(2, prime-2)
    return (prime, alpha)
class DHKEParty:
    """
    Represents one party in the Diffie-Hellman Key Exchange Protocol.

    Each party holds the public parameters (prime, alpha), a private
    exponent, and derives the shared key once the peer's partial key is
    received.
    """
    def __init__(self, param):
        self.prime = param[0]
        self.alpha = param[1]
        # Private exponent; never leaves this object.
        self.secret = randint(2, self.prime-2)
        # Public partial key alpha^secret mod prime, sent to the peer.
        # Built-in three-argument pow performs modular exponentiation
        # efficiently, replacing the hand-rolled modulo_exp helper.
        self.Ka = pow(self.alpha, self.secret, self.prime)
        self.Kb = None

    def get_param(self):
        """Return the public parameters as a (prime, alpha) tuple."""
        return (self.prime, self.alpha)

    def get_partial_key(self):
        """Return this party's public partial key."""
        return self.Ka

    def receive_partial_key(self, Kb):
        """Accept the peer's partial key and derive the shared key."""
        self.Kb = Kb
        self.final_key = pow(Kb, self.secret, self.prime)

    def get_key(self):
        """Return the shared key; raise if the exchange is incomplete."""
        # Explicit None check: the exchange is incomplete only when no
        # partial key has been received, not when its value is falsy.
        if self.Kb is None:
            raise Exception('Partial key not received')
        return self.final_key
class DHKEInvoker(DHKEParty):
    """
    The party which invokes the DHKE Protocol. A DHKEInvoker
    differs from a DHKEParty in that it has to generate the
    DHKE parameters at the outset.
    """
    def __init__(self):
        # Generate fresh (prime, alpha) parameters; the responding party
        # obtains them via get_param().
        param = DHKEparam_gen()
        DHKEParty.__init__(self, param)
| Python | 0.000001 | |
088cd2ddb79bdd2a8dd68e2d7169484eea90fd1a | Add problem79.py | euler_python/problem79.py | euler_python/problem79.py | """
problem79.py
A common security method used for online banking is to ask the user for three
random characters from a passcode. For example, if the passcode was 531278, they
may ask for the 2nd, 3rd, and 5th characters; the expected reply would be: 317.
The text file, keylog.txt, contains fifty successful login attempts.
Given that the three characters are always asked for in order, analyse the file
so as to determine the shortest possible secret passcode of unknown length.
"""
from collections import defaultdict, deque
from itertools import dropwhile
def to_digits(num):
    """Return the decimal digits of *num* as a list of ints.

    Returning a concrete list (instead of a one-shot ``map`` iterator)
    lets callers iterate the digits more than once or index into them.
    """
    return [int(digit) for digit in str(num)]
def to_num(digits):
    """Concatenate an iterable of digits back into a single integer."""
    return int(''.join(str(d) for d in digits))
# Use 'breadth-first tree search', inspired by Peter Norvig's version in AIMA.
def solve(codes):
    """Return the shortest passcode (as a list of digits) consistent with
    every three-digit login attempt in *codes*, or None if there is none.

    BFS over candidate prefixes guarantees the first goal reached is the
    shortest, since each expansion adds exactly one digit.
    """
    # Store all relations specified in the codes in a dict. Each digit
    # is mapped to those digits appearing after it.
    after = defaultdict(set)
    for code in codes:
        a, b, c = to_digits(code)
        after[a].add(b)
        after[a].add(c)
        after[b].add(c)
    # We will use lists to represent nodes in the tree, each of which is
    # a candidate solution. So, initialise the frontier to the possible
    # starting values.
    frontier = deque([x] for x in after)
    while frontier:
        node = frontier.popleft()
        if goal_state(node, after):
            return node
        # Use the 'after' dict to find the values, x, reachable from the end of
        # the current node. Child nodes are then node + [x].
        frontier.extend(node + [x] for x in after[node[-1]])
def goal_state(node, after):
    """Check whether *node* satisfies every ordering relation in *after*.

    For each key x in the 'after' dict, every value in after[x] must occur
    in node at or after the first occurrence of x.
    """
    for key, successors in after.items():
        try:
            start = node.index(key)
        except ValueError:
            # key absent: nothing can follow it, so an empty tail is used.
            start = len(node)
        tail = node[start:]
        for successor in successors:
            if successor not in tail:
                return False
    return True
def problem79():
    """Solve Project Euler problem 79: read the fifty login attempts from
    data/keylog.txt and return the shortest consistent passcode as an int."""
    with open("data/keylog.txt", "r") as f:
        codes = [int(x) for x in f.readlines()]
    solution = solve(codes)
    return to_num(solution)
| Python | 0.000088 | |
a0e9ac222091619f41a4eed0cfb25c1653b8034d | add simple update script | cvxpy/utilities/cvxpy_upgrade.py | cvxpy/utilities/cvxpy_upgrade.py | import argparse
import re
# Captures row and column parameters plus the delimiter that follows them.
# The captured dimensions must not be keyword arguments other than "cols"
# (hence the requirement that the second group is followed by a comma,
# whitespace, or a closing parenthesis).  The delimiter is captured as
# group 3 so replacements can re-emit it: the original pattern consumed it,
# which silently dropped the closing ")" (or the ",") from rewritten calls.
P_ROW_COL = r"(?:rows=)?(\w+),\s*(?:cols=)?(\w+)([\s,)])"

# A list of substitutions to make, with the first entry in each tuple the
# pattern and the second entry the substitution.  Order matters: the
# specific two-argument patterns must run before the generic
# single-argument fallbacks.
SUBST = [
    # The shape is a single argument in CVXPY 1.0 (either a tuple or an int)
    (r"Variable\(" + P_ROW_COL, r"Variable(shape=(\1, \2)\3"),
    (r"Bool\(" + P_ROW_COL, r"Variable(shape=(\1, \2), boolean=True\3"),
    (r"Int\(" + P_ROW_COL, r"Variable(shape=(\1, \2), integer=True\3"),
    (r"Parameter\(" + P_ROW_COL, r"Parameter(shape=(\1, \2)\3"),
    # Interpret 1D variables as 2D; code may depend upon 2D structure
    (r"Variable\(([^,)]+)\)", r"Variable(shape=(\1,1))"),
    (r"Bool\(([^,)]+)\)", r"Variable(shape=(\1,1), boolean=True)"),
    (r"Int\(([^,)]+)\)", r"Variable(shape=(\1,1), integer=True)"),
    (r"Parameter\(([^,)]+)\)", r"Parameter(shape=(\1,1))"),
    # Update atom names.  In CVXPY 1.0, mirroring NumPy's naming:
    # sum_entries -> sum, max_entries -> max (max over all entries) and
    # max_elemwise -> maximum (elementwise max).  The previous table mapped
    # max_entries to "cummax" (a cumulative max -- wrong semantics) and
    # max_elemwise to "max".
    (r"sum_entries", "sum"),
    (r"max_entries", "max"),
    (r"max_elemwise", "maximum")
]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Upgrade cvxpy code to version 1.0

        Usage:
            python cvxpy_upgrade.py --infile foo.py --outfile bar.py
        """)
    parser.add_argument("--infile", dest="input_file",
                        help="The name of the file to upgrade.",
                        required=True)
    parser.add_argument("--outfile", dest="output_file",
                        help="The output filename.",
                        required=True)
    args = parser.parse_args()
    # Plain 'r' already performs universal-newline translation on Python 3;
    # the old 'rU' spelling is deprecated and was removed in Python 3.11.
    with open(args.input_file, 'r') as f:
        code = f.read()
    # Apply the substitutions in order; earlier (more specific) patterns
    # must run before the generic single-argument fallbacks.
    for pattern, subst in SUBST:
        code = re.sub(pattern, subst, code)
    with open(args.output_file, 'w') as f:
        f.write(code)
| Python | 0 | |
5a99f676a5b0b55d0490c955cb9af42d9121192d | Initialize database transactions | app/database.py | app/database.py | """This module initialises ."""
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from config.config import Config
# Engine bound to the configured database URI.
engine = create_engine(Config.DATABASE_URI, convert_unicode=True)
# Thread-local session registry: scoped_session hands each thread its own
# session; commits and flushes are explicit (autocommit/autoflush off).
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
Base = declarative_base()
# Enables Model.query.<...> style lookups on every declarative model.
Base.query = db_session.query_property()


def init_db():
    """Re-create the schema: DROP all tables, then CREATE them from the models."""
    # Imported here so every model class is registered on Base.metadata
    # before the schema is (re)built.
    import app.models
    # WARNING: drop_all deletes all existing data; intended for
    # (re)initialisation only.
    Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)
| Python | 0 | |
0ee023d29f613f718f5b88c158b120adb8b2fe2e | add new package (#16289) | var/spack/repos/builtin/packages/py-murmurhash/package.py | var/spack/repos/builtin/packages/py-murmurhash/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyMurmurhash(PythonPackage):
    """Cython bindings for MurmurHash."""
    # NOTE(review): PythonPackage is provided by Spack's build framework;
    # this snippet has no `from spack import *` line -- confirm it is in
    # scope in the deployed repository.

    homepage = "https://github.com/explosion/murmurhash"
    url = "https://pypi.io/packages/source/m/murmurhash/murmurhash-1.0.2.tar.gz"

    version('1.0.2', sha256='c7a646f6b07b033642b4f52ae2e45efd8b80780b3b90e8092a0cec935fbf81e2')

    # Build-time-only dependencies; wheel is pinned to the 0.32 series.
    depends_on('py-setuptools', type='build')
    depends_on('py-wheel@0.32.0:0.32.999', type='build')
| Python | 0 | |
ab1c64e4af25920b31868a9a9f168e76bdf386c6 | Move object collection code to own module. | pytest_wish/collection.py | pytest_wish/collection.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2016 Alessandro Amici
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# python 2 support via python-future
from __future__ import absolute_import, unicode_literals
from builtins import str
import importlib
import inspect
import logging
import re
import sys
import pkg_resources
import stdlib_list
from pytest_wish import blacklists
EXCLUDE_PATTERNS = ['_|.*[.:]_']  # skip private modules and objects (underscore-prefixed names at any level)
NOMATCH_REGEX = r'.\A'  # unmatchable condition even in re.MULTILINE mode

# regex representation of blacklists; an empty blacklist must match nothing,
# hence the NOMATCH_REGEX fallback.
MODULE_BLACKLIST_PATTERN = '|'.join(blacklists.MODULE_BLACKLIST) or NOMATCH_REGEX
OBJECT_BLACKLIST_PATTERN = '|'.join(blacklists.OBJECT_BLACKLIST) or NOMATCH_REGEX

logger = logging.getLogger('wish')
def collect_stdlib_distributions():
    """Yield a single (spec, module_names) pair describing the standard library."""
    # use Python long version number in distribution_spec
    distribution_spec = 'Python==%d.%d.%d' % sys.version_info[:3]
    # use Python short version number for stdlib_list as it supports only a few long versions
    distribution_module_names = stdlib_list.stdlib_list('%d.%d' % sys.version_info[:2])
    yield distribution_spec, distribution_module_names
def guess_module_names(distribution):
    """Return the top-level module names provided by *distribution*.

    Prefers the distribution's top_level.txt metadata; when it is absent,
    falls back to the project name (logged, since it is only a guess).
    """
    if not distribution.has_metadata('top_level.txt'):
        logger.info("Package %r has no top_level.txt. Guessing module name is %r.",
                    str(distribution.as_requirement()), distribution.project_name)
        return [distribution.project_name]
    return distribution.get_metadata('top_level.txt').splitlines()
def collect_installed_distributions():
    """Yield a (requirement_spec, module_names) pair for every installed distribution."""
    for distribution in pkg_resources.working_set:
        distribution_spec = str(distribution.as_requirement())
        distribution_module_names = guess_module_names(distribution)
        yield distribution_spec, distribution_module_names
def collect_distributions(specs):
    """Yield (requirement_spec, module_names) for each spec that matches an
    installed distribution; specs that cannot be resolved are logged and
    skipped."""
    for spec in specs:
        try:
            distribution = pkg_resources.get_distribution(spec)
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        except Exception:
            logger.info("Failed to find a distribution matching the spec: %r.", spec)
            continue
        distribution_spec = str(distribution.as_requirement())
        distribution_module_names = guess_module_names(distribution)
        yield distribution_spec, distribution_module_names
def valid_name(name, include_pattern='', exclude_pattern=NOMATCH_REGEX):
    r"""Return True iff include_pattern matches *name* and exclude_pattern does not.

    Both patterns are matched from the start of the string with ``re.match``
    (compiled patterns are cached by the ``re`` module itself).

    :param str name: The name to validate.
    :param str include_pattern: Include everything by default (``r''``).
    :param str exclude_pattern: Exclude nothing by default (``r'.\A'``).
    :rtype: bool
    """
    included = re.match(include_pattern, name)
    excluded = re.match(exclude_pattern, name)
    return bool(included) and not excluded
def import_module(module_name, module_blacklist_pattern=MODULE_BLACKLIST_PATTERN):
    """Import and return *module_name*, refusing blacklisted modules.

    :raises ImportError: if the module name matches the blacklist pattern
        (or if the import itself fails).
    """
    if not valid_name(module_name, exclude_pattern=module_blacklist_pattern):
        # The original passed module_name as a second positional argument to
        # ImportError (logging-style), leaving the %r placeholder unformatted;
        # format the message explicitly instead.
        raise ImportError("Not importing blacklisted module: %r." % module_name)
    return importlib.import_module(module_name)
def import_distributions(distribution_modules, module_blacklist_pattern=MODULE_BLACKLIST_PATTERN):
    """Import every module of every distribution, honouring the blacklist.

    :param distribution_modules: iterable of (spec, module_names) pairs.
    :param module_blacklist_pattern: regex of module names to refuse.
    :return: list of module names that imported successfully.
    """
    imported_module_names = []
    for spec, module_names in distribution_modules:
        for module_name in module_names:
            try:
                # Forward the caller's blacklist pattern: the original
                # accepted the parameter but never passed it on, so
                # import_module always used the module-level default.
                import_module(module_name, module_blacklist_pattern)
                imported_module_names.append(module_name)
            # Narrowed from a bare ``except:`` so Ctrl-C still works.
            except Exception:
                logger.info("Failed to import module %r from package %r.", module_name, spec)
    return imported_module_names
def generate_module_objects(module, predicate=None):
    """Yield (name, object) pairs for the members defined in *module* itself.

    Members merely imported into the module (their home module differs)
    are skipped.  If the member list cannot be obtained, the failure is
    logged and nothing is yielded.
    """
    try:
        module_members = inspect.getmembers(module, predicate)
    # Narrowed from a bare ``except:``.
    except Exception:
        logger.info("Failed to get member list from module %r.", module)
        # ``return`` ends the generator cleanly; the original
        # ``raise StopIteration`` becomes a RuntimeError inside generators
        # on Python 3.7+ (PEP 479).
        return
    for object_name, object_ in module_members:
        if inspect.getmodule(object_) is module:
            yield object_name, object_
def generate_objects_from_modules(
        modules, include_patterns,
        exclude_patterns=EXCLUDE_PATTERNS,
        predicate_name=None,
        module_blacklist_pattern=MODULE_BLACKLIST_PATTERN,
        object_blacklist_pattern=OBJECT_BLACKLIST_PATTERN,
):
    """Yield ('module:object', object) pairs collected from *modules*.

    Only names matching one of *include_patterns* and none of
    *exclude_patterns* (plus the object blacklist) are yielded; blacklisted
    modules are skipped entirely.  *predicate_name* optionally names a
    callable (``module:object`` form) used to filter members.
    """
    # Build a new list instead of using ``+=``: augmented assignment extends
    # the caller's list in place, which silently grew the module-level
    # EXCLUDE_PATTERNS default on every call.
    exclude_patterns = list(exclude_patterns) + [object_blacklist_pattern]
    include_pattern = '|'.join(include_patterns) or NOMATCH_REGEX
    exclude_pattern = '|'.join(exclude_patterns) or NOMATCH_REGEX
    predicate = object_from_name(predicate_name) if predicate_name else None
    for module_name, module in modules.items():
        if not valid_name(module_name, exclude_pattern=module_blacklist_pattern):
            logger.debug("Not collecting objects from blacklisted module: %r.", module_name)
            continue
        for object_name, object_ in generate_module_objects(module, predicate):
            full_object_name = '{}:{}'.format(module_name, object_name)
            if valid_name(full_object_name, include_pattern, exclude_pattern):
                yield full_object_name, object_
def object_from_name(full_object_name):
    """Resolve a ``module:object`` string and return the named object."""
    head, _, tail = full_object_name.partition(':')
    module = importlib.import_module(head)
    return getattr(module, tail)
def generate_objects_from_names(stream):
    """Yield (full_object_name, object) for each name read from *stream*.

    *stream* is an iterable of text lines holding ``module:object`` names;
    ``#`` starts a comment and blank lines are ignored.  Names that cannot
    be resolved are logged and skipped.
    """
    for line in stream:
        # Strip an optional trailing comment, then surrounding whitespace.
        full_object_name = line.partition('#')[0].strip()
        if full_object_name:
            try:
                yield full_object_name, object_from_name(full_object_name)
            except ImportError:
                logger.info("Failed to import module for object %r.", full_object_name)
            except AttributeError:
                logger.info("Failed to import object %r.", full_object_name)
| Python | 0 | |
e4bc3edf4180ac1385e125a11d01f222747b13f7 | send File Over FTP using ftplib | python/sendFileOverFTP.py | python/sendFileOverFTP.py | #---License---
#This is free and unencumbered software released into the public domain.
#Anyone is free to copy, modify, publish, use, compile, sell, or
#distribute this software, either in source code form or as a compiled
#binary, for any purpose, commercial or non-commercial, and by any
#means.
#by frainfreeze
#---Description---
#sends file over FTP using ftplib
#---code---
import ftplib
session = ftplib.FTP('myserver.com','login','passord')
myfile = open('theFile.txt','rb')
session.storbinary('STOR theFile.txt', myfile)
myfile.close()
session.quit()
| Python | 0 | |
b33725e2a3153b27312e820797bbc8375dbe8970 | Create beta_interweaving_strings_and_removing_digits.py | Solutions/beta_interweaving_strings_and_removing_digits.py | Solutions/beta_interweaving_strings_and_removing_digits.py | from itertools import zip_longest as zlo
from itertools import zip_longest
from string import digits


def interweave(a, b):
    """Interleave *a* and *b* character by character, dropping decimal digits.

    Characters are paired positionally (the shorter string is padded with
    empty strings) and any character found in string.digits is removed.
    Defined with ``def`` rather than an assigned lambda (PEP 8 E731) so the
    function has a proper name and docstring.

    >>> interweave('abc', 'def')
    'adbecf'
    """
    return ''.join(
        (x if x not in digits else '') + (y if y not in digits else '')
        for x, y in zip_longest(a, b, fillvalue='')
    )
| Python | 0.000066 | |
6b53e890958251bd34c29b09f597c8221f4bc98b | Add sublime text utils module | modules/st_utils.py | modules/st_utils.py | import sublime
def open_window():
    """Open a new Sublime Text window and return it.

    NOTE(review): assumes the freshly created window becomes the active one
    before active_window() is queried -- confirm against the API behaviour.
    """
    sublime.run_command("new_window")
    return sublime.active_window()
| Python | 0 | |
ff2fba1c09cff57c9fb01ff3c12f076aff23d56a | Create __init__.py | __init__.py | __init__.py | #!/usr/bin/python
#-------------------------------IMPORT--------------------------------#
from lib import *
#-------------------------------EXPORT--------------------------------#
__all__ = ['<#PREFIX#>_app','<#PREFIX#>_index']
| Python | 0.000429 | |
1641de48deab3e6cc18de7eb40e1d02ab28dd88c | Create StarTrek.py | StarTrek.py | StarTrek.py | # Star Trek
| Python | 0 | |
ae93eaf84487339c5fba696c7900485f2918546e | Add __init__.py | __init__.py | __init__.py | from .pytypecheck import tc, tc_opts
from . import predicates
| Python | 0.006636 | |
59e9281d94acf529113697057d80bb6a1eac6191 | Add global init file | __init__.py | __init__.py | # -*- coding: utf-8 -*-
"""Marble: analyse social stratification in cities"""
__author__ = "Rémi Louf"
__email__ = "remilouf@sciti.es"
__website__ = "www.sciti.es"
__copyright__ = "2015, Rémi Louf"
| Python | 0.000001 | |
f06ebc1da601de961311c4b753e966227eadb911 | Create __init__.py | __init__.py | __init__.py | ""Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported."""
    # 'package' must be string-like; rindex below is the only API we need.
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    # One leading dot means "relative to package"; every additional dot walks
    # one package level up, so run (level - 1) rindex steps.
    for x in xrange(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                             "package")
    return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
    """Import and return the module named *name*.

    The 'package' argument is required when performing a relative import;
    it is the anchor package against which the leading dots are resolved.
    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        # Leading-dot count == relative import level; the rest is the name.
        stripped = name.lstrip('.')
        level = len(name) - len(stripped)
        name = _resolve_name(stripped, package, level)
    __import__(name)
    return sys.modules[name]
| Python | 0.000429 | |
a09fa218a918fbde70ea99a67fa1d964b17c5e2c | add init | __init__.py | __init__.py | __all__ = ['bust']
from .detector import bust
| Python | 0.998609 | |
7b8136d77f2968ac02d17991eca30862bdf9e104 | add __init__ file | __init__.py | __init__.py | from resnet import *
| Python | 0.000128 | |
da874ace234dbac4f0fc8f428cf43d3f415cc596 | Create __init__.py | __init__.py | __init__.py | # __init__.py
| Python | 0.000429 | |
1e716efd7e275068a18d309f42ec8e955309b4b7 | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
f7cc1a858ae88ac14ac18548d70d00c932359b7f | finish question 1060 | p1060_Missing_Element_In_Sorted_Array.py | p1060_Missing_Element_In_Sorted_Array.py | '''
- Leetcode problem: 1060
- Difficulty: Medium
- Brief problem description:
Given a sorted array A of unique numbers, find the K-th missing number starting from the leftmost number of the array.
Example 1:
Input: A = [4,7,9,10], K = 1
Output: 5
Explanation:
The first missing number is 5.
Example 2:
Input: A = [4,7,9,10], K = 3
Output: 8
Explanation:
The missing numbers are [5,6,8,...], hence the third missing number is 8.
Example 3:
Input: A = [1,2,4], K = 3
Output: 6
Explanation:
The missing numbers are [3,5,6,7,...], hence the third missing number is 6.
Note:
1 <= A.length <= 50000
1 <= A[i] <= 1e7
1 <= K <= 1e8
- Solution Summary:
[4, 7, 10, 13] k = 3
missing number = nums[i] - nums[0] - i
1. Binary Search to find the missing number.
7: 2 missing number before it
10: 4 missing number before it
So result is 7 + (k - missing number at 7)
Edge case: if k is larger enough that it's not contained in the list, we can return it directly.
- Used Resources:
--- Bo Zhou
'''
class Solution:
    def missingElement(self, nums, k) -> int:
        """Return the k-th number missing from the strictly increasing *nums*,
        counting upward from nums[0].

        nums[i] - nums[0] - i gives the count of missing values before index
        i, which is monotone, so binary search locates the right gap.
        """
        first = nums[0]
        total_missing = nums[-1] - first - (len(nums) - 1)
        if total_missing < k:
            # The answer lies beyond the last element.
            return nums[-1] + (k - total_missing)
        lo, hi = 0, len(nums) - 1
        while lo < hi - 1:
            mid = (lo + hi) // 2
            if nums[mid] - first - mid < k:
                lo = mid
            else:
                hi = mid
        # k-th missing number sits in the gap right after nums[lo].
        return nums[lo] + (k - (nums[lo] - first - lo))
if __name__ == "__main__":
    # Smoke test: example 3 from the problem statement (expected output: 6).
    solution = Solution()
    testList = [1, 2, 4]
    print(solution.missingElement(testList, 3))
| Python | 0.999999 | |
0a5710cae8597faf111486600fd90278bfc000f9 | reset head due to large file issue | dataForShaopeng/generate_data.py | dataForShaopeng/generate_data.py | import numpy as np
from CMash import MinHash as MH
import seaborn
import matplotlib.pyplot as plt
# Data prep
# In bash:
#mkdir dataForShaopeng
#cd dataForShaopeng/
#mkdir data
#cd data
#wget https://ucla.box.com/shared/static/c1g8xjc9glh68oje9e549fjqj0y8nc17.gz && tar -zxvf c1g8xjc9glh68oje9e549fjqj0y8nc17.gz && rm c1g8xjc9glh68oje9e549fjqj0y8nc17.gz # grabbed this from the Metalign setup_data.sh
#ls | xargs -I{} sh -c 'readlink -f {} >> ../file_names.txt'
#cd ..
#head -n 10 file_names.txt > file_names_10.txt
#head -n 100 file_names.txt > file_names_100.txt
#head -n 1000 file_names.txt > file_names_1000.txt
#cd ../scripts/
#python MakeStreamingDNADatabase.py ../dataForShaopeng/file_names_10.txt ../dataForShaopeng/TrainingDatabase_10_k_60.h5 -n 1000 -k 60 -v
#python MakeStreamingDNADatabase.py ../dataForShaopeng/file_names_100.txt ../dataForShaopeng/TrainingDatabase_100_k_60.h5 -n 1000 -k 60 -v
#python MakeStreamingDNADatabase.py ../dataForShaopeng/file_names_1000.txt ../dataForShaopeng/TrainingDatabase_1000_k_60.h5 -n 1000 -k 60 -v
def cluster_matrix(A_eps, A_indicies, cluster_eps=.01):
    """
    This function clusters the indicies of A_eps such that for a given cluster, there is another element in that cluster
    with similarity (based on A_eps) >= cluster_eps for another element in that same cluster. For two elements of
    distinct clusters, their similarity (based on A_eps) < cluster_eps.
    :param A_eps: The jaccard or jaccard_count matrix containing the similarities
    :param A_indicies: The basis of the matrix A_eps (in terms of all the CEs)
    :param cluster_eps: The similarity threshold to cluster on
    :return: (a list of sets of indicies defining the clusters, LCAs of the clusters)
    """
    #A_indicies_numerical = np.where(A_indicies == True)[0]
    A_indicies_numerical = A_indicies
    # initialize the clusters
    clusters = []
    for A_index in range(len(A_indicies_numerical)):
        # Find nearby elements
        # (both the row and the column are consulted, so an asymmetric
        # similarity matrix still links i and j if either direction passes)
        nearby = set(np.where(A_eps[A_index, :] >= cluster_eps)[0]) | set(np.where(A_eps[:, A_index] >= cluster_eps)[0])
        in_flag = False
        in_counter = 0
        in_indicies = []
        for i in range(len(clusters)):
            if nearby & clusters[i]:
                clusters[i].update(nearby) # add the nearby indicies to the cluster
                in_counter += 1 # keep track if the nearby elements belong to more than one of the previously formed clusters
                in_indicies.append(i) # which clusters nearby shares elements with
                in_flag = True # tells if it forms a new cluster
        if not in_flag: # if new cluster, then append to clusters
            clusters.append(set(nearby))
        if in_counter > 1: # If it belongs to more than one cluster, merge them together
            merged_cluster = set()
            # iterate in reverse so earlier deletions don't shift pending indices
            for in_index in in_indicies[::-1]:
                merged_cluster.update(clusters[in_index])
                del clusters[in_index] # delete the old clusters (now merged)
            clusters.append(merged_cluster) # append the newly merged clusters
    # Translate matrix positions back into the caller-supplied index basis.
    clusters_full_indicies = []
    for cluster in clusters:
        cluster_full_indicies = set()
        for item in cluster:
            cluster_full_indicies.add(A_indicies_numerical[item])
        clusters_full_indicies.append(cluster_full_indicies)
    # Check to make sure the clustering didn't go wrong
    if sum([len(item) for item in clusters_full_indicies]) != len(A_indicies_numerical): # Check the correct number of indicies
        raise Exception("For some reason, the total number of indicies in the clusters doesn't equal the number of indicies you started with")
    if set([item for subset in clusters_full_indicies for item in subset]) != set(A_indicies_numerical): # Make sure no indicies were missed or added
        raise Exception("For some reason, the indicies in all the clusters doesn't match the indicies you started with")
    return clusters_full_indicies#, cluster_LCAs(clusters_full_indicies, taxonomy)
n = 1000
cluster_eps = .01
# Load the n-genome sketch database built by MakeStreamingDNADatabase.py
# (see the bash transcript at the top of this file).
CEs = MH.import_multiple_from_single_hdf5(f"/home/dkoslicki/Desktop/CMash/dataForShaopeng/TrainingDatabase_{n}_k_60.h5")
mat = MH.form_jaccard_matrix(CEs)
# Cluster the n sketches on pairwise Jaccard similarity >= cluster_eps,
# then keep only the largest cluster.
clusters_full_indicies = cluster_matrix(mat, range(n), cluster_eps=cluster_eps)
cluster_sizes = [len(x) for x in clusters_full_indicies]
max_cluster_loc = np.argmax(cluster_sizes)
max_cluster_indicies = list(clusters_full_indicies[max_cluster_loc])
print(len(max_cluster_indicies))
# Restrict the similarity matrix and the sketches to that cluster.
sub_mat = mat[max_cluster_indicies,:][:,max_cluster_indicies]
sub_CEs = [CEs[x] for x in max_cluster_indicies]
out_file_names = [x.input_file_name.decode('utf-8') for x in sub_CEs]
# Persist the selected genome paths for downstream use.
fid = open('/home/dkoslicki/Desktop/CMash/dataForShaopeng/to_select.txt', 'w')
for name in out_file_names:
    fid.write(f"{name}\n")
fid.close()
# Visual sanity checks of the selected sub-matrix.
seaborn.heatmap(sub_mat)
plt.show()
seaborn.clustermap(sub_mat)
plt.show()
# to check the kinds of organisms
#cat to_select.txt | xargs -I{} sh -c 'zcat {} | head -n 1'
| Python | 0 | |
d72f11fbfc23de44af8a2600a7310adafe3e2ffe | Create a.py | agc015/a.py | agc015/a.py | def main():
    # AGC015 A: n = sequence length, a = required minimum, b = required maximum
    n, a, b = map(int, input().split())
    if a > b or (n == 1 and a != b):
        # Impossible: min exceeds max, or a single element would have to
        # equal both a and b.
        print(0)
    else:
        # Count of attainable sums; equivalent to (n - 2) * (b - a) + 1.
        print((n - 1) * (b - a) - (b - a - 1))
if __name__ == '__main__':
    main()
| Python | 0.000489 | |
47ba8815c7a0de0191fb363c22c42732781a8e38 | Fix blank index_for | daiquiri/metadata/migrations/0020_blank_index_for.py | daiquiri/metadata/migrations/0020_blank_index_for.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-04-05 15:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: make Column.index_for optional (blank with empty default).
    dependencies = [
        ('daiquiri_metadata', '0019_column_index_for'),
    ]
    operations = [
        migrations.AlterField(
            model_name='column',
            name='index_for',
            # NOTE(review): default=b'' is a *bytes* default on a CharField
            # (Python 2-era artifact) -- confirm '' was intended.
            field=models.CharField(blank=True, default=b'', help_text='The columns which this column is an index for (e.g. for pgSphere).', max_length=256, verbose_name='Index for'),
        ),
    ]
| Python | 0.999194 | |
a9d5c1dcb059f02f6c3ec5dbff6b07f54c20194d | Add an example directory | example/main.py | example/main.py | from beckett import clients, resources
class PersonResource(resources.BaseResource):
    """Beckett resource describing a record from the 'people' endpoint."""
    class Meta:
        name = 'Person'
        resource_name = 'people'
        # The record's canonical URL serves as its unique identifier.
        identifier = 'url'
        # Fields copied from the API response onto resource instances.
        attributes = (
            'name',
            'birth_year',
            'eye_color',
            'gender',
            'height',
            'mass',
            'url',
        )
        # Only HTTP 200 is treated as success.
        valid_status_codes = (
            200,
        )
        # Read-only resource: only GET is exposed.
        methods = (
            'get',
        )
        pagination_key = None
class StarWarsClient(clients.BaseClient):
    """Beckett HTTP client rooted at http://swapi.co/api."""
    class Meta:
        name = 'Star Wars API Client'
        base_url = 'http://swapi.co/api'
        # Resources this client knows how to fetch.
        resources = (
            PersonResource,
        )
# Demo: fetch person with uid=1 and print the name.
swapi = StarWarsClient()
# get_person appears to return a list of matches -- presumably generated by
# beckett from PersonResource's Meta.name; verify against beckett docs.
results_list = swapi.get_person(uid=1)
person = results_list[0]
print(person.name)
| Python | 0.000002 | |
3367f9d1e394bf686bc6bbd6316265c9feef4f03 | Add basic tests for config usb | test/on_yubikey/test_cli_config.py | test/on_yubikey/test_cli_config.py | from .util import (DestructiveYubikeyTestCase, ykman_cli)
class TestConfigUSB(DestructiveYubikeyTestCase):
    """Check that `ykman config usb --disable` hides each USB application."""

    def setUp(self):
        ykman_cli('config', 'usb', '--enable-all', '-f')

    def tearDown(self):
        ykman_cli('config', 'usb', '--enable-all', '-f')

    def _disable_and_list(self, app):
        # Disable a single application, then return the current USB listing.
        ykman_cli('config', 'usb', '--disable', app, '-f')
        return ykman_cli('config', 'usb', '--list')

    def test_disable_otp(self):
        self.assertNotIn('OTP', self._disable_and_list('OTP'))

    def test_disable_u2f(self):
        self.assertNotIn('FIDO U2F', self._disable_and_list('U2F'))

    def test_disable_openpgp(self):
        self.assertNotIn('OpenPGP', self._disable_and_list('OPGP'))

    def test_disable_piv(self):
        self.assertNotIn('PIV', self._disable_and_list('PIV'))

    def test_disable_oath(self):
        self.assertNotIn('OATH', self._disable_and_list('OATH'))

    def test_disable_fido2(self):
        self.assertNotIn('FIDO2', self._disable_and_list('FIDO2'))
| Python | 0 | |
b8e466a8671be716397d136e16814790b7ed594a | Create Scaffold | SteinerTree.py | SteinerTree.py | import random
import numpy
import matplotlib.pyplot as plt
from deap import base, creator, tools, algorithms
'''
SteinerTree
Code scaffold for solving the SteinerTree problem using the
DEAP framework https://github.com/deap/deap
(currently OneMax problem)
you can find more information about DEAP in the notebooks at:
https://github.com/DEAP/notebooks
version: 0.1
authors: Xiaoqian Xiong, Raoul Nicolodi, Martin Kaufleitner, Aurelien Hontabat
license: MIT
'''
# Type Creation
# Single-objective maximisation; individuals are plain lists of bits.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
# Individual and Population
toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)  # one random bit
# Each individual is 10 random bits; a population is a list of individuals.
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=10)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Evaluation Function
def evalOneMax(individual):
    """OneMax fitness: the number of 1-bits, as the 1-tuple DEAP expects."""
    ones = sum(individual)
    return (ones,)
# Genetic Operators
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)  # two-point crossover
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)  # flip each bit w.p. 0.05
toolbox.register("select", tools.selTournament, tournsize=3)  # 3-way tournament
# Evolving the Population
def main():
    """Run one eaSimple GA; return (final population, stats logbook, hall of fame)."""
    pop = toolbox.population(n=50)
    hof = tools.HallOfFame(1)  # keep the single best individual ever seen
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # cxpb/mutpb: crossover and mutation probabilities; ngen: generations.
    pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=10, stats=stats, halloffame=hof, verbose=True)
    return pop, logbook, hof
# Visualize Results
pop, log, hof = main()
print("Best individual is: %s\nwith fitness: %s" % (hof[0], hof[0].fitness))
# Pull the per-generation statistics recorded during evolution.
gen, avg, min_, max_ = log.select("gen", "avg", "min", "max")
plt.plot(gen, avg, label="average")
plt.plot(gen, min_, label="minimum")
plt.plot(gen, max_, label="maximum")
plt.xlabel("Generation")
plt.ylabel("Fitness")
plt.legend(loc="lower right")
# Alternative: defining the generation step process
# population = toolbox.population(n=300)
# NGEN=40
#
# for gen in range(NGEN):
#
# offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
# fits = toolbox.map(toolbox.evaluate, offspring)
#
# for fit, ind in zip(fits,offspring):
# ind.fitness.values = fit
#
# population = toolbox.select(offspring, k=len(population))
#
# top10 = tools.selBest(population, k=10)
#
# print(top10)
| Python | 0.000001 | |
4dbe1b21ab0f82eeba82be7db2e141260942b998 | add num_states to mixture convenience wrapper | basic/models.py | basic/models.py | # These classes make aliases of class members and properties so as to make
# pybasicbayes mixture models look more like pyhsmm models. When comparing
# H(S)MM model fits to pybasicbayes mixture model fits, it's easier to write one
# code path by using these models.
import pybasicbayes
from ..util.general import rle
class _Labels(pybasicbayes.internals.labels.Labels):
    """Label object whose aliases mimic a pyhsmm state sequence
    (T, stateseq, stateseqs_norep, durations)."""
    @property
    def T(self):
        # sequence length: alias for the number of data points
        return self.N
    @property
    def stateseq(self):
        # pyhsmm-style name for the mixture's component assignments
        return self.z
    @stateseq.setter
    def stateseq(self,stateseq):
        self.z = stateseq
    @property
    def stateseqs_norep(self):
        # label sequence with consecutive repeats collapsed (run-length encoding)
        return rle(self.z)[0]
    @property
    def durations(self):
        # length of each run in the label sequence
        return rle(self.z)[1]
class _MixturePropertiesMixin(object):
    """Gives pybasicbayes mixture models pyhsmm-flavoured accessors
    (num_states, states_list, stateseqs, durations, obs_distns, ...)."""
    _labels_class = _Labels
    @property
    def num_states(self):
        # one mixture component per observation distribution
        return len(self.obs_distns)
    @property
    def states_list(self):
        # pyhsmm name for the per-sequence label objects
        return self.labels_list
    @property
    def stateseqs(self):
        return [s.stateseq for s in self.states_list]
    @property
    def stateseqs_norep(self):
        return [s.stateseq_norep for s in self.states_list]
    @property
    def durations(self):
        return [s.durations for s in self.states_list]
    @property
    def obs_distns(self):
        # observation distributions == mixture components
        return self.components
    @obs_distns.setter
    def obs_distns(self,distns):
        self.components = distns
# Concrete classes: pybasicbayes models with the pyhsmm-style aliases mixed
# in (mixin listed first so its properties take precedence in the MRO).
class Mixture(_MixturePropertiesMixin,pybasicbayes.models.Mixture):
    pass
class MixtureDistribution(_MixturePropertiesMixin,pybasicbayes.models.MixtureDistribution):
    pass
| # These classes make aliases of class members and properties so as to make
# pybasicbayes mixture models look more like pyhsmm models. When comparing
# H(S)MM model fits to pybasicbayes mixture model fits, it's easier to write one
# code path by using these models.
import pybasicbayes
from ..util.general import rle
class _Labels(pybasicbayes.internals.labels.Labels):
@property
def T(self):
return self.N
@property
def stateseq(self):
return self.z
@stateseq.setter
def stateseq(self,stateseq):
self.z = stateseq
@property
def stateseqs_norep(self):
return rle(self.z)[0]
@property
def durations(self):
return rle(self.z)[1]
class _MixturePropertiesMixin(object):
_labels_class = _Labels
@property
def states_list(self):
return self.labels_list
@property
def stateseqs(self):
return [s.stateseq for s in self.states_list]
@property
def stateseqs_norep(self):
return [s.stateseq_norep for s in self.states_list]
@property
def durations(self):
return [s.durations for s in self.states_list]
@property
def obs_distns(self):
return self.components
@obs_distns.setter
def obs_distns(self,distns):
self.components = distns
class Mixture(_MixturePropertiesMixin,pybasicbayes.models.Mixture):
pass
class MixtureDistribution(_MixturePropertiesMixin,pybasicbayes.models.MixtureDistribution):
pass
| Python | 0.000001 |
fb7a5b279da36b9dbd6338867168a79011edd0d6 | Create new package (#7208) | var/spack/repos/builtin/packages/glimmer/package.py | var/spack/repos/builtin/packages/glimmer/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Glimmer(MakefilePackage):
    """Glimmer is a system for finding genes in microbial DNA, especially the
    genomes of bacteria, archaea, and viruses."""
    homepage = "https://ccb.jhu.edu/software/glimmer"
    version('3.02b', '344d012ae12596de905866fe9eb7f16c')
    # The makefiles live in the 'src' subdirectory of the unpacked tarball.
    build_directory = 'src'
    def url_for_version(self, version):
        # e.g. 3.02b -> .../glimmer302b.tar.gz (dots dropped via version.joined)
        url = "https://ccb.jhu.edu/software/glimmer/glimmer{0}.tar.gz"
        return url.format(version.joined)
    def install(self, spec, prefix):
        # The build leaves finished binaries in bin/; copy them into the prefix.
        install_tree('bin', prefix.bin)
| Python | 0 | |
ec54ae0e59058cadecec38fb70be2947d0907d6a | Hello World | pyexamples/conditions.py | pyexamples/conditions.py |
# Print a greeting to standard output.
print('Hello World')
| Python | 0.99998 | |
921244353f75292fc26f521ad081fd587c2b2b25 | Create pyramid_mongo_session.py | pyramid_mongo_session.py | pyramid_mongo_session.py | # encoding: utf-8
from pyramid.interfaces import ISession
from pyramid.compat import PY3, pickle
from pyramid.session import manage_accessed, manage_changed
from zope.interface import implementer
from hashlib import md5
import time
import os
import re
def get_random():
    """Return a random 32-character hex token (md5 of 32 OS-random bytes)."""
    entropy = os.urandom(32)
    return md5(entropy).hexdigest()
# to_pickle: controls whether session state is pickled before storage;
# value['pickled'] in the stored document records which form was used.
def MongoSessionFactory(
    collection,
    to_pickle=False,
    cookie_name='session',
    max_age=None,
    path='/',
    domain=None,
    secure=False,
    httponly=False,
    timeout=3600,
    reissue_time=0,
    set_on_exception=True,
    ):
    """Return a pyramid ISession class that persists state in *collection*.

    The browser cookie carries only a random hex id; the session data itself
    lives in the MongoDB collection under that id.
    NOTE(review): reissue_time is stored on the class but never consulted in
    this snippet -- confirm whether cookie reissue was meant to be implemented.
    """
    @implementer(ISession)
    class MongoSession(dict):
        """ Dictionary-like session object, based on CookieSession """
        # configuration parameters
        _collection = collection
        _to_pickle = to_pickle
        _cookie_name = cookie_name
        _cookie_max_age = max_age
        _cookie_path = path
        _cookie_domain = domain
        _cookie_secure = secure
        _cookie_httponly = httponly
        _cookie_on_exception = set_on_exception
        _timeout = timeout
        _reissue_time = reissue_time
        # dirty flag
        _dirty = False
        def __init__(self, request):
            # Load existing state from Mongo (keyed by the cookie value); any
            # missing, corrupt, or expired record yields a fresh empty session.
            self.request = request
            now = time.time()
            created = renewed = now
            new = True
            value = None
            state = {}
            cookieval = self._get_cookie()
            if cookieval:
                value = self._collection.find_one({'_id': cookieval})
            if value is not None:
                try:
                    renewed = float(value.get('accessed'))
                    created = float(value.get('created'))
                    sval = value.get('value')
                    pickled = value.get('pickled')
                    state = sval
                    if pickled:
                        if not PY3:
                            sval = sval.encode('utf-8') # dammit
                        state = pickle.loads(sval)
                    new = False
                except (TypeError, ValueError, pickle.PickleError):
                    # value failed to unpack properly or renewed was not
                    # a numeric type so we'll fail deserialization here
                    state = {}
            if self._timeout is not None:
                if now - renewed > self._timeout:
                    # expire the session because it was not renewed
                    # before the timeout threshold
                    state = {}
            self.created = created
            self.accessed = renewed
            self.renewed = renewed
            self.new = new
            dict.__init__(self, state)
        # ISession methods
        def changed(self):
            # Register the write-back callback only once per session object.
            if not self._dirty:
                self._dirty = True
                def set_cookie_callback(request, response):
                    self._set_cookie(response)
                    self.request = None # explicitly break cycle for gc
                self.request.add_response_callback(set_cookie_callback)
        def invalidate(self):
            # Remove the server-side record and wipe in-memory state.
            cookieval = self._get_cookie()
            self._collection.delete_one({'_id': cookieval})
            self.clear() # XXX probably needs to unset cookie. But...
        # non-modifying dictionary methods
        get = manage_accessed(dict.get)
        __getitem__ = manage_accessed(dict.__getitem__)
        items = manage_accessed(dict.items)
        values = manage_accessed(dict.values)
        keys = manage_accessed(dict.keys)
        __contains__ = manage_accessed(dict.__contains__)
        __len__ = manage_accessed(dict.__len__)
        __iter__ = manage_accessed(dict.__iter__)
        if not PY3:
            iteritems = manage_accessed(dict.iteritems)
            itervalues = manage_accessed(dict.itervalues)
            iterkeys = manage_accessed(dict.iterkeys)
            has_key = manage_accessed(dict.has_key)
        # modifying dictionary methods
        clear = manage_changed(dict.clear)
        update = manage_changed(dict.update)
        setdefault = manage_changed(dict.setdefault)
        pop = manage_changed(dict.pop)
        popitem = manage_changed(dict.popitem)
        __setitem__ = manage_changed(dict.__setitem__)
        __delitem__ = manage_changed(dict.__delitem__)
        # flash API methods
        @manage_changed
        def flash(self, msg, queue='', allow_duplicate=True):
            storage = self.setdefault('_f_' + queue, [])
            if allow_duplicate or (msg not in storage):
                storage.append(msg)
        @manage_changed
        def pop_flash(self, queue=''):
            storage = self.pop('_f_' + queue, [])
            return storage
        @manage_accessed
        def peek_flash(self, queue=''):
            storage = self.get('_f_' + queue, [])
            return storage
        # CSRF API methods
        @manage_changed
        def new_csrf_token(self):
            token = get_random()
            self['_csrft_'] = token
            return token
        @manage_accessed
        def get_csrf_token(self):
            token = self.get('_csrft_', None)
            if token is None:
                token = self.new_csrf_token()
            return token
        # non-API methods
        def _get_cookie(self): # cookie value, not value itself
            # Strip anything that is not lowercase hex so the value is safe
            # to use directly as a Mongo _id lookup key.
            value = self.request.cookies.get(self._cookie_name, '')
            value = re.sub('[^a-f0-9]', '', value)
            return value
        def _set_cookie(self, response):
            # Persist the session document and (re)issue the cookie.
            if not self._cookie_on_exception:
                exception = getattr(self.request, 'exception', None)
                if exception is not None: # dont set a cookie during exceptions
                    return False
            # New sessions get a fresh random id; existing ones keep theirs.
            cookieval = self.new and get_random() or self._get_cookie()
            if not cookieval:
                return False
            value = self._to_pickle and pickle.dumps(dict(self)) or dict(self)
            data = dict(accessed=self.accessed,
                        created=self.created,
                        value=value,
                        pickled=self._to_pickle,
                        _id=cookieval)
            self._collection.replace_one({'_id': cookieval}, data, upsert=True)
            response.set_cookie(
                self._cookie_name,
                value=cookieval,
                max_age=self._cookie_max_age,
                path=self._cookie_path,
                domain=self._cookie_domain,
                secure=self._cookie_secure,
                httponly=self._cookie_httponly)
            return True
    return MongoSession
| Python | 0.000238 | |
fe1af6449ec4feeaf75a248422e806ad9c818749 | remove doc | python/qidoc/__init__.py | python/qidoc/__init__.py | ## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" qidoc : handle generating sphinx and doxygen documentation of qibuild
projects
"""
| ## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" qidoc : handle generating sphinx and doxygen documentation of qibuild
projects
qiDoc: documentation generator
==============================
qiDoc helps you easily write and merge several
documentation formats, doxygen and sphinx for the moment.
Usage:
-----
qidoc is controlled by a simple config file, looking like
.. code-block:: xml
<qidoc>
<repo name="qibuild">
<sphinxdoc name="qibuild" src="doc" />
</repo>
<repo name="libnaoqi" >
<doxydoc name="libalcommon" src="libalcommon/doc" />
<doxydoc name="libalvision" src="libalvisio/doc" />
</repo>
<repo name="doc">
<sphinxdoc name="doc" src="source" dest="." />
</repo>
<defaults>
<root_project name="doc" />
</defaults>
<templates>
<doxygen
doxyfile="soure/tools/Doxyfile.template"
css="soure/tools/doxygen.template.css"
header="soure/tools/header.template.html"
footer="soure/tools/footer.template.html"
/>
<sphinx
config="source/conf.py"
/>
</templates>
</qidoc>
Such a file will produce a documentation looking like
::
doc/ index.html (doc)
/ libalmotion/index (doxy libnaoqi/almotion)
        / libalvision/index (doxy libnaoqi/alvision)
/ qibuild/index (sphinx qibuild)
"""
| Python | 0.00001 |
ceb8ec420e5e894644aecce8b96463cc3769ce1d | Add process_alerts management command | cityhallmonitor/management/commands/process_alerts.py | cityhallmonitor/management/commands/process_alerts.py | from django.conf import settings
from smtplib import SMTPException

from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone

from cityhallmonitor.models import Subscription
from documentcloud import DocumentCloud
DEFAULT_PROJECT = 'Chicago City Hall Monitor'  # DocumentCloud project searched
EMAIL_SUBJECT = 'City Hall Monitor Search Alert'
EMAIL_FROM = 'KnightLab@northwestern.edu'
# HTML digest header; %(query)s / %(n)d are filled in per subscription.
EMAIL_TEMPLATE = """
<p>You alert subscription on City Hall Monitor:
</p>
<p>%(query)s
</p>
<p>Matched %(n)d new documents:</p>
"""
# One entry per matched document, appended after the header above.
EMAIL_DOC_TEMPLATE = """
<p>%(matter)s<br>
<a href="%(link_url)s">%(link_text)s</a>
</p>
"""
class Command(BaseCommand):
    """Email users whose saved searches match newly updated documents."""
    help = 'Process user alert subscriptions.'

    _client = None

    def client(self):
        """Return a lazily created, unauthenticated DocumentCloud client."""
        if self._client is None:
            self._client = DocumentCloud()
        return self._client

    def add_arguments(self, parser):
        pass  # no command-line options

    def search(self, query):
        """Run *query* against the DocumentCloud search API."""
        return self.client().documents.search(query)

    def send_subscription_alert(self, subscription, document_list):
        """Email *subscription*'s owner an HTML digest of *document_list*."""
        n_documents = len(document_list)
        html_message = EMAIL_TEMPLATE % ({
            'query': subscription.query,
            'n': n_documents
        })
        for doc in document_list:
            html_message += EMAIL_DOC_TEMPLATE % {
                'matter': doc.data['MatterTitle'],
                'link_url': doc.published_url,
                'link_text': doc.title
            }
        # Use self.stdout (Django management-command convention) rather than
        # print, consistent with the rest of this class.
        self.stdout.write('Sending alert for %d documents [%s]' % (
            n_documents, subscription))
        send_mail(
            EMAIL_SUBJECT,
            '',
            EMAIL_FROM,
            [subscription.email],
            fail_silently=False,
            html_message=html_message)

    def process_subscription(self, subscription):
        """Find new matches for *subscription* and alert its owner.

        Only documents updated since the last successful check are reported;
        last_check advances only when sending succeeds, so failed alerts are
        retried on the next run.
        """
        query = 'account:%s project:"%s" %s' % (
            settings.DOCUMENT_CLOUD_ACCOUNT,
            DEFAULT_PROJECT,
            subscription.query)
        self.stdout.write(query)
        r = self.search(query)
        if subscription.last_check:
            r = [d for d in r if d.updated_at > subscription.last_check]
        try:
            if r:
                self.send_subscription_alert(subscription, r)
            subscription.last_check = timezone.now()
            subscription.save()
        # SMTPException is imported from smtplib; previously this name was
        # undefined and the handler itself raised NameError.
        except SMTPException as se:
            self.stdout.write(
                'ERROR sending email for subscription %d: %s' %
                (subscription.id, str(se)))

    def handle(self, *args, **options):
        """Entry point: process every subscription in turn."""
        subscription_list = Subscription.objects.all()
        self.stdout.write('Processing %d subscriptions' % len(subscription_list))
        for subscription in subscription_list:
            self.process_subscription(subscription)
        self.stdout.write('Done')
| Python | 0.000003 | |
59a0996644115bef57de6e15dc572a6227e45b3a | Add script for downloading weekly price data from India | data_crunching/download_india_prices.py | data_crunching/download_india_prices.py | #!/usr/bin/env python2
import urllib
import urllib2
import shutil
import re
import sys
import datetime
# Help text shown for -h/--help (printed by usage() below).
usage_str = '''
This scripts downloads weekly food prices from http://rpms.dacnet.nic.in/Bulletin.aspx in XLS format.
'''
def download_spreadsheet(date_string):
main_url = 'http://rpms.dacnet.nic.in/Bulletin.aspx'
params = '__VIEWSTATE=%2FwEPDwUKLTMyOTA3MjI1Ng9kFgICAQ9kFgQCDQ8QZGQWAWZkAhgPFCsABWQoKVhTeXN0ZW0uR3VpZCwgbXNjb3JsaWIsIFZlcnNpb249NC4wLjAuMCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdhNWM1NjE5MzRlMDg5JDc5NWYyNmMxLTc5OTYtNDljNy04ZmNiLTEwMWYyZTVjMDljYQIBFCsAATwrAAQBAGZkFgICAQ9kFgJmD2QWAmYPZBYMAgEPDxYCHgdWaXNpYmxlaGRkAgIPZBYCAgIPFgIeBVZhbHVlBQVmYWxzZWQCAw9kFgJmD2QWAmYPDxYCHwBoZGQCBQ9kFgICAg8WAh8BBQVmYWxzZWQCBg9kFgJmD2QWAmYPZBYEZg8PZBYCHgVzdHlsZQUQdmlzaWJpbGl0eTpub25lO2QCAw9kFgQCAQ8WAh4HRW5hYmxlZGhkAgQPFgIfAQUDMTAwZAIKD2QWAgIBDxYCHwEFBUZhbHNlZBgBBR5fX0NvbnRyb2xzUmVxdWlyZVBvc3RCYWNrS2V5X18WAgUdUmVwb3J0Vmlld2VyMTpUb2dnbGVQYXJhbTppbWcFF1JlcG9ydFZpZXdlcjE6X2N0bDg6aW1ngH4V0uEh9SSOm1xq3bgJgGyDNgIP96LELxzNCJ%2FutD8%3D&__EVENTVALIDATION=%2FwEWHgLr9e3WDgKkjKiDDgKgwpPxDQKsrPr5DAK896w2Ap6Ss%2BMJAvHkkI8EAveMotMNAu7cnNENAqLH6hoC0veCxAoCjOeKxgYC3YHBwAQCyYC91AwCyIC91AwCp5zChwMCp5z%2BmA0CyI%2B56wYCxNb9oA8CzO7%2BnQkC6MPz%2FQkCvKm9vQwC0vCh0QoC4qOPjw8C9%2FmBmAICstrw7ggC%2Fa2qigkCgoOCmg8CgoOW9QcCgoPa4w062z2PEYfDeoZgfbqdsNPMXUtlCnyUt5wzsv6RVn9PnA%3D%3D&TxtDate=#{TXTDATE}&RadioButtonList1=Food+Items&DDLReportFormat=MS+Excel&Button1=Generate+Report&TxtVolume=XXXX+NO+03&ReportViewer1%3A_ctl3%3A_ctl0=&ReportViewer1%3A_ctl3%3A_ctl1=&ReportViewer1%3A_ctl11=&ReportViewer1%3A_ctl12=quirks&ReportViewer1%3AAsyncWait%3AHiddenCancelField=False&ReportViewer1%3AToggleParam%3Astore=&ReportViewer1%3AToggleParam%3Acollapse=false&ReportViewer1%3A_ctl9%3AClientClickedId=&ReportViewer1%3A_ctl8%3Astore=&ReportViewer1%3A_ctl8%3Acollapse=false&ReportViewer1%3A_ctl10%3AVisibilityState%3A_ctl0=None&ReportViewer1%3A_ctl10%3AScrollPosition=&ReportViewer1%3A_ctl10%3AReportControl%3A_ctl2=&ReportViewer1%3A_ctl10%3AReportControl%3A_ctl3=&ReportViewer1%3A_ctl10%3AReportControl%3A_ctl4=100'
params = params.replace('#{TXTDATE}', date_string)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
out_file_name = re.sub('/', '_', date) + '.xls'
print "### Output file:", out_file_name
myfile = open(out_file_name, 'wb')
shutil.copyfileobj(response.fp, myfile)
myfile.close()
print "### Finished."
def validate_date(date_string):
    """Abort the program unless *date_string* is a DD/MM/YYYY Friday."""
    parsed = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
    if not parsed:
        sys.exit("ERROR: invalid date")
    day = int(parsed.group(1))
    month = int(parsed.group(2))
    year = int(parsed.group(3))
    # weekday() == 4 is Friday (Monday == 0).
    if datetime.date(year, month, day).weekday() != 4:
        sys.exit("ERROR: the date entered is not Friday, too bad")
def usage():
    """Print the command-line usage banner (Python 2 print statement)."""
    print usage_str
if __name__ == "__main__":
    # With no argument (or -h/--help), print usage and exit cleanly.
    if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
        usage()
        sys.exit(0)
    date = sys.argv[1]
    validate_date(date)  # exits unless a DD/MM/YYYY Friday
    download_spreadsheet(date)
| Python | 0 | |
44597a9b9f5e2ef2eb391b096d3240b81960ce68 | fix doc generation on plot_lasso_coordinate_descent_path.py example (pb on my box) | examples/glm/plot_lasso_coordinate_descent_path.py | examples/glm/plot_lasso_coordinate_descent_path.py | """
=====================
Lasso and Elastic Net
=====================

Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
"""
print __doc__

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.

# NOTE(review): `cycle` is no longer used in this revision (the color cycle
# is now configured through the axes object below).
from itertools import cycle
import numpy as np
import pylab as pl

from scikits.learn.glm import lasso_path, enet_path
from scikits.learn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

X /= X.std(0) # Standardize data (easier to set the rho parameter)

################################################################################
# Compute paths

eps = 5e-3 # the smaller it is the longer is the path

print "Computing regularization path using the lasso..."
models = lasso_path(X, y, eps=eps)
alphas_lasso = np.array([model.alpha for model in models])
coefs_lasso = np.array([model.coef_ for model in models])

print "Computing regularization path using the elastic net..."
models = enet_path(X, y, eps=eps, rho=0.8)
# NOTE(review): alphas_lasso/alphas_enet are computed but unused in this
# revision -- the coefficients are plotted against their index.
alphas_enet = np.array([model.alpha for model in models])
coefs_enet = np.array([model.coef_ for model in models])

################################################################################
# Display results

ax = pl.gca()
# Repeat the 5-color cycle so lasso and elastic-net lines pair up by color.
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_lasso)
l2 = pl.plot(coefs_enet, linestyle='--')

pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and Elastic-Net Paths')
pl.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
pl.axis('tight')
pl.show()
| """
=====================
Lasso and Elastic Net
=====================

Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
"""
print __doc__

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.

from itertools import cycle
import numpy as np
import pylab as pl

from scikits.learn.glm import lasso_path, enet_path
from scikits.learn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

X /= X.std(0) # Standardize data (easier to set the rho parameter)

################################################################################
# Compute paths

eps = 5e-3 # the smaller it is the longer is the path

print "Computing regularization path using the lasso..."
models = lasso_path(X, y, eps=eps)
alphas_lasso = np.array([model.alpha for model in models])
coefs_lasso = np.array([model.coef_ for model in models])

print "Computing regularization path using the elastic net..."
models = enet_path(X, y, eps=eps, rho=0.8)
alphas_enet = np.array([model.alpha for model in models])
coefs_enet = np.array([model.coef_ for model in models])

################################################################################
# Display results

# One color per coefficient; the same color (dashed) marks the elastic-net path.
color_iter = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
for color, coef_lasso, coef_enet in zip(color_iter,
                            coefs_lasso.T, coefs_enet.T):
    pl.plot(-np.log10(alphas_lasso), coef_lasso, color)
    pl.plot(-np.log10(alphas_enet), coef_enet, color + '--')

pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and Elastic-Net Paths')
pl.legend(['Lasso','Elastic-Net'])
pl.axis('tight')
pl.show()
| Python | 0 |
c12305c59f9c95149e95094179768ac627d7faf9 | Add test file for upload client class | tests/client/test_upload_client.py | tests/client/test_upload_client.py | import os
import mock
import unittest
from qingstor.sdk.config import Config
from qingstor.sdk.service.qingstor import Bucket
from qingstor.sdk.service.qingstor import QingStor
from qingstor.sdk.client.upload_client import UploadClient
from qingstor.sdk.error import (
BadRequestError,
InvalidObjectNameError
)
TEST_PART_SIZE=5242880
TEST_FILE_PATH='test_file_100M'
TEST_OBJECT_KEY='test_upload_20170804'
TEST_ACCESS_KEY='This_is_mock_access_key'
TEST_SECRET_ACCESS_KEY='This_is_mock_secret_access_key'
class MockBucket:
    """Stand-in for a qingstor Bucket HTTP response object.

    The tests only ever inspect ``status_code`` and subscript the response
    (e.g. ``resp["upload_id"]``).
    """

    def __init__(self, status_code):
        self.status_code = status_code

    # Mock the upload_id
    def __getitem__(self, key):
        # BUG FIX: the original returned the literal 000000000000, which is
        # octal zero in Python 2 but a SyntaxError in Python 3. The value is
        # simply 0 either way.
        return 0
class CallbackFunc:
    """Holder for a no-op progress callback handed to UploadClient."""

    def __init__(self):
        # Nothing to initialise; the instance only supplies callback_func.
        pass

    def callback_func(self):
        # Deliberate no-op: mimics a caller that ignores upload progress.
        return None
class TestUploadClient(unittest.TestCase):
    """Exercises UploadClient's multipart upload against mocked Bucket HTTP
    responses (200/201 success codes, 400 failure)."""

    @classmethod
    def setUpClass(cls):
        # Canned responses: helm each mock returns a MockBucket carrying the
        # desired HTTP status code.
        output200=MockBucket(200)
        cls.mock_http200=mock.Mock(return_value=output200)
        output201=MockBucket(201)
        cls.mock_http201=mock.Mock(return_value=output201)
        output400=MockBucket(400)
        cls.mock_http400=mock.Mock(return_value=output400)
        config=Config(TEST_ACCESS_KEY,TEST_SECRET_ACCESS_KEY)
        # QingStor.Bucket=mock_qingstor
        qingstor=QingStor(config)
        # Create bucket instance
        callback_func=CallbackFunc()
        bucket=qingstor.Bucket('test_upload_bucket','pek3a')
        cls.upload_obj = UploadClient(bucket, callback_func.callback_func, TEST_PART_SIZE)

    def setUp(self):
        # Create a 100 MB zero-filled fixture file (requires a POSIX shell).
        os.system("dd if=/dev/zero of=test_file_100M bs=1024 count=102400")

    def tearDown(self):
        os.system("rm -f test_file_100M")

    def test_right_response(self):
        """A 200 initiate + 201 part/complete responses upload cleanly."""
        # Mock the output of initiate_multipart_upload
        Bucket.initiate_multipart_upload=self.mock_http200
        # Mock the output of upload_multipart
        Bucket.upload_multipart=self.mock_http201
        Bucket.complete_multipart_upload=self.mock_http201
        with open(TEST_FILE_PATH, 'rb') as f:
            self.upload_obj.upload('upload_20180803.mp4', f)

    def test_initialize_bad_response(self):
        """A 400 on initiate_multipart_upload surfaces as InvalidObjectNameError."""
        # Mock the output of initiate_multipart_upload
        Bucket.initiate_multipart_upload=self.mock_http400
        with open(TEST_FILE_PATH, 'rb') as f:
            self.assertRaises(InvalidObjectNameError,self.upload_obj.upload,TEST_OBJECT_KEY,f)

    def test_upload_bad_response(self):
        """A 400 on upload_multipart surfaces as BadRequestError."""
        # Mock the output of initiate_multipart_upload
        Bucket.initiate_multipart_upload=self.mock_http200
        # Mock the output of upload_multipart
        Bucket.upload_multipart=self.mock_http400
        with open(TEST_FILE_PATH, 'rb') as f:
            self.assertRaises(BadRequestError,self.upload_obj.upload,TEST_OBJECT_KEY,f)

if __name__=="__main__":
    unittest.main()
| Python | 0 | |
305ab2ede27fde9097c7a69804189a529c868140 | add missing filter plugins | filter_plugins/customs.py | filter_plugins/customs.py | class FilterModule(object):
    def filters(self):
        """Expose this Ansible filter plugin's filters (name -> callable)."""
        return {
            'filename_without_extension': self.filename_without_extension
        }
def filename_without_extension(self, path, extension):
return path[:-len(extension)] | Python | 0.000001 | |
9296c11a013fc03c7f299707a12b74a4ef85abf7 | update cStringIO counter | gdb_StringIO.py | gdb_StringIO.py | import gdb
"""
SPECS
/* Entries related to the type of user set breakpoints. */
static struct pybp_code pybp_codes[] =
{
{ "BP_NONE", bp_none},
{ "BP_BREAKPOINT", bp_breakpoint},
{ "BP_WATCHPOINT", bp_watchpoint},
{ "BP_HARDWARE_WATCHPOINT", bp_hardware_watchpoint},
{ "BP_READ_WATCHPOINT", bp_read_watchpoint},
{ "BP_ACCESS_WATCHPOINT", bp_access_watchpoint},
{NULL} /* Sentinel. */
};
gdb.BreakPoint.__init__
static char *keywords[] = { "spec", "type", "wp_class", "internal", NULL };
if (! PyArg_ParseTupleAndKeywords (args, kwargs, "s|iiO", keywords,
&spec, &type, &access_type, &internal))
"""
O_s = {}
gdb_eval = gdb.parse_and_eval
gdb_O_object_type = gdb.lookup_type("Oobject")
class StringO_New_BP(gdb.Breakpoint):
    """Watchpoint at cStringIO.c:566 that records the state of a StringO
    ("O" output object) the first time it is observed at that location.

    BUG FIXES vs. the original:
      * ``def StringO_New_BP(gdb.BreakPoint):`` was a SyntaxError -- a class
        with a base must use ``class``; the gdb Python class is gdb.Breakpoint.
      * ``super(gdb.BreakPoint, self)`` started the MRO walk *after* the
        breakpoint base class, skipping its __init__; it must name this class.
      * Per the pybp_codes table quoted in the module docstring, the watchpoint
        type constant is gdb.BP_WATCHPOINT (not BPWATCHPOINT).
    """

    def __init__(self):
        super(StringO_New_BP, self).__init__(
            spec="cStringIO.c:566",
            type=gdb.BP_WATCHPOINT,
            wp_class=gdb.WP_READ
        )

    def stop(self):
        # Snapshot the Oobject fields of the `self` variable in the inferior.
        O_self = gdb_eval("self")
        address = str(O_self.address)
        O_object = O_self.cast(gdb_O_object_type)
        infos = {
            "pos": str(O_object["pos"]),
            "string_size": str(O_object["string_size"]),
            "buf_size": str(O_object["buf_size"]),
            "softspace": str(O_object["softspace"])
        }
        # Merge into an existing (truthy) record, otherwise start a new one.
        if O_s.get(address):
            O_s[address]["new"] = infos
        else:
            O_s[address] = {"new": infos}
        # Never halt the inferior; we only record.
        return False
class StringO_Dealloc_BP(gdb.Breakpoint):
    """Watchpoint at cStringIO.c:518 (StringO deallocation) that records the
    final state of the object.

    Same fixes as StringO_New_BP: ``class`` instead of ``def``, a correct
    super() target, and the gdb.BP_WATCHPOINT constant spelling.
    """

    def __init__(self):
        super(StringO_Dealloc_BP, self).__init__(
            spec="cStringIO.c:518",
            type=gdb.BP_WATCHPOINT,
            wp_class=gdb.WP_READ
        )

    def stop(self):
        # Snapshot the Oobject fields of the `self` variable in the inferior.
        O_self = gdb_eval("self")
        address = str(O_self.address)
        O_object = O_self.cast(gdb_O_object_type)
        infos = {
            "pos": str(O_object["pos"]),
            "string_size": str(O_object["string_size"]),
            "buf_size": str(O_object["buf_size"]),
            "softspace": str(O_object["softspace"])
        }
        # Merge into an existing (truthy) record, otherwise start a new one.
        if O_s.get(address):
            O_s[address]["dealloc"] = infos
        else:
            O_s[address] = {"dealloc": infos}
        # Never halt the inferior; we only record.
        return False
| Python | 0 | |
96a94b6901ebca0930a2509ccadfd603f8558b8d | Add test case for event items | tests/test_event.py | tests/test_event.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
from lxml import html
from utils.summary_downloader import SummaryDownloader
from utils.data_handler import DataHandler
from parsers.team_parser import TeamParser
from parsers.game_parser import GameParser
from parsers.roster_parser import RosterParser
from parsers.event_parser import EventParser
def test_event(tmpdir):
    """End-to-end check of EventParser on the opening faceoff of NHL game
    2016020001 (Oct 12, 2016).

    NOTE(review): downloads live summary reports from the NHL site, so this
    is an integration test that needs network access.
    """
    date = "Oct 12, 2016"
    game_id = "020001"
    # Fetch all summary reports for that date into a fresh temp directory.
    sdl = SummaryDownloader(
        tmpdir.mkdir('shot').strpath, date, zip_summaries=False)
    sdl.run()
    dld_dir = sdl.get_tgt_dir()

    ep = get_event_parser(dld_dir, game_id)

    # Second raw item -> the opening faceoff at 0:00 of period 1.
    event = ep.get_event(ep.event_data[1])

    assert event.event_id == 20160200010002
    assert event.game_id == 2016020001
    assert event.in_game_event_cnt == 2
    assert event.type == 'FAC'
    assert event.period == 1
    assert event.time == datetime.timedelta(0)
    # Player ids of the skaters/goalies on ice for both teams.
    assert event.road_on_ice == [
        8475172, 8473463, 8470599, 8476853, 8475716, 8475883]
    assert event.home_on_ice == [
        8474250, 8473544, 8471676, 8470602, 8476879, 8467950]
    assert event.road_score == 0
    assert event.home_score == 0
    assert event.x == 0
    assert event.y == 0
    assert event.road_goalie == 8475883
    assert event.home_goalie == 8467950
    assert event.raw_data == (
        "TOR won Neu. Zone - TOR #43 KADRI vs OTT #19 BRASSARD")
def get_document(dir, game_id, prefix):
    """Return the raw HTML report (prefix e.g. 'GS', 'ES', 'PL') for a game
    previously downloaded into *dir*."""
    dh = DataHandler(dir)
    return open(dh.get_game_data(game_id, prefix)[prefix]).read()


def get_json_document(dir, game_id):
    """Return the parsed JSON game feed for a game downloaded into *dir*."""
    dh = DataHandler(dir)
    return json.loads(open(dh.get_game_json_data(game_id)).read())
def get_event_parser(dir, game_id):
    """
    Retrieves event parser for game with specified id from
    data downloaded to directory also given.

    Chains the team, game and roster parsers, since the event parser needs
    a populated Game and rosters before it can resolve play-by-play entries.
    """
    # retrieving raw data
    game_report_doc = html.fromstring(get_document(dir, game_id, 'GS'))
    roster_report_doc = html.fromstring(get_document(dir, game_id, 'ES'))
    play_by_play_report_doc = html.fromstring(
        get_document(dir, game_id, 'PL'))
    game_feed_json_doc = get_json_document(dir, game_id)
    # using team parser to retrieve teams
    tp = TeamParser(game_report_doc)
    teams = tp.create_teams()
    # using game parser to retrieve basic game information
    gp = GameParser(game_id, game_report_doc)
    game = gp.create_game(teams)
    # using roster parser to retrieve team rosters
    rp = RosterParser(roster_report_doc)
    rosters = rp.create_roster(game, teams)
    # using event parser to retrieve all raw events
    ep = EventParser(play_by_play_report_doc, game_feed_json_doc)
    ep.load_data()
    # Inject game/rosters directly before coordinate caching.
    (ep.game, ep.rosters) = (game, rosters)
    ep.cache_plays_with_coordinates()
    return ep
| Python | 0.000002 | |
bc581b1ac7a12fd3026667663a4812fe0bd3869b | add dict.py | dict.py | dict.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import json
import urllib
import subprocess
def main():
    """Translate the current X selection via the Baidu translate API and
    show the result as a desktop notification (libnotify)."""
    # Grab the current primary selection (requires the `xsel` utility).
    word = subprocess.check_output('xsel')
    params = urllib.urlencode({'from': 'auto', 'to': 'auto', 'client_id':'WGCxN9fzvCxPo0nqlzGLCPUc', 'q': word})
    # BUG FIX: the original called urlopen(url, params), which issued a POST
    # with `params` as the request body against a URL still containing the
    # literal "?%s" placeholder. Interpolate the query string so the request
    # is the intended GET.
    f = urllib.urlopen("http://openapi.baidu.com/public/2.0/bmt/translate?%s" % params)
    j = json.loads(f.read())
    d = dict(j['trans_result'][0])
    # Show "<selection>: <translation>".
    subprocess.call(['notify-send', word, d['dst']])

if __name__ == '__main__':
    main()
| Python | 0.000002 | |
28b5fef57580640cd78775d6c0544bc633e5958a | Add helper script to generate API keys. | generate-key.py | generate-key.py | #!/usr/bin/python
import os
import sqlite3
import sys
import time

# Usage: generate-key.py "First Last" email@example.com
if len(sys.argv) < 3:
    raise ValueError('Usage: %s "Firstnam Lastname" email@example.com' % sys.argv[0])

db = sqlite3.connect('/var/lib/zon-api/data.db')

# 26 random bytes, hex-encoded -> 52-character key.
# NOTE(review): bytes.encode('hex') is Python 2 only.
api_key = str(os.urandom(26).encode('hex'))
tier = 'free'
name = sys.argv[1]
email = sys.argv[2]
requests = 0
# Quota-reset timestamp starts at "now" (unix seconds).
reset = int(time.time())

# Column order must match the client table schema:
# (api_key, tier, name, email, requests, reset).
query = 'INSERT INTO client VALUES (?, ?, ?, ?, ?, ?)'
db.execute(query, (api_key, tier, name, email, requests, reset))
db.commit()
db.close()

# Emit the freshly issued key so the operator can hand it to the user.
print api_key
| Python | 0 | |
a314da2415e661ed6cbc9929095a0f34610d9c21 | FIX _get_search_domain in partner | transindar_personalization/res_partner.py | transindar_personalization/res_partner.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, fields
class res_partner(models.Model):
    """Extends res.partner name search with an internal-code shortcut and a
    computed 'smart search' field."""
    _inherit = 'res.partner'

    @api.model
    def name_search(
            self, name, args=None, operator='ilike', limit=100):
        # Try the custom domain first; fall back to the stock name_search
        # when it yields nothing.
        recs = self.search(self._get_search_domain(
            name, args=args, operator=operator, limit=limit), limit=limit)
        if not recs:
            return super(res_partner, self).name_search(
                name=name, args=args, operator=operator, limit=limit)
        return recs.name_get()

    @api.model
    def _get_search_domain(self, name, args=None, operator='ilike', limit=100):
        """Build the partner search domain: exact-ish internal_code match
        when one exists, otherwise display_name OR ref. Caller-supplied
        `args` are always appended so extra restrictions are preserved."""
        if not args:
            args = []
        if name:
            if self.search(
                    [('internal_code', '=ilike', name)] + args,
                    limit=limit):
                return [('internal_code', '=ilike', name)] + args
            else:
                return ['|', ('display_name', 'ilike', name),
                        ('ref', 'ilike', name)] + args
        return args

    def _search_custom_search(self, operator, value):
        # Search implementation for the computed custom_search field.
        res = self._get_search_domain(value, operator=operator)
        return res

    @api.multi
    def _get_custom_search(self):
        # The field is search-only; it never stores or displays a value.
        return False

    custom_search = fields.Char(
        compute='_get_custom_search',
        string='Busqueda Inteligente',
        search='_search_custom_search'
    )
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, fields
class res_partner(models.Model):
    """Extends res.partner name search with an internal-code shortcut and a
    computed 'smart search' field (pre-fix revision)."""
    _inherit = 'res.partner'

    @api.model
    def name_search(
            self, name, args=None, operator='ilike', limit=100):
        recs = self.search(self._get_search_domain(
            name, args=args, operator=operator, limit=limit), limit=limit)
        if not recs:
            return super(res_partner, self).name_search(
                name=name, args=args, operator=operator, limit=limit)
        return recs.name_get()

    @api.model
    def _get_search_domain(self, name, args=None, operator='ilike', limit=100):
        # NOTE(review): the domains returned below drop the caller-supplied
        # `args`, so extra restrictions are silently lost; the later revision
        # of this file appends `+ args` to each returned domain.
        if not args:
            args = []
        if name:
            if self.search(
                    [('internal_code', '=ilike', name)],
                    limit=limit):
                return [('internal_code', '=ilike', name)]
            else:
                return ['|', ('display_name', 'ilike', name),
                        ('ref', 'ilike', name)]
        return args

    def _search_custom_search(self, operator, value):
        # Search implementation for the computed custom_search field.
        res = self._get_search_domain(value, operator=operator)
        return res

    @api.multi
    def _get_custom_search(self):
        # The field is search-only; it never stores or displays a value.
        return False

    custom_search = fields.Char(
        compute='_get_custom_search',
        string='Busqueda Inteligente',
        search='_search_custom_search'
    )
| Python | 0 |
6807ce92c5d0a26a43db8cb25ef5ffd8b8ff6277 | Add skeleton cryptdev module | salt/modules/cryptdev.py | salt/modules/cryptdev.py | # -*- coding: utf-8 -*-
'''
Salt module to manage Unix cryptsetup jobs and the crypttab file
'''
# Import python libraries
from __future__ import absolute_import
import logging
# Import salt libraries
import salt
# Set up logger
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'cryptdev'
def __virtual__():
'''
Only load on POSIX-like systems
'''
if salt.utils.is_windows():
return (False, 'The cryptdev module cannot be loaded: not a POSIX-like system')
else:
return True
| Python | 0 | |
9b457a08ce433b574f186bb1b32da666edee485a | Complete sol by recursion | lc0108_convert_sorted_array_to_binary_search_tree.py | lc0108_convert_sorted_array_to_binary_search_tree.py | """Leetcode 108. Convert Sorted Array to Binary Search Tree
Easy
URL: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
Given an array where elements are sorted in ascending order,
convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree
in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted array: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following
height balanced BST:
0
/ \
-3 9
/ /
-10 5
"""
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


class SolutionRecur(object):
    def sortedArrayToBST(self, nums):
        """Build a height-balanced BST from an ascending-sorted list.

        :type nums: List[int]
        :rtype: TreeNode

        Recurses on index bounds instead of slicing, so the whole build is
        O(n) rather than the O(n log n) copying done by nums[:mid] /
        nums[mid+1:]. The midpoint choice (lower middle) matches the
        original slicing version, so the resulting tree shape is identical.
        """
        def build(lo, hi):
            # Subtree covering nums[lo:hi+1]; an empty range means no node.
            if lo > hi:
                return None
            mid = lo + (hi - lo) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(nums) - 1)
def main():
    # Smoke test: build a BST from the LeetCode example input.
    # NOTE(review): `root` is intentionally unused -- the call itself is
    # the exercise.
    nums = [-10, -3, 0, 5, 9]
    root = SolutionRecur().sortedArrayToBST(nums)

if __name__ == '__main__':
    main()
| Python | 0.006524 | |
26cb7ea15886f78d781018e0fb9398f5a61acadd | Create time_keeper.py | fx_collect/time_keeper.py | fx_collect/time_keeper.py | from datetime import datetime, timedelta
import numpy as np
import pytz
class TimeKeeper(object):
    """
    The TimeKeeper class controls all date time logic.

    Ranges are numpy datetime64 arrays spanning the current trading week
    (Sunday start, Saturday end); calculate_date() picks the bar preceding
    a given last-update time on a given time frame.
    """
    def __init__(self, init_dt):
        # init_dt is the current daily bar
        # init_dt = datetime(2018,1,29,22,0)
        self._create_current_tradingweek(init_dt)
        self._create_datetime_ranges()

    def _create_current_tradingweek(self, init_dt):
        """Takes the latest daily bar date time to setup
        initial system dates (wk_str = Sunday, wk_end = Friday+)."""
        if init_dt.weekday() == 6:  # Its Sunday
            self.wk_str = init_dt
        else:
            # Roll back to the preceding Sunday.
            self.wk_str = init_dt - timedelta(days=init_dt.weekday()+1)
        self.wk_end = self.wk_str + timedelta(days=5)

    def _create_datetime_ranges(self):
        """Pre-build one datetime range per supported time frame."""
        self.minutely_rng = self._minutely_rng()
        self.hourly_rng = self._hourly_rng()
        self.daily_rng = self._daily_rng()
        self.weekly_rng = self._weekly_rng()
        self.monthly_rng = self._monthly_rng()

    def _calc(self, lu, rng):
        """All non-monthly time frame calculations: return the range entry
        two slots before the first entry strictly after *lu* (i.e. the last
        fully completed bar), stepping back across the range edges."""
        try:
            x = rng[rng > lu][0]
            index = np.where(rng == x)[0].item()
            if index - 2 < 0:
                # Too close to the range start: fall back one week from the end.
                x = rng[-1].item()
                fin = x - timedelta(days=7)
            else:
                fin = rng[index-2].item()
        except IndexError:
            # lu is past the end of the range: use the penultimate entry.
            index = np.where(rng == rng[-1])[0].item()
            fin = rng[index-1].item()
        return fin

    def _mcalc(self, lu):
        """Perform monthly calculation (same idea as _calc, but the
        start-of-range fallback wraps a full year back)."""
        rng = self.monthly_rng
        try:
            x = rng[rng > lu][0]
            index = np.where(rng == x)[0].item()
            if index - 2 < 0:
                # Negative index wraps to the range tail; shift back a year.
                x = rng[index-2].item()
                fin = x.replace(year=x.year-1)
            else:
                fin = rng[index-2].item()
        except IndexError:
            index = np.where(rng == rng[-1])[0].item()
            fin = rng[index-1].item()
        return fin

    def _minutely_rng(self):
        """Returns minutely numpy array covering the trading week."""
        return np.arange(
            self.wk_str, self.wk_end, dtype='datetime64[m]'
        )

    def _hourly_rng(self):
        """Returns hourly numpy array covering the trading week."""
        return np.arange(
            self.wk_str, self.wk_end, dtype='datetime64[h]'
        )

    def _daily_rng(self):
        """Returns daily numpy array; bars are stamped at 22:00."""
        d = np.arange(
            self.wk_str, self.wk_end, dtype='datetime64[D]'
        )
        return d + np.timedelta64(22, 'h')

    def _weekly_rng(self):
        """Returns numpy array of all Saturdays (22:00, NY-adjusted) of the
        year containing the week start."""
        str_sat = self.wk_str.replace(month=1, day=1)
        str_sat += timedelta(days=5-str_sat.weekday())
        end_sat = str_sat + timedelta(weeks=52)
        w = np.arange(str_sat, end_sat, dtype='datetime64[D]')
        return self._ny((w + np.timedelta64(22, 'h'))[0::7])

    def _monthly_rng(self):
        """Returns numpy array of all end-of-month stamps (22:00,
        NY-adjusted) for the coming 52 weeks."""
        m = np.arange(
            self.wk_str.replace(day=1, month=1),
            self.wk_str+timedelta(weeks=52),
            dtype='datetime64[M]'
        ) + np.timedelta64(1, 'M')
        end_of_month = m - np.timedelta64(1, 'D')
        return self._ny(end_of_month + np.timedelta64(22, 'h'))

    def _ny(self, np_arr):
        """
        Adjusts the time based on the US/Eastern timezone,
        for New York opening: entries falling in DST are pulled back one
        array unit (one hour for hourly-resolution inputs).

        NOTE(review): `offset` is never reset to 0 once a DST entry is seen,
        so all later entries are shifted too -- confirm this is intended.
        """
        l = []
        offset = 0
        for i in np_arr:
            dt = i.item()
            newyork = pytz.timezone('US/Eastern')
            nydt = newyork.localize(dt, is_dst=None)
            assert nydt.tzinfo is not None
            assert nydt.tzinfo.utcoffset(nydt) is not None
            isdst = bool(nydt.dst())
            if isdst:
                offset = 1
            l.append(i - offset)
        return np.array(l)

    def stop_date(self):
        """Returns the system stop time (one minute past the week end)."""
        return self.wk_end + timedelta(minutes=1)

    def utc_now(self):
        """Returns the current time in UTC."""
        return datetime.utcnow()

    def return_ole_zero(self):
        return datetime(1899, 12, 30)  # OLE_ZERO

    def sub_dt(self, dt):
        """Return *dt* minus one minute."""
        return dt - timedelta(minutes=1)

    def add_dt(self, dt):
        """Return *dt* plus one minute."""
        return dt + timedelta(minutes=1)

    def calculate_date(self, last_update, time_frame):
        """Calculation selector.

        *time_frame* is a letter plus step, e.g. 'm5', 'H1', 'D1', 'W1', 'M1'.

        BUG FIXES vs. the original: a duplicated (dead) `elif tf == 'W'`
        branch was removed, and the fragile identity comparison
        `tf is not 'M'` (string interning dependent) is replaced with a
        plain equality test.
        """
        lu = last_update.replace(second=0, microsecond=0)
        tf = time_frame[:1]
        delta = int(time_frame[1:])
        if tf == 'M':  # monthly has its own calculation
            return self._mcalc(lu)
        if tf == 'm':  # minutely
            rng = self.minutely_rng[0::delta]
        elif tf == 'H':  # hourly
            rng = self.hourly_rng[0::delta]
        elif tf == 'D':  # daily
            rng = self.daily_rng
        elif tf == 'W':  # weekly
            rng = self.weekly_rng
        # All other time frames
        return self._calc(lu, rng)
| Python | 0.000012 | |
e36df0c827de666fa00d9b806759cde0f0cb1697 | Test code | game.py | game.py | class Room:
def __init__(self, configuration={}):
self.doors = configuration
class Merchant:
    """A trader that sells its inventory at a markup and buys at a markdown."""

    def __init__(self, markup=1.2, markdown=0.8):
        self.inventory = []
        self.markup = markup      # multiplier applied when the merchant sells
        self.markdown = markdown  # multiplier applied when the merchant buys

    def add_item(self, item):
        """Add an Item to the merchant's inventory.

        Raises TypeError for anything that is not an Item.
        """
        if not isinstance(item, Item):
            # BUG FIX: the original did "Unexpected " + type(item), which
            # itself raises TypeError (str + type) with a confusing message
            # before the intended one could be built.
            raise TypeError("Unexpected " + type(item).__name__)
        self.inventory.append(item)

    def get_selling_offers(self):
        """Return (item, price) pairs for everything in stock, marked up."""
        return [(item, item.value * self.markup) for item in self.inventory]

    def get_buying_offers(self, items):
        """Return (item, price) pairs the merchant would pay for *items*."""
        return [(item, item.value * self.markdown) for item in items]
class Town(Room):
    """A named Room; game code may also attach a `merchant` attribute."""

    def __init__(self, name, room_configuration=None):
        # BUG FIX: the original used a shared mutable default ({}).
        # Normalise to a fresh dict here so the fix does not depend on
        # Room's own default handling.
        super().__init__({} if room_configuration is None else room_configuration)
        self.name = name
class Item:
    """A game item with a name, a description and a non-negative value."""

    def __init__(self, name, description, value):
        self.name = name
        self.description = description
        # Route through the property setter so negative values are rejected
        # at construction time too.
        self.value = value  # The item's monetary value

    @property
    def value(self):
        """The item's monetary value (never negative)."""
        # BUG FIX: the original getter returned self.value -- infinite
        # recursion -- and its setter was decorated onto the name `x`, so
        # `self.value = ...` raised AttributeError. Back the property with
        # the private attribute _value instead.
        return self._value

    @value.setter
    def value(self, value):
        if value < 0:
            raise ValueError("Item value cannot be less than 0")
        self._value = value
class Weapon(Item):
    """An Item that can also deal damage."""

    def __init__(self, name, description, damage=0, value=0):
        # Damage dealt per hit; the monetary value is handled by Item.
        self.damage = damage
        super().__init__(name, description, value)
# Demo/driver script exercising the classes above.
# Create new place with the name "My Place"
my_place = Town("My Place")
# Create new merchant with markup=1.2 and markdown=0.8
my_merchant = Merchant(1.2, 0.8)
# Attach the merchant to the place
my_place.merchant = my_merchant
# Create new weapon with the name "Sword", the description "A plain sword."
# a damage value of 20, and a monetary value of 10
sword = Weapon("Sword", "A plain sword.", 20, 10)
# Ditto
axe = Weapon("Axe", "A plain axe.", 30, 20)
# Ditto
pickaxe = Weapon("Pickaxe", "A plain pickaxe.", 10, 10)
# Add our weapons to the merchant we attached to our place
my_place.merchant.add_item(sword)
my_place.merchant.add_item(axe)
my_place.merchant.add_item(pickaxe)
# Create a new room
# Pass the configuration dict, which says that the east door should lead to my_place
starting_room = Room({'east': my_place})
# Get selling offers from the merchant in the place that is behind the east door of our room
selling_offers = starting_room.doors['east'].merchant.get_selling_offers()
# NOTE(review): this loop just copies (item, price) pairs verbatim --
# selling_offers_formatted ends up equal to selling_offers.
selling_offers_formatted = []
for offer in selling_offers:
    selling_offers_formatted.append((offer[0], offer[1]))
print(selling_offers_formatted)
8afc0186fa35a25692ee8f895873d888c5f0540e | Create helm.py | helm.py | helm.py | """
Provides some helper functions for helm.
(C) Oliver Schoenborn
License: modified MIT, ie MIT plus the following restriction: This code
can be included in your code base only as the complete file, and this
license comment cannot be removed or changed. This code was taken from
https://github.com/schollii/sandals/blob/master/json_sem_hash.py.
GIT_COMMIT: EDIT WHEN COPY
If you find modifications necessary, please contribute a PR so that the
open-source community can benefit the same way you benefit from this file.
"""
import hashlib
import json
import shutil
from pathlib import Path
from subprocess import run, PIPE
from tempfile import TemporaryDirectory
from typing import Union, List, Tuple
import pyaml
from json_sem_hash import get_json_sem_hash, JsonTree
def get_proc_out(cmd: Union[str, List[str]]) -> str:
    """
    Run the given command through subprocess.run() and return the
    stripped stdout as a string.

    A string command is executed through the shell; a list is executed
    directly (argv style).
    """
    # isinstance() instead of `type(cmd) is str`: idiomatic and also
    # accepts str subclasses.
    shell = isinstance(cmd, str)
    proc = run(cmd, shell=shell, stdout=PIPE)
    return proc.stdout.decode('UTF-8').strip()
def get_helm_install_merged_values_hash(
        service_name: str, namespace: str, *xtra_helm_args,
        values_files: List[str] = None, sets: List[str] = None,
        chart_dir_path: str = None) -> Tuple[str, JsonTree]:
    """
    Get a hash that represents the merged values from a helm install command
    for a given service name, values files, sets, additional helm args, into
    a specific namespace. The chart path should be the relative or absolute
    path to the folder containing the Chart.yaml. The namespace is needed
    because we don't know whether a dry-run (used to compute the hash) uses it.

    Returns a pair: the first item is the hash value, the second is the dict
    of merged values produced by the dry-run.
    """
    # Dry-run the upgrade so helm itself performs the values merge for us.
    dry_run_cmd = [
        'helm',
        'upgrade',
        '--install',
        '--namespace',
        namespace,
        service_name,
        chart_dir_path,
        '--dry-run',
        '--output',
        'json',
    ]
    if values_files:
        for vf in values_files:
            dry_run_cmd.append('-f')
            dry_run_cmd.append(vf)
    if sets:
        for s in sets:
            dry_run_cmd.append('--set')
            dry_run_cmd.append(s)
    dry_run_cmd.extend(xtra_helm_args)
    # The 'config' key of helm's JSON output holds the fully merged values.
    merged_values = json.loads(get_proc_out(dry_run_cmd))['config']
    # Semantic (structure-based) hash so equivalent configs hash identically.
    config_hash = get_json_sem_hash(merged_values, hasher=hashlib.md5)
    return config_hash, merged_values
def create_versioned_chart(
        service_name: str, build_tag: str, namespace: str, *xtra_helm_args,
        values_files: List[str] = None, sets: List[str] = None,
        chart_dir_path: str = None, chart_prefix: str = '') -> Tuple[str, str, str]:
    """
    Create a versioned chart at given path, for the given service, build tag, values
    files, and sets, for later installation in given namespace. The chart path is
    path to the folder containing the Chart.yaml. By default, this is
    chart/{chart_prefix}{service_name}.

    The return value is a triplet, identifying the packaged chart's full name,
    (which will start with chart prefix), chart version string (contained in
    full name), and the full hash of the values used for (prior or to be done)
    installation.
    """
    if not chart_dir_path:
        chart_dir_path = f'chart/{chart_prefix}{service_name}'

    values_hash, merged_values = get_helm_install_merged_values_hash(
        service_name, namespace, *xtra_helm_args,
        chart_dir_path=chart_dir_path, values_files=values_files, sets=sets)

    # installation tag is the md5 hash (split into 4 dot-separated segments, for readability)
    # seg_len = 8
    # num_segs = int(len(config_hash)/seg_len)
    # install_tag = '.'.join(config_hash[seg_len * i: seg_len * i + seg_len] for i in range(num_segs))

    # installation tag is first N chars of the md5 hash
    keep_len = 12
    install_tag = values_hash[:keep_len]

    # Chart version is <build tag>-<values hash prefix>, so a config change
    # alone (same build) still produces a new chart version.
    chart_version = f'{build_tag}-{install_tag}'
    print('Chart version:', chart_version)
    # print('App "version":', install_tag)

    with TemporaryDirectory(prefix='chart-', suffix=f'-{chart_version}') as tmpdirname:
        print(f'Created tmp chart folder {tmpdirname}')
        tmp_chart_path = Path(tmpdirname, Path(chart_dir_path).name)
        shutil.copytree(chart_dir_path, tmp_chart_path)
        # run(f'ls -R {tmpdirname}', shell=True)

        # Bake the fully merged values into the copied chart's values.yaml so
        # the packaged chart is standalone (installable without -f/--set).
        tmp_values_path = Path(tmp_chart_path, 'values.yaml')
        tmp_values_path.write_text(pyaml.dump(merged_values))
        # print(tmp_values_path.read_text(), '\n')

        # `helm package` writes the .tgz into the current working directory.
        package_cmd = f'helm package {tmp_chart_path} --version {chart_version}'
        run(package_cmd, shell=True)

    chart_fullname = f'{chart_prefix}{service_name}-{chart_version}.tgz'
    assert Path(chart_fullname).exists()
    return chart_fullname, chart_version, values_hash
def install_chart(
        service_name: str, build_tag: str, namespace: str, *xtra_helm_args,
        values_files: List[str] = None, sets: List[str] = None,
        chart_dir_path: str = None, chart_prefix: str = '') -> Tuple[str, str, str]:
    """
    Install a helm chart (via helm upgrade --install). This uses a somewhat unusual
    convention for the chart version and app version: there is no app version, and
    instead the chart version is the build tag + an md5 hash that represents the
    merged values (all values files, all sets, and the default values file of the
    chart). Moreover, the chart is repackaged to contain the merged values instead
    of the original default values file.

    The final chart is therefore completely standalone: it contains all the merged
    values, and the Chart.yaml has the chart version computed. It can be installed
    repeatedly without any values files. It can also be used as basis for other
    installations that also use this method.

    This function returns the triplet returned by create_versioned_chart().
    """
    chart_fullname, chart_version, values_hash = create_versioned_chart(
        service_name, build_tag, namespace, *xtra_helm_args,
        values_files=values_files, sets=sets,
        chart_dir_path=chart_dir_path, chart_prefix=chart_prefix)

    # No -f/--set here on purpose: the merged values were baked into the
    # packaged chart by create_versioned_chart().
    install_cmd = [
        'helm',
        'upgrade',
        '--install',
        '--namespace',
        namespace,
        service_name,
        chart_fullname,
    ] + list(xtra_helm_args)
    print(' '.join(install_cmd))
    run(install_cmd)
    return chart_fullname, chart_version, values_hash
| Python | 0.000001 | |
e9eed6b6e99e948ed2863fcc45a037b0e2b1e80f | fix import in tornado worker | gunicorn/workers/gtornado.py | gunicorn/workers/gtornado.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop, PeriodicCallback
from gunicorn.workers.base import Worker
class TornadoWorker(Worker):
def watchdog(self):
self.notify()
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s" % self)
self.ioloop.stop()
def run(self):
self.socket.setblocking(0)
self.ioloop = IOLoop.instance()
PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
server = HTTPServer(self.app, io_loop=self.ioloop)
server._socket = self.socket
server.start(num_processes=1)
self.ioloop.start()
| # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop, PeriodicCallback
from gunicorn.workers.base import Worker
class TornadoWorker(Worker):
def watchdog(self):
self.notify()
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s" % self)
self.ioloop.stop()
def run(self):
self.socket.setblocking(0)
self.ioloop = IOLoop.instance()
PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
server = HTTPServer(self.app, io_loop=self.ioloop)
server._socket = self.socket
server.start(num_processes=1)
self.ioloop.start()
| Python | 0.000001 |
96451643294003992e6d73ec34876badae177ed8 | Add PULSE support | libpebble2/communication/transports/pulse.py | libpebble2/communication/transports/pulse.py | from __future__ import absolute_import
__author__ = 'Liam McLoughlin'
import struct
try:
import pulse2
except ImportError:
pass
from . import BaseTransport, MessageTargetWatch
from libpebble2.exceptions import ConnectionError, PebbleError
class PULSETransport(BaseTransport):
"""
Represents a direct connection to a physical/virtual Pebble uses the PULSEv2 interface.
This transport expects to be given a PULSE2 Link object.
:param connection: A PULSE2 Interface object to tunnel Pebble Protocol over.
:type link: pulse2.link.Interface
"""
must_initialise = True
PPOPULSE_PORT = 0x3e22
OPCODE_PROTOCOL_DATA = 0x1
OPCODE_CONNECT = 0x2
OPCODE_DISCONNECT = 0x3
def __init__(self, link):
self.link = link
self.connection = None
self.buffer = b''
try:
import pulse2
except ImportError:
raise PebbleError('pulse2 package not installed: it is required for PULSE transport')
@staticmethod
def _opcode(opcode):
return struct.pack('B', opcode)
@staticmethod
def _chunks(list_items, chunk_length):
for i in xrange(0, len(list_items), chunk_length):
yield list_items[i:i+chunk_length]
def connect(self):
self.connection = self.link.open_socket('reliable', self.PPOPULSE_PORT)
if not self.connection:
raise ConnectionError('Failed to open PPoPULSE socket')
self._send_with_opcode(self.OPCODE_CONNECT)
def disconnect(self):
if self.connected:
try:
self._send_with_opcode(self.OPCODE_DISCONNECT)
except pulse2.exceptions.SocketClosed:
pass
self.connection.close()
self.connection = None
@property
def connected(self):
return self.connection is not None
def read_packet(self):
while self.connected:
if len(self.buffer) >= 2:
length, = struct.unpack('!H', self.buffer[:2])
length += 4
if len(self.buffer) >= length:
msg_data = self.buffer[:length]
self.buffer = self.buffer[length:]
return MessageTargetWatch(), msg_data
try:
packet = self.connection.receive(block=True)
except (AttributeError, pulse2.exceptions.SocketClosed):
self.connection = None
raise ConnectionError('PULSE transport closed')
assert packet[0] == self._opcode(self.OPCODE_PROTOCOL_DATA)
self.buffer += packet[1:]
def send_packet(self, message, target=MessageTargetWatch()):
assert isinstance(target, MessageTargetWatch)
for chunk in self._chunks(message, self.connection.mtu - 1):
self._send_with_opcode(self.OPCODE_PROTOCOL_DATA, chunk)
def _send_with_opcode(self, opcode, body=None):
assert self.connected
data = self._opcode(opcode)
if body:
data += body
self.connection.send(data)
| Python | 0 | |
4fd03b93f7c2ff31b6a7ab6bf6d404cc579a6bf8 | Rewrite download_hash in Python (#5995) | scripts/download_hash.py | scripts/download_hash.py | #!/usr/bin/env python3
# After a new version of Kubernetes has been released,
# run this script to update roles/download/defaults/main.yml
# with new hashes.
import hashlib
import sys
import requests
from ruamel.yaml import YAML
MAIN_YML = "../roles/download/defaults/main.yml"
def open_main_yaml():
    """Load roles/download/defaults/main.yml with round-trip settings.

    Returns a (data, yaml) tuple so the caller can dump the data back with
    the same formatting-preserving YAML instance.
    """
    yaml = YAML()
    yaml.explicit_start = True     # keep the leading '---' document marker
    yaml.preserve_quotes = True    # do not rewrite existing quoting
    yaml.width = 4096              # avoid re-wrapping long lines
    with open(MAIN_YML, "r") as main_yml:
        data = yaml.load(main_yml)
    return data, yaml
def download_hash(versions):
    """Fetch kubelet/kubectl/kubeadm binaries for each version and arch,
    compute their sha256 checksums and write them into main.yml.

    :param versions: iterable of Kubernetes versions ("1.18.0" or "v1.18.0")
    """
    architectures = ["arm", "arm64", "amd64"]
    downloads = ["kubelet", "kubectl", "kubeadm"]

    data, yaml = open_main_yaml()

    for download in downloads:
        checksum_name = f"{download}_checksums"
        for arch in architectures:
            for version in versions:
                # Normalise bare versions ("1.18.0") to tag form ("v1.18.0").
                if not version.startswith("v"):
                    version = f"v{version}"
                url = f"https://storage.googleapis.com/kubernetes-release/release/{version}/bin/linux/{arch}/{download}"
                # Download the whole binary into memory and hash it.
                download_file = requests.get(url, allow_redirects=True)
                download_file.raise_for_status()
                sha256sum = hashlib.sha256(download_file.content).hexdigest()
                data[checksum_name][arch][version] = sha256sum

    # Dump back with the same round-trip YAML instance to preserve format.
    with open(MAIN_YML, "w") as main_yml:
        yaml.dump(data, main_yml)
        print(f"\n\nUpdated {MAIN_YML}\n")
def usage():
    """Print the command-line usage synopsis to stdout."""
    message = f"USAGE:\n    {sys.argv[0]} [k8s_version1] [[k8s_version2]....[k8s_versionN]]"
    print(message)
def main(argv=None):
    """Entry point: validate the argument list and run the hash update.

    Falls back to sys.argv[1:] when no explicit argv is given; exits with
    status 1 (after printing usage) when no versions were supplied.
    """
    args = argv if argv else sys.argv[1:]
    if not args:
        usage()
        sys.exit(1)
    download_hash(args)
if __name__ == "__main__":
sys.exit(main())
| Python | 0 | |
41e115305a4de332689199ecf6a52a37c0d72e55 | Fix #108 | src/collectors/memory/memory.py | src/collectors/memory/memory.py | # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemTotal',
'MemFree',
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk',
'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
    """Publishes memory statistics, preferring /proc/meminfo and falling
    back to psutil on platforms without procfs."""

    PROC = '/proc/meminfo'

    def get_default_config_help(self):
        config_help = super(MemoryCollector, self).get_default_config_help()
        config_help.update({
            'detailed': 'Set to True to Collect all the nodes',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemoryCollector, self).get_default_config()
        config.update({
            'path': 'memory',
            # Collect all the nodes or just a few standard ones?
            # Uncomment to enable
            #'detailed': 'True'
        })
        return config

    def collect(self):
        """
        Collect memory stats
        """
        if os.access(self.PROC, os.R_OK):
            file = open(self.PROC)
            data = file.read()
            file.close()

            for line in data.splitlines():
                try:
                    # Lines look like "MemTotal:  8029212 kB".
                    name, value, units = line.split()
                    name = name.rstrip(':')
                    value = int(value)

                    # Unless 'detailed' is configured, publish only the
                    # well-known keys in _KEY_MAPPING.
                    if (name not in _KEY_MAPPING
                        and 'detailed' not in self.config):
                        continue

                    for unit in self.config['byte_unit']:
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit=units,
                                                                 newUnit=unit)
                        self.publish(name, value, metric_type='GAUGE')

                        # TODO: We only support one unit node here. Fix it!
                        break

                except ValueError:
                    # Header/malformed lines do not split into 3 fields.
                    continue
            return True
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No memory metrics retrieved')
                return None

            phymem_usage = psutil.phymem_usage()
            virtmem_usage = psutil.virtmem_usage()
            units = 'B'

            for unit in self.config['byte_unit']:
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('MemTotal', value, metric_type='GAUGE')

                # 'available' (not 'free') is intentional: it accounts for
                # reclaimable buffers/cache (see module docstring).
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.available, oldUnit=units, newUnit=unit)
                self.publish('MemFree', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('SwapTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('SwapFree', value, metric_type='GAUGE')

                # TODO: We only support one unit node here. Fix it!
                break

            return True
        return None
| # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemTotal',
'MemFree',
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk',
'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
    """Publishes memory statistics, preferring /proc/meminfo and falling
    back to psutil on platforms without procfs."""

    PROC = '/proc/meminfo'

    def get_default_config_help(self):
        config_help = super(MemoryCollector, self).get_default_config_help()
        config_help.update({
            'detailed': 'Set to True to Collect all the nodes',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemoryCollector, self).get_default_config()
        config.update({
            'path': 'memory',
            # Collect all the nodes or just a few standard ones?
            # Uncomment to enable
            #'detailed': 'True'
        })
        return config

    def collect(self):
        """
        Collect memory stats
        """
        if os.access(self.PROC, os.R_OK):
            file = open(self.PROC)
            data = file.read()
            file.close()

            for line in data.splitlines():
                try:
                    # Lines look like "MemTotal:  8029212 kB".
                    name, value, units = line.split()
                    name = name.rstrip(':')
                    value = int(value)

                    # Unless 'detailed' is configured, publish only the
                    # well-known keys in _KEY_MAPPING.
                    if (name not in _KEY_MAPPING
                        and 'detailed' not in self.config):
                        continue

                    for unit in self.config['byte_unit']:
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit=units,
                                                                 newUnit=unit)
                        self.publish(name, value, metric_type='GAUGE')

                        # TODO: We only support one unit node here. Fix it!
                        break

                except ValueError:
                    # Header/malformed lines do not split into 3 fields.
                    continue
            return True
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No memory metrics retrieved')
                return None

            phymem_usage = psutil.phymem_usage()
            virtmem_usage = psutil.virtmem_usage()
            units = 'B'

            for unit in self.config['byte_unit']:
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('MemTotal', value, metric_type='GAUGE')

                # Fixed: report 'available' rather than 'free'. 'free'
                # excludes reclaimable buffers/cache and so under-reports
                # usable memory (see module docstring / linuxatemyram.com);
                # this also matches the sibling copy of this collector
                # earlier in the file.
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.available, oldUnit=units, newUnit=unit)
                self.publish('MemFree', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('SwapTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('SwapFree', value, metric_type='GAUGE')

                # TODO: We only support one unit node here. Fix it!
                break

            return True
        return None
| Python | 0.000001 |
3b27b1d6b1c4739b8d456703542ec8182ce12277 | Add a Wordpress+MySQL composed instance functional test case | heat/tests/functional/test_WordPress_Composed_Instances.py | heat/tests/functional/test_WordPress_Composed_Instances.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', 'composed', 'WordPressComposedInstances'])
class WordPressComposedInstancesFunctionalTest(unittest.TestCase):
    """Functional test: boot the WordPress+MySQL composed-instances template
    and verify the stack serves a working WordPress installation."""
    def setUp(self):
        # Build a JEOS image, launch the stack and wait until cfn-init has
        # applied the template's user data, then open an ssh session.
        template = 'WordPress_Composed_Instances.template'

        self.func_utils = util.FuncUtils()
        self.func_utils.prepare_jeos('F17', 'x86_64', 'cfntools')
        self.func_utils.create_stack(template, 'F17')
        self.func_utils.check_cfntools()
        self.func_utils.wait_for_provisioning()
        self.func_utils.check_user_data(template)
        self.ssh = self.func_utils.get_ssh_client()

    def test_instance(self):
        # ensure wordpress was installed by checking for expected
        # configuration file over ssh
        wp_file = '/etc/wordpress/wp-config.php'
        stdin, stdout, sterr = self.ssh.exec_command('ls ' + wp_file)
        result = stdout.readlines().pop().rstrip()
        assert result == wp_file

        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.func_utils.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        assert True == ver.verify_wordpress(stack_url)

        # Tear down the stack (only reached when the assertions pass).
        self.func_utils.cleanup()
| Python | 0.000002 | |
1172287e38f623994b039cea0dab36ea68d18471 | add RabbitService | ics_demo/remote_services/demo.py | ics_demo/remote_services/demo.py | from base import Service
from ics_demo.helpers.base import uuidgen
class RabbitService(Service):
    """Remote 'rabbit' demo service.

    Each rabbit owns /tmp/<name> on the remote host, with a carrots/
    sub-directory containing one empty file per carrot.
    """
    def it_is_my_warren(self, name):
        # Create the rabbit's home directory (idempotent thanks to -p).
        cmd = 'mkdir -p /tmp/%s' % name
        self.remote_cmd_quiet(cmd)

    def put_carrot_bucket_in_my_warren(self, rabbit):
        # No -p here, so the remote mkdir errors if the bucket exists.
        cmd = 'mkdir /tmp/%s/carrots' % rabbit.name
        self.remote_cmd_quiet(cmd)

    def put_a_carrot(self, rabbit):
        # A carrot is an empty file named with a freshly generated uuid.
        cmd = 'touch /tmp/%s/carrots/%s' % (rabbit.name, uuidgen())
        self.remote_cmd_quiet(cmd)

    def my_carrots(self, rabbit):
        # Return the carrot file names as a list.
        cmd = 'ls /tmp/%s/carrots/' % rabbit.name
        return self.remote_cmd_list(cmd)
| Python | 0 | |
8de10ac1bf133c41cc1d0e330714e1659e42b092 | add script to write consul-manager ip to a text file | consul/get_consul_manager_ip.py | consul/get_consul_manager_ip.py |
import os
import digitalocean
TOKEN_FILE = "/srv/secrets-newsblur/keys/digital_ocean.token"
with open(TOKEN_FILE) as f:
token = f.read().strip()
os.environ['DO_API_TOKEN'] = token
manager = digitalocean.Manager(token=token)
my_droplets = manager.get_all_droplets()
consul_manager_droplet = [d for d in my_droplets if d.name == "consul-manager"][0]
consul_manager_ip_address = consul_manager_droplet.ip_address
# write or overwrite the consul-manager ip
if "consul_manager_ip.txt" not in os.listdir('ansible'):
with open('consul_manager_ip.txt', 'w') as f:
f.write(consul_manager_ip_address) | Python | 0 | |
8f362afc34c79a90f2b1ca332a82e9901a150d64 | add the ability to start program without network connection | lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py | lib/ansible/modules/extras/cloud/vmware/vmware_vm_shell.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ritesh Khadgaray <khadgaray () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation (YAML, rendered by ansible-doc).
# Fixed user-visible typos: "seperated" -> "separated",
# "envirnoment" -> "environment".
DOCUMENTATION = '''
---
module: vmware_vm_shell
short_description: Execute a process in VM
description:
    - Start a program in a VM without the need for network connection
version_added: 2.0
author: "Ritesh Khadgaray (@ritzk)"
notes:
    - Tested on vSphere 5.5
    - Only the first match against vm_id is used, even if there are multiple matches
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    hostname:
        description:
            - The hostname or IP address of the vSphere vCenter API server
        required: True
    username:
        description:
            - The username of the vSphere vCenter
        required: True
        aliases: ['user', 'admin']
    password:
        description:
            - The password of the vSphere vCenter
        required: True
        aliases: ['pass', 'pwd']
    datacenter:
        description:
            - The datacenter hosting the VM
        required: False
    vm_id:
        description:
            - The identification for the VM
        required: True
    vm_id_type:
        description:
            - The identification tag for the VM
        default: dns_name
        choices:
            - 'uuid'
            - 'dns_name'
            - 'inventory_path'
        required: False
    vm_username:
        description:
            - The user to connect to the VM.
        required: False
    vm_password:
        description:
            - The password used to login to the VM.
        required: False
    vm_shell:
        description:
            - The absolute path to the program to start. On Linux this is executed via bash.
        required: True
    vm_shell_args:
        description:
            - The argument to the program.
        required: False
        default: None
    vm_shell_env:
        description:
            - Comma separated list of environment variables, specified in the guest OS notation
        required: False
        default: None
    vm_shell_cwd:
        description:
            - The current working directory of the application from which it will be run
        required: False
        default: None
'''
EXAMPLES = '''
- name: shell execution
local_action:
module: vmware_vm_shell
hostname: myVSphere
username: myUsername
password: mySecret
datacenter: myDatacenter
vm_id: DNSnameOfVM
vm_username: root
vm_password: superSecret
vm_shell: /bin/echo
vm_shell_args: " $var >> myFile "
vm_shell_env:
- "PATH=/bin"
- "var=test"
vm_shell_cwd: "/tmp"
'''
# pyVmomi is an optional dependency; its absence is reported in main().
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
def find_vm(content, vm_id, vm_id_type="dns_name", datacenter=None):
    """Locate a virtual machine via the vSphere SearchIndex.

    :param content: service instance content (provides searchIndex)
    :param vm_id: identifier interpreted according to vm_id_type
    :param vm_id_type: one of 'dns_name', 'inventory_path', 'uuid'
    :param datacenter: optional datacenter name to narrow the search
    :return: the VirtualMachine object, or None when not found
    """
    si = content.searchIndex
    vm = None

    if datacenter:
        datacenter = find_datacenter_by_name(content, datacenter)

    if vm_id_type == 'dns_name':
        vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
    elif vm_id_type == 'inventory_path':
        vm = si.FindByInventoryPath(inventoryPath=vm_id)
        # FindByInventoryPath can return any managed entity (folder, host,
        # ...), so only accept an actual VirtualMachine.  The previous
        # check compared type(vm) against type(vim.VirtualMachine) -- i.e.
        # against the metaclass -- which was never equal and therefore
        # discarded every successful lookup.
        if not isinstance(vm, vim.VirtualMachine):
            vm = None
    elif vm_id_type == 'uuid':
        vm = si.FindByUuid(datacenter=datacenter, uuid=vm_id, vmSearch=True)

    return vm
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None):
    """Start *program_path* inside the guest via VMware Tools.

    Authenticates with the in-guest credentials and returns the PID of the
    started process; the call does not wait for the program to finish.
    """
    creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
    cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, envVariables=env, programPath=program_path, workingDirectory=cwd)
    cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec)
    return cmdpid
def main():
    """Module entry point: resolve the VM and start the requested program."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter=dict(default=None, type='str'),
                              vm_id=dict(required=True, type='str'),
                              vm_id_type=dict(default='dns_name', type='str', choices=['inventory_path', 'uuid', 'dns_name']),
                              vm_username=dict(required=False, type='str'),
                              vm_password=dict(required=False, type='str', no_log=True),
                              vm_shell=dict(required=True, type='str'),
                              vm_shell_args=dict(default=" ", type='str'),
                              vm_shell_env=dict(default=None, type='list'),
                              vm_shell_cwd=dict(default=None, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    try:
        p = module.params
        # Connect to vCenter and resolve the target VM.
        content = connect_to_api(module)
        vm = find_vm(content, p['vm_id'], p['vm_id_type'], p['datacenter'])
        if not vm:
            module.fail_json(msg='failed to find VM')
        # msg is the PID of the process started inside the guest.
        msg = execute_command(content, vm, p['vm_username'], p['vm_password'],
                              p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd'])
        # NOTE(review): changed=False even though a process was started in
        # the guest -- confirm this is intentional.
        module.exit_json(changed=False, virtual_machines=vm.name, msg=msg)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| Python | 0.000001 | |
32f4055b52c8768c80cf82451f6ace74af600d0c | test new analyze rewrite | lib/neuroimaging/refactoring/tests/test_analyze.py | lib/neuroimaging/refactoring/tests/test_analyze.py | import unittest
from neuroimaging.refactoring.analyze import AnalyzeImage
from neuroimaging.tests.data import repository
from neuroimaging.visualization.arrayview import arrayview
class AnalyzeImageTest(unittest.TestCase):
    """Smoke tests for AnalyzeImage using the bundled 'rho' test volume."""
    def setUp(self):
        # Load the sample image from the shared test-data repository.
        self.image = AnalyzeImage("rho", datasource=repository)
    def test_header(self):
        # Accessing raw_array forces the header and data to be parsed.
        self.image.raw_array
    def test_arrayview(self):
        # Mainly checks that the viewer can consume the raw array.
        arrayview(self.image.raw_array)
if __name__ == '__main__': unittest.main()
| Python | 0.000002 | |
6feae8e14b4e690cb0d5c71880b9d6c167ac978b | add stub for a csv scraping script | ipeds_reporter/scripts/scrape.py | ipeds_reporter/scripts/scrape.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WIP thing to scrape ipeds for me.
"""
from selenium import webdriver
def main():
    """Open a Firefox WebDriver session and immediately close it (stub)."""
    browser = webdriver.Firefox()
    browser.close()
if __name__ == '__main__':
main()
| Python | 0 | |
4241d67149887f8edc0636f7cb4fdbcb22e8e98b | Create repeatings.py | job_interview_algs/repeatings.py | job_interview_algs/repeatings.py | TEXT = """abba com mother bill mother com
abba dog abba mother com"""
def secuenced_words(txt):
    """Return the most frequently repeated three-word group in *txt*.

    A sliding window of three consecutive words is counted as an unordered
    group (frozenset), so "a b c" and "c b a" count as the same group.
    The group with the highest count is returned as a sorted list of its
    words; [] is returned for texts shorter than three words.

    Fixes: the previous implementation returned ``sorted(collector)[0]``
    -- the first group in sort order -- rather than the most frequent one,
    contradicting its own description.  It also used Python-2-only
    ``xrange``.
    """
    words = txt.split()
    counts = {}
    for idx in range(1, len(words) - 1):
        # Unordered window of words[idx-1], words[idx], words[idx+1].
        group = frozenset(words[idx - 1:idx + 2])
        counts[group] = counts.get(group, 0) + 1
    if not counts:
        return []
    most_common = max(counts, key=counts.get)
    return sorted(most_common)
if __name__ == "__main__":
print(secuenced_words(TEXT))
| Python | 0.000006 | |
3bd95d8789871246fb90c6eb0487d9746ef5cb27 | Migrate all project contents blocks to activity contents blocks | bluebottle/cms/migrations/0056_auto_20191106_1041.py | bluebottle/cms/migrations/0056_auto_20191106_1041.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-06 09:41
from __future__ import unicode_literals
from django.db import migrations
def migrate_project_blocks(apps, schema_editor):
    """Replace every ProjectsContent CMS block with an ActivitiesContent one.

    Field values are copied across, `from_homepage` becomes `highlighted`,
    and each linked project is mapped via its slug to the matching
    initiative's activities.  The original ProjectsContent is deleted.
    """
    ProjectsContent = apps.get_model('cms', 'ProjectsContent')
    ActivitiesContent = apps.get_model('cms', 'ActivitiesContent')
    Initiative = apps.get_model('initiatives', 'Initiative')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    # New blocks must carry the polymorphic ctype of ActivitiesContent.
    activity_content_ctype = ContentType.objects.get_for_model(ActivitiesContent)

    for projects_content in ProjectsContent.objects.all():
        activities_content = ActivitiesContent.objects.create(
            title=projects_content.title,
            sub_title=projects_content.sub_title,
            sort_order=projects_content.sort_order,
            placeholder=projects_content.placeholder,
            parent_id=projects_content.parent_id,
            language_code=projects_content.language_code,
            polymorphic_ctype_id=activity_content_ctype.pk,
            parent_type_id=projects_content.parent_type_id,
            highlighted=projects_content.from_homepage
        )
        for project in projects_content.projects.all():
            # Assumes each project has an initiative sharing its slug;
            # raises Initiative.DoesNotExist otherwise -- confirm.
            initiative = Initiative.objects.get(slug=project.slug)
            for activity in initiative.activities.all():
                activities_content.activities.add(activity)

        activities_content.save()
        projects_content.delete()
class Migration(migrations.Migration):
    """Converts legacy ProjectsContent CMS blocks into ActivitiesContent.

    No reverse operation is provided, so this migration is irreversible.
    """

    dependencies = [
        ('cms', '0055_migrate_statistics'),
    ]

    operations = [
        migrations.RunPython(migrate_project_blocks)
    ]
| Python | 0 | |
d64ab6f099caf90941fdb6eb99d89fbacc9e2378 | add deepbench | wa/workloads/deepbench/__init__.py | wa/workloads/deepbench/__init__.py | # Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=E1101,W0201
import os
import re
import pandas as pd
from wa import Workload, Parameter, Alias, Executable
from wa.utils.types import numeric
class Deepbench(Workload):

    name = 'deepbench'
    description = """
    Benchmarks operations that are important to deep learning. Including GEMM
    and convolution.

    The benchmark and its documentation are available here:

        https://github.com/baidu-research/DeepBench

    .. note:: parameters of matrices used in each sub-test are added as
              classifiers to the metrics. See the benchmark documentation
              for the explanation of the various parameters

    .. note:: at the moment only the "Arm Benchmarks" subset of DeepBench
              is supported.

    """

    parameters = [
        Parameter('test', default='gemm',
                  allowed_values=['gemm', 'conv', 'sparse'],
                  description='''
                  Specifies which of the available benchmarks will be run.

                  gemm
                      Performs GEneral Matrix Multiplication of dense matrices
                      of varying sizes.

                  conv
                      Performs convolutions on inputs in NCHW format.

                  sparse
                      Performs GEneral Matrix Multiplication of sparse matrices
                      of varying sizes, and compares them to corresponding dense
                      operations.
                  '''),
    ]

    aliases = [
        Alias('deep-gemm', test='gemm'),
        Alias('deep-conv', test='conv'),
        Alias('deep-sparse', test='sparse'),
    ]

    # Metric columns emitted by each sub-test; remaining columns become
    # classifiers (see update_output).
    test_metrics = {
        'gemm': ['time (msec)', 'GOPS'],
        'conv': ['fwd_time (usec)'],
        'sparse': ['sparse time (usec)', 'dense time (usec)', 'speedup'],
    }

    lower_is_better = {
        'time (msec)': True,
        'GOPS': False,
        'fwd_time (usec)': True,
        'sparse time (usec)': True,
        'dense time (usec)': True,
        'speedup': False,
    }

    # Class-level cache of on-target binary paths, shared across instances
    # so the executable is installed only once per run.
    installed = {}

    def initialize(self, context):
        """Install the <test>_bench binary on the target (cached)."""
        self.exe_name = '{}_bench'.format(self.test)

        if self.exe_name not in self.installed:
            resource = Executable(self, self.target.abi, self.exe_name)
            host_exe = context.get_resource(resource)
            self.target.killall(self.exe_name)
            self.installed[self.exe_name] = self.target.install(host_exe)
        self.target_exe = self.installed[self.exe_name]

    def setup(self, context):
        # Make sure no stale benchmark process is still running.
        self.target.killall(self.exe_name)

    def run(self, context):
        """Execute the benchmark, killing it on Ctrl-C."""
        self.output = None
        try:
            # The sparse benchmark runs far longer than the others.
            timeout = 10800 if self.test == 'sparse' else 600
            self.output = self.target.execute(self.target_exe, timeout=timeout)
        except KeyboardInterrupt:
            self.target.killall(self.exe_name)
            raise

    def extract_results(self, context):
        # Persist the raw stdout as an artifact for update_output to parse.
        if self.output:
            outfile = os.path.join(context.output_directory, '{}.output'.format(self.test))
            with open(outfile, 'w') as wfh:
                wfh.write(self.output)
            context.add_artifact('deepbench-output', outfile, 'raw', "deepbench's stdout")

    def update_output(self, context):
        """Parse the saved stdout into metrics with per-row classifiers."""
        raw_file = context.get_artifact_path('deepbench-output')
        if not raw_file:
            return
        table = read_result_table(raw_file)
        for _, row in table.iterrows():
            items = dict(row)
            metrics = []
            for metric_name in self.test_metrics[self.test]:
                metrics.append((metric_name, items.pop(metric_name)))
            # Whatever is left over (matrix dimensions etc.) classifies
            # every metric from this row.
            for name, value in metrics:
                context.add_metric(name, value,
                                   lower_is_better=self.lower_is_better[name],
                                   classifiers=items)

    def finalize(self, context):
        # Remove the installed binary (and drop it from the shared cache).
        if self.cleanup_assets:
            if self.exe_name in self.installed:
                self.target.uninstall(self.exe_name)
                del self.installed[self.exe_name]
def numeric_best_effort(value):
    """Convert *value* with wa's ``numeric`` helper; return it unchanged
    when it does not parse as a number."""
    try:
        return numeric(value)
    except ValueError:
        return value
def read_result_table(filepath):
    """Parse a deepbench stdout dump into a pandas DataFrame.

    Expected layout: arbitrary preamble, a '----' rule, a header line
    (column names may contain single spaces; columns are separated by two
    or more spaces or follow a closing parenthesis), another '----' rule,
    then whitespace-separated data rows until a blank line or EOF.
    """
    columns = []
    entries = []
    with open(filepath) as fh:
        try:
            # fast-forward to the header
            line = next(fh)
            while not line.startswith('----'):
                line = next(fh)
            header_line = next(fh)

            haader_sep = re.compile(r'(?<=[) ]) ')
            # Since headers can contain spaces, use two spaces as column separator
            parts = [p.strip() for p in haader_sep.split(header_line)]
            columns = [p for p in parts if p]

            line = next(fh)
            while line.strip():
                if line.startswith('----'):
                    # Skip separator rules interleaved with data rows.
                    line = next(fh)
                # Convert each field to a number where possible.
                row = [numeric_best_effort(i) for i in line.strip().split()]
                entries.append(row)
                line = next(fh)
        except StopIteration:
            # Reaching end-of-file simply terminates parsing.
            pass
    return pd.DataFrame(entries, columns=columns)
| Python | 0.000001 | |
06570a926bde2ea10730062b05a2348c3020745c | Add example: filtered ensemble average. | examples/filter_ensemble_average.py | examples/filter_ensemble_average.py | import numpy as np
import matplotlib.pyplot as plt
import atomic
from ensemble_average import time_dependent_power
if __name__ == '__main__':
    # Time and temperature grids; a single fixed electron density.
    times = np.logspace(-7, 0, 50)
    temperature = np.logspace(0, 3, 50)
    density = 1e19

    from atomic.pec import TransitionPool

    ad = atomic.element('argon')
    tp = TransitionPool.from_adf15('adas_data/pec/*ar*.dat')
    # Keep only transitions with photon energies between 2 and 20 keV.
    ad = tp.filter_energy(2e3, 20e3, 'eV').create_atomic_data(ad)

    rt = atomic.RateEquations(ad)
    y = rt.solve(times, temperature, density)
    # Residence times corresponding to n*tau = 1e14 .. 1e18.
    taus = np.array([ 1e14, 1e15, 1e16, 1e17, 1e18])/density

    # Figure 1: filtered rate coefficients.
    plt.figure(1); plt.clf()
    from filter_construction import plot_coeffs
    plot_coeffs(ad, temperature, 5)
    plt.ylim(1e-35, 1e-30)
    plt.draw()

    # Figure 2: time-dependent power for each residence time.
    plt.figure(2); plt.clf()
    time_dependent_power(y, taus)
    plt.draw()

    # Figure 3: same, but ensemble-averaged.
    plt.figure(3); plt.clf()
    time_dependent_power(y, taus, ensemble_average=True)
    plt.draw()

    plt.show()
| Python | 0 | |
d9985ec4fa37cf99e0e541c7affadd5ec9288a0c | Create multithread.py | APIs/multithread.py | APIs/multithread.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 22:59:02 2018
@author: zhurundong
"""
import time
import requests
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor
# Compare sequential HTTP fetches against a thread pool.
# Fixed: `fetch` was defined twice with identical bodies; the duplicate
# definition has been removed.
NUMBERS = range(12)
URL = 'http://httpbin.org/get?a={}'


def fetch(a):
    """GET the echo endpoint with query parameter *a* and return the
    value the server echoes back."""
    r = requests.get(URL.format(a))
    return r.json()['args']['a']


# --- Sequential baseline ---
start = time.time()
for num in NUMBERS:
    result = fetch(num)
    print('fetch({}) = {}'.format(num, result))
print('cost time: {}'.format(time.time() - start))

# --- Threaded version: overlap the network waits with a pool of 5 ---
start = time.time()
with ThreadPoolExecutor(max_workers=5) as executor:
    for num, result in zip(NUMBERS, executor.map(fetch, NUMBERS)):
        print('fetch({}) = {}'.format(num, result))
print('cost time: {}'.format(time.time() - start))
| Python | 0.000001 | |
# N-Queens II -- follow-up to the N-Queens problem:
# instead of outputting board configurations, return the total number of
# distinct solutions.
class Solution:
    # @param {integer} n
    # @return {integer}
    def totalNQueens(self, n):
        """Count distinct placements of n non-attacking queens on an
        n x n board, via row-by-row depth-first backtracking.

        Fixes: the original used Python-2-only ``xrange`` (NameError on
        Python 3); ``range`` behaves identically here.
        """
        if n == 0:
            return 0
        # Instance attribute so the recursive helper can accumulate into it.
        self.result = 0
        # checklist[row] = column of the queen already placed on that row.
        checklist = [-1 for i in range(n)]
        self.queen_helper(n, 0, checklist)
        return self.result

    def check_helper(self, depth, i, checklist):
        """Return True if a queen at (depth, i) attacks no earlier queen."""
        for k in range(depth):
            # Same column, or same diagonal (equal row/column distance).
            if checklist[k] == i or abs(checklist[k] - i) == abs(depth - k):
                return False
        return True

    def queen_helper(self, n, depth, checklist):
        """Try every column on row *depth*; count completed boards."""
        if depth == n:
            self.result += 1
            return
        for i in range(n):
            if self.check_helper(depth, i, checklist):
                checklist[depth] = i
                self.queen_helper(n, depth + 1, checklist)
| Python | 0.000011 | |
38b4ec7164f07af7135c41c401c4f403c1061d66 | Add skeleton for parsing commands | app/main.py | app/main.py | """lazy
Usage:
lazy (new|n)
lazy (show|s) [<id>]
lazy (delete|d) [<id>]
lazy (import|i) <path>
lazy (export|e) <path> [<id>]
Options:
-h, --help: Show this help message.
"""
from docopt import docopt
def main():
    """Parse the command line with docopt and dispatch to the matching
    (currently stubbed) action."""
    opts = docopt(__doc__)
    task_id = opts['<id>']

    if opts['new'] or opts['n']:
        # TODO: insert a new task.
        pass
    elif opts['show'] or opts['s']:
        if task_id:
            # TODO: show the task whose ID most closely matches task_id.
            pass
        else:
            # TODO: show every task owned by the current user.
            pass
    elif opts['delete'] or opts['d']:
        if task_id:
            # TODO: delete the task whose ID most closely matches task_id.
            pass
        else:
            # TODO: prompt the user for an ID, then delete the task whose
            # ID matches the given one best.
            pass
    elif opts['import'] or opts['i']:
        # TODO: check that the given path exists and import from it.
        pass
    elif opts['export'] or opts['e']:
        # TODO: check that the given path is writable.
        if task_id:
            # TODO: export only the task whose ID matches best.
            pass
        else:
            # TODO: export all of the current user's tasks.
            pass
| Python | 0.000002 | |
b0f4a0abb74bc9f1cf97a49d4501c48d666b6dfe | add qt3 | autoconf/qt3.py | autoconf/qt3.py | from _external import *
import os
def unique(items):
    """Return the elements of *items* with duplicates removed, as a list.

    Fixes: the parameter shadowed the ``list`` builtin, and the previous
    ``dict.fromkeys(...).keys()`` returns a non-sortable ``dict_keys``
    view on Python 3; callers (e.g. subdirs) expect a real list.
    """
    return list(dict.fromkeys(items))
def subdirs(files):
    """Return the sorted, de-duplicated parent directories of *files*.

    Fixes: the previous implementation called ``.sort()`` on the result of
    ``unique()``, which is a ``dict_keys`` view without a ``sort`` method
    on Python 3; building a set and using ``sorted`` works everywhere and
    produces the same sorted-unique list.
    """
    return sorted(set(map(os.path.dirname, files)))
def locateQt3Command(env, command, bindir):
    """Find the Qt3 variant of *command* (moc, uic, ...) in *bindir*.

    Tries the '-qt3' and '3' suffixed names before the bare name; falls
    back to the unsuffixed command (after printing a warning) when none
    can be located.
    """
    #print 'locateQt3Command:', command
    suffixes = [
        '-qt3',
        '3',
        '',
    ]
    progs = [command+s for s in suffixes]
    for prog in progs:
        path = env.WhereIs(prog, path=bindir)
        if path:
            return path
    msg = 'Qt3 command "' + command + '" not found. Tried: ' + str(progs) + '.'
    #raise Exception(msg)
    print 'Warning: ', msg
    return command
class Qt3Checker(LibWithHeaderChecker):
    '''
    Qt3 configuration checker: locates the Qt3 tools, verifies the
    libraries/headers and wires the results into the SCons environment.
    '''

    # NOTE: mutable default arguments are shared across instances; safe
    # here only because they are assigned, never mutated in place.
    def __init__( self,
                  modules = [
                        'qt',
                        'qui',
                    ],
                  uiFiles = [],
                  defines = ['QT_NO_KEYWORDS'],
                  useLocalIncludes = True ):
        self.name = 'qt3'
        self.libs = modules
        self.uiFiles = uiFiles
        self.defines = defines
        self.useLocalIncludes = useLocalIncludes

    def setModules(self, modules):
        # Replace the list of Qt modules (libraries) to check/link.
        self.libs = modules

    def declareUiFiles(self, uiFiles):
        # Register .ui files to be compiled with uic in postconfigure().
        self.uiFiles = uiFiles

    def initOptions(self, project, opts):
        LibWithHeaderChecker.initOptions(self, project, opts)
        # Expose bindir_qt3 so the tool directory can be overridden.
        opts.Add( 'bindir_'+self.name, 'Base directory for '+self.name, os.path.join('$dir_'+self.name, 'bin') )
        return True

    def configure(self, project, env):
        env.EnableQtEmmitters()
        bindir = '$bindir_'+self.name
        # Resolve each Qt3 tool, tolerating distro-specific suffixes.
        moc = locateQt3Command(env, 'moc', bindir)
        uic = locateQt3Command(env, 'uic', bindir)
        rcc = locateQt3Command(env, 'rcc', bindir)
        lupdate = locateQt3Command(env, 'lupdate', bindir)
        lrelease = locateQt3Command(env, 'lrelease', bindir)
        #print 'moc', moc
        env.SetDefault(
            QT_MOC = moc,
            QT_UIC = uic,
            QT_RCC = rcc,
            QT_LUPDATE = lupdate,
            QT_LRELEASE = lrelease,
            )
        # depends on the developer syntax used
        # maybe we need to expose these values as parameters (in initOptions)
        env.Replace(
            QT_UICDECLPREFIX = '', # this is the standard value for qt3
            QT_UICDECLSUFFIX = '.h',
            )
        return BaseLibChecker.configure(self, project, env)

    def check(self, project, conf):
        conf.env.AppendUnique( CPPDEFINES = self.defines )
        # qapplication.h is a reliable witness for a usable Qt3 install.
        result = self.CheckLibWithHeader( conf, self.libs, header=['qapplication.h'], language='c++' )
        return result

    def postconfigure(self, project, env):
        '''
        Special case: allows adding elements to the environment after the
        checks of all the libraries have run.
        '''
        if len(self.uiFiles):
            # Compile the declared .ui files with uic.
            uis = [env.Uic( ui ) for ui in self.uiFiles]
            if self.useLocalIncludes:
                # Make the generated headers findable from their source dirs.
                env.AppendUnique( CPPPATH=subdirs(self.uiFiles) )
        return True
qt3 = Qt3Checker
| Python | 0.010732 | |
b394b9846823b29e9f625b34b820dbbd485b47c9 | "nested sets" example. needs work. | examples/nested_sets/nested_sets.py | examples/nested_sets/nested_sets.py | """Celko's "Nested Sets" Tree Structure.
http://www.intelligententerprise.com/001020/celko.jhtml
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm import attributes
from sqlalchemy.ext.declarative import declarative_base
# In-memory SQLite engine; echo=True prints the emitted SQL so the
# nested-set maintenance statements can be observed when the example runs.
engine = create_engine('sqlite://', echo=True)
Base = declarative_base()
class NestedSetExtension(MapperExtension):
    """MapperExtension that assigns Celko nested-set lft/rgt values on INSERT.

    Each node owns the interval [lft, rgt]; descendants' intervals nest
    strictly inside their ancestors'.  Only insertion is handled here.
    """

    def before_insert(self, mapper, connection, instance):
        if not instance.parent:
            # Root node: occupies the initial interval [1, 2].
            instance.left = 1
            instance.right = 2
        else:
            personnel = mapper.mapped_table
            # The parent's rgt value is the insertion point for the new child.
            right_most_sibling = connection.scalar(
                select([personnel.c.rgt]).where(personnel.c.emp==instance.parent.emp)
            )

            # Open a gap of width 2 at the insertion point: every lft strictly
            # beyond it and every rgt at or beyond it shifts right by 2.
            connection.execute(
                personnel.update(personnel.c.rgt>=right_most_sibling).values(
                    lft = case(
                            [(personnel.c.lft>right_most_sibling, personnel.c.lft + 2)],
                            else_ = personnel.c.lft
                          ),
                    rgt = case(
                            [(personnel.c.rgt>=right_most_sibling, personnel.c.rgt + 2)],
                            else_ = personnel.c.rgt
                          )
                )
            )
            # The new leaf takes the freed interval.
            instance.left = right_most_sibling
            instance.right = right_most_sibling + 1

    # before_update() would be needed to support moving of nodes
    # after_delete() would be needed to support removal of nodes.
    # [ticket:1172] needs to be implemented for deletion to work as well.
class Employee(Base):
    """A node of the personnel tree, positioned by its nested-set interval."""
    __tablename__ = 'personnel'
    __mapper_args__ = {
        'extension':NestedSetExtension(),
        'batch':False  # allows extension to fire for each instance before going to the next.
    }

    # Transient parent reference read by NestedSetExtension.before_insert();
    # not mapped to a database column.
    parent = None

    emp = Column(String, primary_key=True)
    # "lft"/"rgt" column names avoid the SQL keywords LEFT/RIGHT.
    left = Column("lft", Integer, nullable=False)
    right = Column("rgt", Integer, nullable=False)

    def __repr__(self):
        return "Employee(%s, %d, %d)" % (self.emp, self.left, self.right)
Base.metadata.create_all(engine)

session = sessionmaker(bind=engine)()

# Build this tree (parent links drive the lft/rgt assignment on insert):
#   Albert
#     Bert
#     Chuck
#       Donna
#       Eddie
#       Fred
albert = Employee(emp='Albert')
bert = Employee(emp='Bert')
chuck = Employee(emp='Chuck')
donna = Employee(emp='Donna')
eddie = Employee(emp='Eddie')
fred = Employee(emp='Fred')

bert.parent = albert
chuck.parent = albert
donna.parent = chuck
eddie.parent = chuck
fred.parent = chuck

# the order of "add" is important here.  elements must be added in
# the order in which they should be INSERTed.
session.add_all([albert, bert, chuck, donna, eddie, fred])
session.commit()

print session.query(Employee).all()

# 1. Find an employee and all his/her supervisors, no matter how deep the tree.
# An ancestor's interval contains the descendant's lft value.
# (the between() operator in SQLAlchemy has a bug here, [ticket:1171])
ealias = aliased(Employee)
print session.query(Employee).\
    filter(ealias.left>=Employee.left).filter(ealias.left<=Employee.right).\
    filter(ealias.emp=='Eddie').all()

#2. Find the employee and all his/her subordinates. (This query has a nice symmetry with the first query.)
print session.query(Employee).\
    filter(Employee.left.between(ealias.left, ealias.right)).\
    filter(ealias.emp=='Chuck').all()

#3. Find the level of each node, so you can print the tree as an indented listing.
# COUNT of ancestors-or-self minus 1 gives the depth; ordering by lft yields
# depth-first (tree) order.
for indentation, employee in session.query(func.count(Employee.emp).label('indentation') - 1, ealias).\
    filter(ealias.left>=Employee.left).filter(ealias.left<=Employee.right).\
    group_by(ealias.emp).\
    order_by(ealias.left):
    print " " * indentation + str(employee)
| Python | 0.999923 | |
3ed9dd0ca03216311771cda5f9cd3eb954a14d4f | Add boilerplate with simple test sounds | telemeta/management/commands/telemeta-test-boilerplate.py | telemeta/management/commands/telemeta-test-boilerplate.py | from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
import os
from telemeta.models import *
from timeside.core.tools.test_samples import generateSamples
class Command(BaseCommand):
    """Create a boilerplate test collection populated with generated sample sounds.

    NOT for production use: writes generated media files under
    MEDIA_ROOT/items/tests and registers them as MediaItems in a
    MediaCollection coded 'Tests'.
    """
    help = "Setup and run a boilerplate for testing"
    code = 'Tests'

    def handle(self, *args, **options):
        # NOT for production
        # self.processor_cleanup()
        # self.result_cleanup()

        media_dir = os.path.join('items', 'tests')
        # os.path.join copes with MEDIA_ROOT both with and without a trailing
        # separator; plain string concatenation silently produced a wrong path
        # when the separator was missing.
        samples_dir = os.path.join(settings.MEDIA_ROOT, media_dir)
        samples = generateSamples(samples_dir=samples_dir)

        collection, _created = MediaCollection.objects.get_or_create(
            title=self.code, code=self.code)

        # Only the sample filenames are needed; item paths are stored
        # relative to MEDIA_ROOT.
        for filename in samples:
            title = os.path.splitext(filename)[0]
            path = os.path.join(media_dir, filename)
            MediaItem.objects.get_or_create(
                title=title,
                code=self.code + '-' + slugify(filename),
                file=path,
                collection=collection)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.