commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
470063b8d468394432e729e7417c88263614b5f0 | Create msub_cluster.py | snakemake_ChIPseq_pipeline/msub_cluster.py | snakemake_ChIPseq_pipeline/msub_cluster.py | Python | 0.000046 | ||
a7b0fc1effd0e68018bc3c33f1dc0b952b23003b | update nav access restrictions | accelerator/migrations/0095_update_nav_tree_access_restrictions.py | accelerator/migrations/0095_update_nav_tree_access_restrictions.py | # Generated by Django 2.2.24 on 2022-03-29 16:24
from django.db import migrations
def update_nav_access_restrictions(apps, schema_editor):
    """Remove user_type and user_role restrictions from judging nav items.

    Runs as a data migration: looks up the historical NavTreeItem model and
    clears both restriction mechanisms for the three judging URLs.
    """
    NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
    urls = ['/judging/panel/', '/judging/commitments/', '/nav/judging']
    nav_items = NavTreeItem.objects.filter(url__in=urls)
    # remove user_type restrictions
    nav_items.update(user_type='')
    # remove user_role restrictions; a plain loop replaces the original
    # list comprehension that was used only for its side effect
    for nav_item in nav_items:
        nav_item.user_role.clear()
class Migration(migrations.Migration):
    """Data migration clearing nav access restrictions (no-op on reverse)."""

    dependencies = [
        ('accelerator', '0094_alter_startup_organization_fields'),
    ]

    operations = [
        # noop reverse: the cleared restrictions cannot be reconstructed
        migrations.RunPython(update_nav_access_restrictions,
                             migrations.RunPython.noop)
    ]
| Python | 0 | |
645efb8ffcc3c9a3e41db2619430ffcb7a6d570f | Migrate Req/Ass to have verified/finished date | src/ggrc/migrations/versions/20160314140338_4fd36860d196_add_finished_date_to_request_and_.py | src/ggrc/migrations/versions/20160314140338_4fd36860d196_add_finished_date_to_request_and_.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
    """Add columns finished_date and verified_date and populate them."""
    # Both new columns are nullable DateTime fields.
    for column_name in ('finished_date', 'verified_date'):
        op.add_column(table,
                      sa.Column(column_name, sa.DateTime(), nullable=True))
    # Backfill finished_date for objects that already reached a final state.
    op.execute("""
      UPDATE {}
      SET finished_date = updated_at
      WHERE status in ("Finished", "Verified", "Final")
  """.format(table))
    # Verified objects also get a verified_date and collapse into "Final".
    op.execute("""
      UPDATE {}
      SET verified_date = updated_at, status = "Final"
      WHERE status = "Verified"
  """.format(table))
def upgrade():
    """Apply the new date columns to both affected tables."""
    for table_name in ('requests', 'assessments'):
        upgrade_table(table_name)
def downgrade():
    """Remove verified_date and finished_date columns."""
    # Drop in the reverse order of creation: assessments first, then requests.
    for table_name in ('assessments', 'requests'):
        op.drop_column(table_name, 'verified_date')
        op.drop_column(table_name, 'finished_date')
| Python | 0 | |
092a8081e4fcecec0dc18c1eadcec0a46c5c73fb | add the AFL wrapper program | analysis/afl.py | analysis/afl.py | #!/usr/bin/env python
import argparse
import os
import subprocess
from multiprocessing import Process
from functools import reduce
MEMORY='2G'
def getArch(iid):
    """Look up the architecture of image `iid`, normalising to qemu names."""
    rows = dbquery('select arch from image where id=' + iid + ';')
    arch = rows[0][0]
    # armel/mipseb are the DB spellings; the qemu tracers use arm/mips.
    renames = {'armel': 'arm', 'mipseb': 'mips'}
    return renames.get(arch, arch)
def dbquery(query):
    """Run `query` against the firmadyne database.

    Returns cur.fetchall() rows, or None if anything failed.
    """
    import psycopg2
    # Fix: `traceback` was referenced in the handler but never imported,
    # so any DB error raised a NameError instead of being logged.
    import traceback
    db = psycopg2.connect(dbname="firmware", user="firmadyne",
                          password="firmadyne", host="127.0.0.1")
    ret = None
    cur = None  # fix: previously unbound in `finally` if cursor() failed
    try:
        cur = db.cursor()
        cur.execute(query)
        # Fetch inside the try: the original fetched in `finally`, which
        # re-raised after a failed execute.
        ret = cur.fetchall()
    except Exception:
        traceback.print_exc()
    finally:
        if cur:
            cur.close()
        db.close()  # the original leaked the connection
    return ret
def extract(iid, bindir):
    """Extract the image's scored binaries and its library links into bindir."""
    tarball = bindir + '/../../../../images/' + iid + '.tar.gz'

    def untar(names):
        """Extract the given members ('.' prefix makes them archive-relative)."""
        members = ' '.join('.' + name for name in names)
        subprocess.run(['tar xf ' + tarball + ' -C ' + bindir + ' ' + members],
                       shell=True)

    # Comprehensions replace the original reduce/map/filter chains; the
    # shared tar invocation is factored into `untar`.
    print('Extracting binaries......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and score>0 and (mime='application/x-executable; charset=binary' or mime='application/x-object; charset=binary' or mime='application/x-sharedlib; charset=binary') order by score DESC;'''
    untar(row[0] for row in dbquery(query))

    print('Extracting library links......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and regular_file='f';'''
    untar(row[0] for row in dbquery(query) if 'lib' in row[0])
def setenvs(iid):
    """Build the environment for afl-fuzz, pointing AFL_PATH at the qemu tracer."""
    import sys  # local import: `sys` is not imported at module level
    arch = getArch(iid)
    tracer = subprocess.run(['which', 'afl-qemu-trace'],
                            stdout=subprocess.PIPE).stdout.decode().replace('\n', '')
    # Fix: check *before* appending '-' + arch -- the original tested the
    # length after the concatenation, so the "not found" branch could
    # never trigger.
    if not tracer:
        print("Unknown architecture: " + arch)
        sys.exit(1)
    afl_path = tracer + '-' + arch
    env = dict(os.environ, AFL_INST_LIBS='1', AFL_EXIT_WHEN_DONE='1',
               AFL_NO_AFFINITY='0', AFL_PATH=afl_path)
    return env
def runAFL(args, ENVS):
    """Run one afl-fuzz instance, terminating it after a 20-minute budget."""
    worker = subprocess.Popen(args, env=ENVS)
    try:
        worker.wait(timeout=20 * 60)  # 20 min
    except subprocess.TimeoutExpired:
        # may check the status here to decide whether to terminate
        worker.terminate()
def fuzz(target, bindir, outdir, ENVS):
    """Fuzz one target binary with a master and three slave afl instances."""
    print('Fuzzing ' + target + '......')
    outdir += '/' + target
    if not os.path.isdir(outdir):
        if os.path.exists(outdir):
            os.remove(outdir)
        os.makedirs(outdir, 0o755)
        inputcase = '/usr/share/afl/testcases/others/text'
    else:
        # Output dir already exists: '-' tells afl to resume the session.
        inputcase = '-'
    # One (mode, name) pair per afl instance; this loop replaces four
    # copy-pasted spawn blocks. Join order (master, slave1..3) is preserved.
    workers = []
    for mode, name in (('-M', 'master'), ('-S', 'slave1'),
                       ('-S', 'slave2'), ('-S', 'slave3')):
        args = ['afl-fuzz', '-Q', mode, name, '-m', MEMORY, '-i', inputcase,
                '-o', outdir, '-L', bindir, bindir + '/' + target]
        worker = Process(target=runAFL, args=(args, ENVS))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
def _prepare_dir(path):
    """Ensure `path` is a directory, replacing any plain file of that name."""
    if not os.path.isdir(path):
        if os.path.exists(path):
            os.remove(path)
        os.makedirs(path, 0o755)


def process(iid, resultdir):
    """Extract all scored executables of image `iid` and fuzz each in turn."""
    # afl requires plain "core" core-dump files (no piped handler).
    subprocess.run(['echo core | sudo tee /proc/sys/kernel/core_pattern >/dev/null'],
                   shell=True)
    bindir = resultdir + '/bin'
    outdir = resultdir + '/out'
    # Helper replaces two copy-pasted directory-preparation blocks.
    _prepare_dir(bindir)
    _prepare_dir(outdir)
    extract(iid, bindir)
    AFL_ENVS = setenvs(iid)
    query = '''select filename from object_to_image where iid=''' + iid + ''' and score>0 and mime='application/x-executable; charset=binary' order by score DESC;'''
    # Comprehension replaces the reduce/map pair; '.' makes paths relative.
    targets = ['.' + row[0] for row in dbquery(query)]
    for target in targets:
        fuzz(target, bindir, outdir, AFL_ENVS)
def main():
    """Entry point: parse the firmware image ID and fuzz its executables."""
    arg_parser = argparse.ArgumentParser(description="AFL wrapper program")
    arg_parser.add_argument("id", action="store", help="firmware image ID")
    parsed = arg_parser.parse_args()
    script_dir = os.path.dirname(os.path.realpath(__file__))
    resultdir = script_dir + '/../results/' + parsed.id + '/afl'
    process(parsed.id, resultdir)


if __name__ == '__main__':
    main()
| Python | 0 | |
b05ab5401e716eab305625ad369ec5d362ae491a | add andromercury tool | andromercury.py | andromercury.py | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, re, os
from optparse import OptionParser
from androguard.core.bytecodes import apk
sys.path.append("./elsim/")
from elsim.elsign import dalvik_elsign
sys.path.append("./mercury/client")
from merc.lib.common import Session
# Command-line option definitions; consumed by the OptionParser set up in
# the __main__ block ('name' holds the flag tuple, the rest are kwargs).
option_0 = { 'name' : ('-l', '--list'), 'help' : 'list all packages', 'nargs' : 1 }
option_1 = { 'name' : ('-i', '--input'), 'help' : 'get specific packages (a filter)', 'nargs' : 1 }
option_2 = { 'name' : ('-r', '--remotehost'), 'help' : 'specify ip of emulator/device', 'nargs' : 1 }
option_3 = { 'name' : ('-p', '--port'), 'help' : 'specify the port', 'nargs' : 1 }
option_4 = { 'name' : ('-o', '--output'), 'help' : 'output directory to write packages', 'nargs' : 1 }
option_5 = { 'name' : ('-b', '--database'), 'help' : 'database : use this database', 'nargs' : 1 }
option_6 = { 'name' : ('-c', '--config'), 'help' : 'use this configuration', 'nargs' : 1 }
option_7 = { 'name' : ('-v', '--verbose'), 'help' : 'display debug information', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6, option_7]
def display(ret, debug) :
    # Print the first element of a signature-check result; the trailing
    # comma keeps the Python 2 print on the same line.
    # NOTE(review): `debug` is accepted but unused -- confirm intent.
    print "---->", ret[0],
def main(options, arguments) :
    """List packages on a device via Mercury; optionally download and scan them."""
    # Default Mercury endpoint: local emulator.
    sessionip = "127.0.0.1"
    sessionport = 31415

    if options.remotehost :
        sessionip = options.remotehost

    if options.port :
        sessionport = int(options.port)

    newsession = Session(sessionip, sessionport, "bind")

    # Check if connection can be established
    if newsession.executeCommand("core", "ping", None).data == "pong":
        if options.list :
            request = {'filter': options.list, 'permissions': None }
            # NOTE(review): `request` is built but {} is passed instead, so
            # the --list filter is silently ignored -- verify against the
            # Mercury "packages info" API.
            apks_info = newsession.executeCommand("packages", "info", {}).getPaddedErrorOrData()
            print apks_info
        elif options.input and options.output :
            # Only create an elsim signature engine when a database or
            # configuration was supplied on the command line.
            s = None
            if options.database != None or options.config != None :
                s = dalvik_elsign.MSignature( options.database, options.config, options.verbose != None, ps = dalvik_elsign.PublicSignature)

            request = {'filter': options.input, 'permissions': None }
            apks_info = newsession.executeCommand("packages", "info", request).getPaddedErrorOrData()
            print apks_info

            # Parse "APK path: <path>" lines, download each APK, and (when a
            # signature engine exists) check it for known malware signatures.
            for i in apks_info.split("\n") :
                if re.match("APK path:", i) != None :
                    name_app = i.split(":")[1][1:]
                    print name_app,
                    response = newsession.downloadFile(name_app, options.output)
                    print response.data, response.error,

                    if s != None :
                        a = apk.APK( options.output + "/" + os.path.basename(name_app) )
                        if a.is_valid_APK() :
                            display( s.check_apk( a ), options.verbose )
                    print
    else:
        print "\n**Network Error** Could not connect to " + sessionip + ":" + str(sessionport) + "\n"
if __name__ == "__main__" :
    parser = OptionParser()
    for option in options :
        # add_option takes the flag tuple positionally; 'name' is removed
        # so the remaining keys can be splatted as keyword arguments
        # (this mutates the module-level option dicts).
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)

    # Rebinds the module-level `options` name to the parsed values.
    options, arguments = parser.parse_args()
    sys.argv[:] = arguments
    main(options, arguments)
| Python | 0 | |
bad82f3c77dbeebdc2332d193f0c8290c5186862 | add rudimentary api | frontend/app.py | frontend/app.py | import os
import sys
import inspect
import json
from flask import Flask
# move to top level
# move to top level
# Put the repository root on sys.path so the sibling packages (api,
# modelstore, knowledgerepr, algebra) resolve when running this file directly.
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

from api.reporting import Report
from api.apiutils import Scope, Relation
from modelstore.elasticstore import StoreHandler
from knowledgerepr import fieldnetwork
from algebra import API

# Load the pre-built serialized field network model at import time.
path_to_serialized_model = parentdir + "/test/testmodel/"
network = fieldnetwork.deserialize_network(path_to_serialized_model)
store_client = StoreHandler()
api = API(network, store_client)

# Module-level aliases: the /query endpoint eval()s strings that use these
# names as a compact query DSL.
keyword_search = api.keyword_search
neighbor_search = api.neighbor_search
union = api.union
intersection = api.intersection
difference = api.difference

# NOTE(review): 'feld' looks like a typo for 'field', but eval'd queries
# may depend on the current spelling -- confirm before renaming.
db = Scope.DB
source = Scope.SOURCE
feld = Scope.FIELD
content = Scope.CONTENT
schema = Relation.SCHEMA
schema_sim = Relation.SCHEMA_SIM
content_sim = Relation.CONTENT_SIM
entity_sim = Relation.ENTITY_SIM
pkfk = Relation.PKFK

app = Flask(__name__)
@app.route('/query/<query>')
def query(query):
    """Evaluate a query expression and return its `.data` as JSON.

    WARNING(security): this eval()s arbitrary text taken straight from the
    URL -- remote code execution for anyone who can reach the endpoint.
    Must never be exposed outside a fully trusted environment.
    """
    try:
        res = eval(query)
        res = json.dumps(res.data)
    except Exception as e:
        # Return the failure text to the client instead of a 500 page.
        res = "error: " + str(e)
    return res
@app.route('/convert/<nid>')
def convert(nid):
    """Resolve a node id to a DRS and return its `.data` as JSON.

    Fix: removed a leftover `import pdb; pdb.set_trace()` breakpoint that
    suspended the server on every request to this endpoint.
    """
    try:
        nid = int(nid)
        res = api._general_to_drs(nid)
        res = json.dumps(res.data)
    except Exception as e:
        # Return the failure text to the client instead of a 500 page.
        res = "error: " + str(e)
    return res


if __name__ == '__main__':
    app.run()
| Python | 0.000007 | |
37baa669ed1e00fabddd33478fa75f4047075ce3 | Create Python object detection script. | cs473vision/ObjectDetector.py | cs473vision/ObjectDetector.py | '''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
    '''
    Compares a background image against a foreground image using several
    background-subtraction strategies and writes the masks to disk.
    '''

    def __init__(self, params):
        '''
        Constructor.

        NOTE(review): `params` is accepted but never used -- confirm intent.
        '''
        self.bg_img = None  # background-only reference frame
        self.fg_img = None  # frame containing the foreground object
        return

    def load_image(self, bg_path, fg_path):
        # Load both frames from disk; always reports success.
        # NOTE(review): cv2.imread returns None on failure and that is not
        # checked here -- callers cannot rely on the True return value.
        self.bg_img = cv2.imread(bg_path)
        self.fg_img = cv2.imread(fg_path)
        return True

    def subtract_background(self):
        # Write naive/MOG/MOG2 subtraction results to the working directory.
        # Take simple difference
        # NOTE(review): this diffs bg_img against *itself* (always zero);
        # almost certainly should be absdiff(self.bg_img, self.fg_img).
        naive = cv2.absdiff(self.bg_img, self.bg_img)
        cv2.imwrite("naive.png", naive)

        # MOG Subtraction: feed background first so the model learns it,
        # then apply to the foreground frame to get the object mask.
        bg_subtractor = cv2.BackgroundSubtractorMOG()
        bg_mask = bg_subtractor.apply(self.bg_img)
        fg_mask = bg_subtractor.apply(self.fg_img)
        cv2.imwrite("MOG.png", fg_mask)

        # MOG2 Subtraction (same procedure, adaptive gaussian-mixture model)
        bg_subtractor = cv2.BackgroundSubtractorMOG2()
        bg_mask = bg_subtractor.apply(self.bg_img)
        fg_mask = bg_subtractor.apply(self.fg_img)
        cv2.imwrite("MOG2.png", fg_mask)
return | Python | 0 | |
4aacc8d55c138c405d561bbf9ddfd9ddab483e62 | add wxPython example. | trypython/extlib/gui/wx01.py | trypython/extlib/gui/wx01.py | """
wxPython についてのサンプルです
お決まりの Hello world について
REFERENCES:: http://bit.ly/2OcHRh7
"""
# noinspection PyPackageRequirements
import wx
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
    """Minimal wxPython sample: show an empty 'Hello World' frame."""

    def exec(self):
        """Create the app and frame, then enter the wx main loop."""
        application = wx.App()
        frame = wx.Frame(parent=None, title='Hello World')
        frame.Show()
        application.MainLoop()
def go():
    """Instantiate the sample and run it."""
    sample = Sample()
    sample.exec()


if __name__ == '__main__':
    go()
| Python | 0 | |
9566d71a267015005ad937cd92bd105d2ffff274 | print users with multiple matching urls | twnews/print_urls_by_user.py | twnews/print_urls_by_user.py | """
Print users who were found for multiple urls.
user url1 score1 url2 score2 ...
"""
import codecs
from collections import defaultdict
import json
import sys
from . import __data__
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
def print_urls_by_user(tweets_file=__data__ + '/tweets.json'):
user2urls = defaultdict(lambda: set())
url2score = defaultdict(lambda: 0.)
inf = codecs.open(tweets_file, 'rt', 'utf-8')
for line in inf:
js = json.loads(line)
if 'url_query' in js: # valid line
user2urls[js['user']['screen_name']].add(js['url_query'])
url2score[js['url_query']] = float(js['url_score'])
for user, urls in user2urls.iteritems():
if len(urls) > 1:
print user + '\t' + '\t'.join('%s\t%.3f' % (u, url2score[u]) for u in urls)
if __name__ == '__main__':
print_urls_by_user()
| Python | 0.000006 | |
bc9c057d57d4dbc2e3c70eaf3ac182df2b334107 | fix bluffing again | player.py | player.py | import logging
import card as c
class Player(object):
    """
    This class represents a player.

    It is basically a doubly-linked ring list with the option to reverse the
    direction. On initialization, it will connect itself to a game and its
    other players by placing itself behind the current player.
    """

    def __init__(self, game, user):
        self.cards = list()
        self.game = game
        self.user = user
        self.logger = logging.getLogger(__name__)

        # Check if this player is the first player in this game.
        if game.current_player:
            # Splice self into the ring just before the current player.
            self.next = game.current_player
            self.prev = game.current_player.prev
            game.current_player.prev.next = self
            game.current_player.prev = self
        else:
            # First player: the ring consists of this player alone.
            self._next = self
            self._prev = self
            game.current_player = self

        # Deal the initial hand of seven cards.
        for i in range(7):
            self.cards.append(self.game.deck.draw())

        self.bluffing = False  # True while a played +4 could be challenged
        self.drew = False      # True once the player drew a card this turn

    def leave(self):
        """ Leave the current game """
        # Unlink self from the ring and drop references in both directions.
        self.next.prev = self.prev
        self.prev.next = self.next
        self.next = None
        self.prev = None

    def __repr__(self):
        return repr(self.user)

    def __str__(self):
        return str(self.user)

    # next/prev swap roles while the game direction is reversed, so callers
    # can always follow `next` regardless of play direction.
    @property
    def next(self):
        return self._next if not self.game.reversed else self._prev

    @next.setter
    def next(self, player):
        if not self.game.reversed:
            self._next = player
        else:
            self._prev = player

    @property
    def prev(self):
        return self._prev if not self.game.reversed else self._next

    @prev.setter
    def prev(self, player):
        if not self.game.reversed:
            self._prev = player
        else:
            self._next = player

    def playable_cards(self):
        """ Returns a list of the cards this player can play right now """
        playable = list()
        last = self.game.last_card
        self.logger.debug("Last card was " + str(last))

        cards = self.cards
        if self.drew:
            # After drawing, only the freshly drawn (last) card may be played.
            cards = self.cards[-1:]

        for card in cards:
            if self.card_playable(card, playable):
                self.logger.debug("Matching!")
                playable.append(card)

        # You may only play a +4 if you have no cards of the correct color
        self.bluffing = False
        for card in playable:
            if card.color == last.color:
                self.bluffing = True
                break

        # You may not play a chooser or +4 as your last card
        if len(self.cards) == 1 and (self.cards[0].special == c.DRAW_FOUR or
                                     self.cards[0].special == c.CHOOSE):
            return list()

        return playable

    def card_playable(self, card, playable):
        """ Check a single card if it can be played """
        is_playable = True
        last = self.game.last_card
        self.logger.debug("Checking card " + str(card))

        # Must match color or value unless it is a special (wild) card.
        if (card.color != last.color and card.value != last.value and
                not card.special):
            self.logger.debug("Card's color or value doesn't match")
            is_playable = False

        # Pending +2 can only be countered with another +2.
        if last.value == c.DRAW_TWO and not \
                card.value == c.DRAW_TWO and self.game.draw_counter:
            self.logger.debug("Player has to draw and can't counter")
            is_playable = False

        # Pending +4 cannot be countered at all.
        if last.special == c.DRAW_FOUR and self.game.draw_counter:
            self.logger.debug("Player has to draw and can't counter")
            is_playable = False

        # No color chooser on top of another color chooser / +4.
        if (last.special == c.CHOOSE or last.special == c.DRAW_FOUR) and \
                (card.special == c.CHOOSE or card.special == c.DRAW_FOUR):
            self.logger.debug("Can't play colorchooser on another one")
            is_playable = False

        if not last.color or card in playable:
            self.logger.debug("Last card has no color or the card was "
                              "already added to the list")
            is_playable = False

        return is_playable
| import logging
import card as c
# NOTE: this second copy is the dataset row's `old_contents` (pre-fix)
# version of Player; it differs from the copy above only in how
# `bluffing` is computed inside playable_cards().
class Player(object):
    """
    This class represents a player.

    It is basically a doubly-linked ring list with the option to reverse the
    direction. On initialization, it will connect itself to a game and its
    other players by placing itself behind the current player.
    """

    def __init__(self, game, user):
        self.cards = list()
        self.game = game
        self.user = user
        self.logger = logging.getLogger(__name__)

        # Check if this player is the first player in this game.
        if game.current_player:
            # Splice self into the ring just before the current player.
            self.next = game.current_player
            self.prev = game.current_player.prev
            game.current_player.prev.next = self
            game.current_player.prev = self
        else:
            # First player: the ring consists of this player alone.
            self._next = self
            self._prev = self
            game.current_player = self

        # Deal the initial hand of seven cards.
        for i in range(7):
            self.cards.append(self.game.deck.draw())

        self.bluffing = False  # True while a played +4 could be challenged
        self.drew = False      # True once the player drew a card this turn

    def leave(self):
        """ Leave the current game """
        self.next.prev = self.prev
        self.prev.next = self.next
        self.next = None
        self.prev = None

    def __repr__(self):
        return repr(self.user)

    def __str__(self):
        return str(self.user)

    # next/prev swap roles while the game direction is reversed.
    @property
    def next(self):
        return self._next if not self.game.reversed else self._prev

    @next.setter
    def next(self, player):
        if not self.game.reversed:
            self._next = player
        else:
            self._prev = player

    @property
    def prev(self):
        return self._prev if not self.game.reversed else self._next

    @prev.setter
    def prev(self, player):
        if not self.game.reversed:
            self._prev = player
        else:
            self._next = player

    def playable_cards(self):
        """ Returns a list of the cards this player can play right now """
        playable = list()
        last = self.game.last_card
        self.logger.debug("Last card was " + str(last))

        cards = self.cards
        if self.drew:
            # After drawing, only the freshly drawn (last) card may be played.
            cards = self.cards[-1:]

        for card in cards:
            if self.card_playable(card, playable):
                self.logger.debug("Matching!")
                playable.append(card)

        # You may only play a +4 if it's the only card you can play
        self.bluffing = bool(len(playable) - 1)

        # You may not play a chooser or +4 as your last card
        if len(self.cards) == 1 and (self.cards[0].special == c.DRAW_FOUR
                                     or self.cards[0].special == c.CHOOSE):
            return list()

        return playable

    def card_playable(self, card, playable):
        """ Check a single card if it can be played """
        is_playable = True
        last = self.game.last_card
        self.logger.debug("Checking card " + str(card))

        # Must match color or value unless it is a special (wild) card.
        if (card.color != last.color and card.value != last.value and
                not card.special):
            self.logger.debug("Card's color or value doesn't match")
            is_playable = False

        # Pending +2 can only be countered with another +2.
        if last.value == c.DRAW_TWO and not \
                card.value == c.DRAW_TWO and self.game.draw_counter:
            self.logger.debug("Player has to draw and can't counter")
            is_playable = False

        # Pending +4 cannot be countered at all.
        if last.special == c.DRAW_FOUR and self.game.draw_counter:
            self.logger.debug("Player has to draw and can't counter")
            is_playable = False

        # No color chooser on top of another color chooser / +4.
        if (last.special == c.CHOOSE or last.special == c.DRAW_FOUR) and \
                (card.special == c.CHOOSE or card.special == c.DRAW_FOUR):
            self.logger.debug("Can't play colorchooser on another one")
            is_playable = False

        if not last.color or card in playable:
            self.logger.debug("Last card has no color or the card was "
                              "already added to the list")
            is_playable = False

        return is_playable
| Python | 0.000009 |
e5627134d9a2c052a523f66a2ec9867b3432fae2 | Test Issue #461: ent_iob tag incorrect after setting entities. | spacy/tests/tokens/test_add_entities.py | spacy/tests/tokens/test_add_entities.py | from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
from spacy.pipeline import EntityRecognizer
import pytest
@pytest.fixture(scope="module")
def en_vocab():
    """Module-scoped English Vocab built from the spaCy language defaults."""
    english = spacy.get_lang_class('en')
    return english.Defaults.create_vocab()
@pytest.fixture(scope="module")
def entity_recognizer(en_vocab):
    """Untrained entity recognizer sharing the module-scoped vocab."""
    ner = EntityRecognizer(en_vocab, features=[(2,), (3,)])
    return ner
@pytest.fixture
def animal(en_vocab):
    """Return the string-store id of the ANIMAL label.

    Fix: the original referenced an undefined global `nlp`; the vocab is
    already injected via the `en_vocab` fixture.
    """
    return en_vocab.strings[u"ANIMAL"]
@pytest.fixture
def doc(en_vocab, entity_recognizer):
    """A four-token Doc that has been run through the entity recognizer."""
    tokens = Doc(en_vocab, words=[u"this", u"is", u"a", u"lion"])
    entity_recognizer(tokens)
    return tokens
def test_set_ents_iob(doc):
    """Issue #461: assigning doc.ents must update per-token ent_iob_ tags."""
    assert len(list(doc.ents)) == 0
    assert [w.ent_iob_ for w in doc] == ['O'] * len(doc)

    # A one-token entity at the end of the doc.
    doc.ents = [(doc.vocab.strings['ANIMAL'], 3, 4)]
    assert [w.ent_iob_ for w in doc] == ['O', 'O', 'O', 'B']

    # Re-assigning replaces the previous entity annotation entirely.
    doc.ents = [(doc.vocab.strings['WORD'], 0, 2)]
    assert [w.ent_iob_ for w in doc] == ['B', 'I', 'O', 'O']
| Python | 0 | |
7cee0980a67b827e4cf06c15e0f1c3b412f68c22 | Create main.py to actually perform the test | main.py | main.py | import io
import argparse
def initialize_argument_parser(argv=None):
    """Parse command-line options for the simulation.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward compatible with the original no-argument call).

    Returns:
        dict mapping option dest names to values, e.g. {'solution': ...}.
    """
    parser = argparse.ArgumentParser(description='Simulate Indian health solutions')
    parser.add_argument('-s', '--solution', dest='solution',
                        help='the solution to test', default='health kiosk')
    return vars(parser.parse_args(argv))
if __name__ == "__main__":
    args = initialize_argument_parser()
    # Python 2 print statement: this script predates a py3 port.
    print 'Solution to test:', args['solution']
| Python | 0 | |
233c7f0152a05afcef62b596f7c90abe7e9eec90 | add a base pipeline for converting json to bigquery | pipeline/flatten_json.py | pipeline/flatten_json.py | """Beam pipeline for converting json scan files into bigquery tables."""
from __future__ import absolute_import
import argparse
import json
import logging
from pprint import pprint
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# Output column name -> BigQuery column type for the flattened scan rows.
# Must stay in sync with the dicts produced by flatten_measurement().
bigquery_schema = {
    'domain': 'string',
    'ip': 'string',
    'date': 'date',
    'start_time': 'timestamp',
    'end_time': 'timestamp',
    'retries': 'integer',
    'sent': 'string',
    'received': 'string',
    'error': 'string',
    'blocked': 'boolean',
    'success': 'boolean',
    'fail_sanity': 'boolean',
    'stateful_block': 'boolean'
}
# Future fields
# (bare string below is intentionally a no-op, kept as a planning note)
"""
'row_number', 'integer',
'domain_category': 'string',
'netblock': 'string',
'asn': 'string',
'as_name': 'string',
'as_full_name': 'string',
'as_traffic': 'integer',
'as_class': 'string',
'country': 'string',
"""
def get_bigquery_schema():
    """Return a beam bigquery schema for the output table."""
    table_schema = bigquery.TableSchema()
    for name, field_type in bigquery_schema.items():
        field = bigquery.TableFieldSchema()
        field.name = name
        field.type = field_type
        # Every output column is a simple nullable scalar (flat schema).
        field.mode = 'nullable'
        table_schema.fields.append(field)
    return table_schema
def flatten_measurement(line):
    """Flattens a measurement string into several roundtrip rows.

    Fix: removed the leftover debug pprint() calls -- printing every record
    is log spam and a per-element cost inside a Dataflow pipeline.

    Args:
      line: a json string describing a censored planet measurement. example
      {'Keyword': 'test.com,
       'Server': '1.2.3.4',
       'Results': [{'Success': true},
                   {'Success': false}]}

    Returns:
      an array of dicts containing individual roundtrip information
      [{'column_name': field_value}]
      example
      [{'domain': 'test.com', 'ip': '1.2.3.4', 'success': true}
       {'domain': 'test.com', 'ip': '1.2.3.4', 'success': true}]
    """
    scan = json.loads(line)
    # One output row per roundtrip; keys must match bigquery_schema.
    return [{
        'domain': scan['Keyword'],
        'ip': scan['Server'],
        'date': result['StartTime'][:10],
        'start_time': result['StartTime'],
        'end_time': result['EndTime'],
        'retries': scan['Retries'],
        'sent': result['Sent'],
        'received': result['Received'],
        'error': result['Error'],
        'blocked': scan['Blocked'],
        'success': result['Success'],
        'fail_sanity': scan['FailSanity'],
        'stateful_block': scan['StatefulBlock'],
    } for result in scan['Results']]
def run(argv=None, save_main_session=True):
    """Build and run the pipeline: json scan lines in, bigquery rows out."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        dest='input',
        # default='gs://firehook-scans/echo/CP_Quack-echo-2018-07-27-15-20-11/results.json',
        default='gs://firehook-dataflow-test/results-short.json',
        help='Input file to process.')
    parser.add_argument(
        '--output',
        dest='output',
        default='firehook-censoredplanet:test.example',
        help='Output file to write results to.')
    # Unknown args are forwarded to beam as pipeline options.
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_args.extend([
        # DataflowRunner or DirectRunner
        '--runner=DirectRunner',
        '--project=firehook-censoredplanet',
        '--region=us-east1',
        '--staging_location=gs://firehook-dataflow-test/staging',
        '--temp_location=gs://firehook-dataflow-test/temp',
        '--job_name=flatten-json-job',
    ])

    pipeline_options = PipelineOptions(pipeline_args)
    # Saving the main session lets remote workers unpickle module globals.
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session

    with beam.Pipeline(options=pipeline_options) as p:
        lines = p | ReadFromText(known_args.input)
        rows = (lines | 'flatten json' >> (beam.FlatMap(flatten_measurement)))

        rows | 'Write' >> beam.io.WriteToBigQuery(
            known_args.output,
            schema=get_bigquery_schema(),
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            # WRITE_TRUNCATE is slow when testing.
            # write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)
if __name__ == '__main__':
    # INFO-level logging so beam progress messages are visible locally.
    logging.getLogger().setLevel(logging.INFO)
    run()
| Python | 0.000001 | |
361c3496274a960c5e927899a39618f8fee9db0a | Add Basic Trellis Histogram to Examples | altair/vegalite/v2/examples/trellis_histogram.py | altair/vegalite/v2/examples/trellis_histogram.py | """
Trellis Histogram
-----------------
This example shows how to make a basic trellis histogram.
https://vega.github.io/vega-lite/examples/trellis_bar_histogram.html
"""
import altair as alt
# Example cars dataset bundled with Altair.
cars = alt.load_dataset('cars')

# Bar-mark histogram of Horsepower (binned into at most 15 buckets),
# faceted into one row per country of Origin.
chart = alt.Chart(cars).mark_bar().encode(
    x=alt.X("Horsepower",
            type="quantitative",
            bin=alt.BinTransform(
                maxbins=15
            )),
    y='count(*):Q',
    row='Origin'
)
| Python | 0 | |
a7e8b3a2b63ed4717a783754446b30c4f288e76a | Create channelinfo.py | cogs/channelinfo.py | cogs/channelinfo.py | import discord
from discord.ext import commands
from random import randint, choice
class Channelinfo:
    """Shows Channel infos."""

    def __init__(self, bot):
        self.bot = bot

    # NOTE(review): the annotation `discord.channel` is the module, not a
    # converter class -- likely meant discord.TextChannel; verify.
    @commands.command(aliases=['channel', 'cinfo', 'ci'], pass_context=True, no_pm=True)
    async def channelinfo(self, ctx, *, channel: discord.channel=None):
        """Shows channel informations"""
        # Default to the channel the command was invoked in.
        if not channel:
            channel = ctx.message.channel
        # else:
        #     channel = ctx.message.guild.get_channel(int(chan))
        #     if not channel: channel = self.bot.get_channel(int(chan))
        data = discord.Embed()
        content = None
        if hasattr(channel, 'mention'):
            content = self.bot.bot_prefix+"**Informations about Channel:** "+channel.mention
        # Embed color: green if the first role override can read, red if not.
        if hasattr(channel, 'changed_roles'):
            if len(channel.changed_roles) > 0:
                if channel.changed_roles[0].permissions.read_messages:
                    data.color = discord.Colour.green()
                else: data.color = discord.Colour.red()
        if isinstance(channel, discord.TextChannel): _type = "Text"
        elif isinstance(channel, discord.VoiceChannel): _type = "Voice"
        else: _type = "Unknown"

        data.add_field(name="Type", value=_type)
        data.add_field(name="ID", value=channel.id)
        if hasattr(channel, 'position'):
            data.add_field(name="Position", value=channel.position)
        if isinstance(channel, discord.VoiceChannel):
            # Voice channels: occupancy (with optional user limit),
            # connected users and bitrate.
            if channel.user_limit != 0:
                data.add_field(name="User Number", value="{}/{}".format(len(channel.voice_members), channel.user_limit))
            else:
                data.add_field(name="User Number", value="{}".format(len(channel.voice_members)))
            userlist = [r.display_name for r in channel.members]
            if not userlist:
                userlist = "None"
            else:
                userlist = "\n".join(userlist)
            data.add_field(name="Users", value=userlist)
            data.add_field(name="Bitrate", value=channel.bitrate)
        elif isinstance(channel, discord.TextChannel):
            # Text channels: member count, topic, and which roles may /
            # may not read the channel according to permission overrides.
            if channel.members:
                data.add_field(name="Members", value="%s"%len(channel.members))
            if channel.topic:
                data.add_field(name="Topic", value=channel.topic, inline=False)
            _hidden = []; _allowed = []
            for role in channel.changed_roles:
                if role.permissions.read_messages: _allowed.append(role.mention)
                else: _hidden.append(role.mention)
            if len(_allowed) > 0: data.add_field(name='Allowed Roles (%s)'%len(_allowed), value=', '.join(_allowed), inline=False)
            if len(_hidden) > 0: data.add_field(name='Restricted Roles (%s)'%len(_hidden), value=', '.join(_hidden), inline=False)
        if channel.created_at:
            data.set_footer(text=("Created on {} ({} days ago)".format(channel.created_at.strftime("%d %b %Y %H:%M"), (ctx.message.created_at - channel.created_at).days)))
        # try:
        await ctx.send(content if content else None, embed=data)
        # except:
        #     await ctx.send(self.bot.bot_prefix+"I need the `Embed links` permission to send this")
def setup(bot):
    """discord.py extension hook: register the Channelinfo cog on the bot."""
    bot.add_cog(Channelinfo(bot))
| Python | 0 | |
333453fe6a74d7bada941ee7aeed3660452efcaf | add tests | tests/install_tests/test_cupy_builder/test_command.py | tests/install_tests/test_cupy_builder/test_command.py | from cupy_builder._command import filter_files_by_extension
def test_filter_files_by_extension():
    """filter_files_by_extension splits sources into (matching, rest)."""
    cpp_files = ['a.cpp', 'b.cpp']
    pyx_files = ['c.pyx']
    mixed = cpp_files + pyx_files

    # Mixed input: each extension pulls its own files to the front tuple slot.
    assert filter_files_by_extension(mixed, '.cpp') == (cpp_files, pyx_files)
    assert filter_files_by_extension(mixed, '.pyx') == (pyx_files, cpp_files)
    # No file matches '.cu': everything lands in the "rest" slot.
    assert filter_files_by_extension(mixed, '.cu') == ([], mixed)
    # Homogeneous input: one slot gets everything, the other is empty.
    assert filter_files_by_extension(cpp_files, '.cpp') == (cpp_files, [])
    assert filter_files_by_extension(cpp_files, '.pyx') == ([], cpp_files)
| Python | 0 | |
8a7963644ff470fc7da8bc22a7f3fd6ef1be3ed2 | define password generator. | rio/utils/token.py | rio/utils/token.py | # -*- coding: utf-8 -*-
"""
rio.utils.token
~~~~~~~~~~~~~~~
"""
import random
import string
def password_generator(length):
    """Generate a random lowercase-alphanumeric password.

    Fix: uses the OS CSPRNG via random.SystemRandom instead of the default
    Mersenne-Twister generator, whose output is predictable and therefore
    unsuitable for passwords.

    :param length: integer, number of characters to generate.
    :return: random string of ``length`` characters.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
| Python | 0.000001 | |
1d4397860ffd297eb02b5f4b96b0e18a3c7e12cd | Add test recipes. | badgify/tests/recipes.py | badgify/tests/recipes.py | # -*- coding: utf-8 -*-
from ..recipe import BaseRecipe
from ..compat import get_user_model
class BadRecipe(object):
    # Test fixture: deliberately NOT a BaseRecipe subclass, presumably used
    # to check that invalid recipe classes are rejected — confirm in tests.
    pass
class NotImplementedRecipe(BaseRecipe):
    # Test fixture: inherits BaseRecipe but overrides none of its hooks.
    pass
class Recipe1(BaseRecipe):
    """Test recipe with a concrete user query for its badge audience."""
    name = 'Recipe 1'
    slug = 'recipe1'
    description = 'Recipe 1 description'
    @property
    def image(self):
        # Placeholder image value; the tests don't need a real file.
        return 'image'
    @property
    def user_ids(self):
        # IDs of users who should earn the badge; assumes the test user model
        # defines a 'love_python' boolean field — confirm in test settings.
        return (get_user_model().objects.filter(love_python=True)
                .values_list('id', flat=True))
class Recipe2(BaseRecipe):
    """Test recipe that never awards its badge (empty audience)."""
    name = 'Recipe 2'
    slug = 'recipe2'
    description = 'Recipe 2 description'
    @property
    def image(self):
        # Placeholder image value; the tests don't need a real file.
        return 'image'
    @property
    def user_ids(self):
        # No recipients: exercises the empty-audience code path.
        return []
| Python | 0 | |
f6c07ad620f7e3ef73fdad5f2d97d1dd911e390f | Create signal.py | signal.py | signal.py | #!/usr/bin/python3
# Send via Signal
import os
from pydbus import SystemBus
from dateutil import parser
from datetime import datetime
# Read host-event details exported by the monitoring system into environment
# variables (NOTIFY_* naming — presumably a Nagios/check_mk-style notifier;
# confirm), then push a one-line summary over Signal via D-Bus (signal-cli).
eventhostname = os.environ.get("NOTIFY_HOSTNAME", "<no host>")
eventdatetimeString = os.environ.get("NOTIFY_LONGDATETIME", "<no time>")
if eventdatetimeString == "<no time>":
    # No timestamp supplied: fall back to a fixed sentinel date.
    eventdatetime = datetime(2017, 1, 1)
else:
    eventdatetime = parser.parse(eventdatetimeString)
eventduration = os.environ.get("NOTIFY_LASTHOSTSTATECHANGE_REL", "<no duration>")
eventhoststate = os.environ.get("NOTIFY_HOSTSTATE", "<no state>")
eventlasthoststate = os.environ.get("NOTIFY_LASTHOSTSTATE", "<no last state>")
bus = SystemBus()
signal = bus.get("org.asamk.Signal")
# BUGFIX: this statement was truncated ('... + " (" + eventdurati') and did
# not parse; rebuilt to append the duration and close the parenthesis.
message = eventhostname + ": " + eventhoststate + " after " + eventlasthoststate + " (" + eventduration + ")"
signal.sendMessage(message, [], ['+XXXXXXXXXXXXXXXXXXX'])
| Python | 0.000002 | |
269e9821a52f1d68e2a48beb76b20c227dc84b55 | add 0007(count one file omitting multiple-line comment) | robot527/0007/count_codes.py | robot527/0007/count_codes.py | #! usr/bin/python3
"""
第 0007 题:有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。
包括空行和注释,但是要分别列出来。
"""
class PyfileInfo:
    """Collects line statistics (total / blank / comment) for one Python file.

    Only single-line ``#`` comments are recognised; multi-line string
    "comments" are intentionally counted as code (see the module docstring).
    """
    def __init__(self, file):
        self.file_name = file
        self.total_line_num = 0    # every physical line in the file
        self.blank_line_num = 0    # lines empty after stripping whitespace
        self.comment_line_num = 0  # lines whose first non-blank char is '#'
    def count_lines(self):
        """Scan the file and populate the three counters.

        Files without a ``.py`` extension are skipped with a message; I/O
        errors are reported and leave the counters untouched.
        """
        # endswith() is clearer and safer than slicing the last 3 characters.
        if not self.file_name.endswith('.py'):
            print(self.file_name + ' is not a .py file!')
            return
        try:
            with open(self.file_name) as code:
                for each_line in code:
                    self.total_line_num += 1
                    stripped = each_line.strip()
                    if not stripped:
                        self.blank_line_num += 1
                    elif stripped.startswith('#'):
                        self.comment_line_num += 1
        except IOError as err:
            print('File error: ' + str(err))
import os
# Scan the current directory for .py files and report statistics for the
# first one found.
target_path = '.'
file_list = [f for f in os.listdir(target_path)
             if os.path.isfile(os.path.join(target_path, f))]
#print(file_list, len(file_list))
pyfile_list = [os.path.join(target_path, f) for f in file_list
               if f[-3:] == '.py']
# NOTE(review): raises IndexError when the directory contains no .py files.
print(pyfile_list[0])
pyf1 = PyfileInfo(pyfile_list[0])
pyf1.count_lines()
#pyf2 = PyfileInfo('test.c')
#pyf2.count_lines()
print('==' * 18)
print('Total line number is:', pyf1.total_line_num)
print('Blank line number is:', pyf1.blank_line_num)
print('Comment line number is:', pyf1.comment_line_num)
| Python | 0 | |
74d8390dce4dd5a8fe8a6f7f4304b80afadfec1d | Add basic calendar framework | app/calender.py | app/calender.py | class CalenderManager:
    def __init__(self):
        # Stub: event storage is not implemented yet.
        pass
    def add_event(self, user, starttime, endtime):
        # Stub: should record an event for *user* spanning starttime-endtime.
        pass
    def get_current_event(self):
        # Return None when no event is currently in progress (stub).
        pass
    def remove_event(self, id):
        # Stub: should delete the event with the given id.
        pass
    def get_upcoming_events(self, num):
        # Stub: should return the next *num* upcoming events.
        pass
93a03023cf0c73b78680d431c828f4b9ebc49802 | add disk_cache.py | 3.下载缓存/3.disk_cache.py | 3.下载缓存/3.disk_cache.py | #!/usr/bin/env python
# coding:utf-8
import os
import re
import zlib
import shutil
import urlparse
import cPickle as pickle
from datetime import datetime, timedelta
from link_crawler import link_crawler
class DiskCache:
    """Cache downloaded resources as files on disk.

    Entries are pickled ``(result, timestamp)`` pairs, optionally
    zlib-compressed, stored under ``cache_dir`` at a path derived from the
    URL.  Entries older than ``expires`` raise ``KeyError`` on lookup.
    """
    def __init__(self, cache_dir='cache', expires=timedelta(days=30), compress=True):
        """Configure the on-disk location, entry lifetime and compression."""
        self.cache_dir = cache_dir
        self.expires = expires
        self.compress = compress

    def __getitem__(self, url):
        """Load the cached data for *url* from disk."""
        path = self.url_to_path(url)
        if not os.path.exists(path):
            raise KeyError(url + ' does not exist')
        with open(path, 'rb') as fh:
            blob = fh.read()
        if self.compress:
            blob = zlib.decompress(blob)
        result, timestamp = pickle.loads(blob)
        if self.has_expired(timestamp):
            raise KeyError(url + ' has expired')
        return result

    def __setitem__(self, url, result):
        """Store *result* for *url*, stamped with the current UTC time."""
        path = self.url_to_path(url)
        folder = os.path.dirname(path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        blob = pickle.dumps((result, datetime.utcnow()))
        if self.compress:
            blob = zlib.compress(blob)
        with open(path, 'wb') as fh:
            fh.write(blob)

    def __delitem__(self, url):
        """Remove the entry for *url* and prune empty parent directories."""
        path = self.url_to_path(url)
        try:
            os.remove(path)
            os.removedirs(os.path.dirname(path))
        except OSError:
            # Best effort: the file may be gone or a directory non-empty.
            pass

    def url_to_path(self, url):
        """Map *url* to a filesystem path below ``cache_dir``."""
        parts = urlparse.urlsplit(url)
        path = parts.path
        if not path:
            path = '/index.html'
        elif path.endswith('/'):
            path += 'index.html'
        filename = parts.netloc + path + parts.query
        # Replace characters unsafe in filenames and cap each path segment at
        # 255 characters (a common filesystem limit).
        filename = re.sub('[^/0-9a-zA-Z\-.,;_ ]', '_', filename)
        filename = '/'.join(piece[:255] for piece in filename.split('/'))
        return os.path.join(self.cache_dir, filename)

    def has_expired(self, timestamp):
        """Return whether an entry written at *timestamp* has outlived ``expires``."""
        return timestamp + self.expires < datetime.utcnow()

    def clear(self):
        """Delete every cached entry."""
        if os.path.exists(self.cache_dir):
            shutil.rmtree(self.cache_dir)
if __name__ == '__main__':
    # Crawl the example site, persisting downloaded pages through DiskCache.
    link_crawler('http://example.webscraping.com/', '/places/default/(index|view)', cache=DiskCache())
| Python | 0.000002 | |
b0be4dfb0fa13bb708cfe3aaa9c1f576dd5ccaa6 | Add initial EDID parser | src/edider/parser.py | src/edider/parser.py | #!/usr/bin/env python3
from itertools import zip_longest
from collections import namedtuple
import string
import struct
def grouper(iterable, n, fillvalue=None):
    """Yield the items of *iterable* in tuples of length *n*.

    The final tuple is padded with *fillvalue* when the input does not
    divide evenly, e.g. grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx.
    """
    shared = iter(iterable)
    return zip_longest(*([shared] * n), fillvalue=fillvalue)
def bytes_to_bits(bstr):
    """Return one zero-padded 8-character binary string per byte of *bstr*."""
    return [format(byte, '08b') for byte in bstr]
def bytes_to_printable(bstr):
    """Decode *bstr* as ASCII (dropping non-ASCII bytes) and keep only
    printable characters."""
    text = bstr.decode('ascii', errors='ignore')
    printable = set(string.printable)
    return ''.join(ch for ch in text if ch in printable)
def parse_descriptor(desc):
    """Extract the text payload of an 18-byte EDID "other monitor" descriptor.

    Descriptor layout:
      bytes 0-1  zero -> not a detailed timing descriptor
      byte  2    zero
      byte  3    descriptor type (FA-FF currently defined; 00-0F vendor use)
      byte  4    zero
      bytes 5-17 payload; code page 437 text for text descriptors

    Returns '' for detailed-timing descriptors (non-zero first byte); for
    unknown descriptor types the raw payload is reported and the printable
    text is still returned.
    """
    GENERAL_TXT = 254  # 0xFE: unspecified-text descriptor
    if desc[0] != 0:
        return ''
    header = struct.unpack('5c', desc[0:5])
    descr_type = header[3][0]
    payload = desc[5:]
    if descr_type != GENERAL_TXT:
        print('Unexpected Descriptor type:', f'{descr_type:02X}')
        print(payload)
    return bytes_to_printable(payload)
class EDIDSegmenter:
    """Byte-level view over a raw EDID base block.

    Each property slices out the raw bytes of one EDID field; decoding those
    bytes into usable values is left to subclasses.
    """

    def __init__(self, edid_bytes):
        """Store the raw EDID, given as a bytes string."""
        self._edid = edid_bytes

    def _get_bytes(self, start, count):
        """Return *count* raw bytes beginning at offset *start*."""
        return self._edid[start:start + count]

    @property
    def fixed_header(self):
        """Bytes 0-7: the fixed EDID header pattern."""
        return self._get_bytes(0, 8)

    @property
    def manufacturer_id(self):
        """Bytes 8-9: packed manufacturer ID."""
        return self._get_bytes(8, 2)

    @property
    def product_code(self):
        """Bytes 10-11: manufacturer product code."""
        return self._get_bytes(10, 2)

    @property
    def serial_number(self):
        """Bytes 12-15: serial number."""
        return self._get_bytes(12, 4)

    @property
    def manufacture_week(self):
        """Byte 16: week of manufacture."""
        return self._get_bytes(16, 1)

    @property
    def manufacture_year(self):
        """Byte 17: year of manufacture (encoded)."""
        return self._get_bytes(17, 1)

    @property
    def edid_version(self):
        """Byte 18: EDID version."""
        return self._get_bytes(18, 1)

    @property
    def edid_revision(self):
        """Byte 19: EDID revision."""
        return self._get_bytes(19, 1)

    @property
    def horizontal_size(self):
        """Byte 21: horizontal screen size (encoded)."""
        return self._get_bytes(21, 1)

    @property
    def vertical_size(self):
        """Byte 22: vertical screen size (encoded)."""
        return self._get_bytes(22, 1)

    @property
    def descriptor1(self):
        """Bytes 54-71: first 18-byte descriptor block."""
        return self._get_bytes(54, 18)

    @property
    def descriptor2(self):
        """Bytes 72-89: second 18-byte descriptor block."""
        return self._get_bytes(72, 18)

    @property
    def descriptor3(self):
        """Bytes 90-107: third 18-byte descriptor block."""
        return self._get_bytes(90, 18)

    @property
    def descriptor4(self):
        """Bytes 108-125: fourth 18-byte descriptor block."""
        return self._get_bytes(108, 18)

    def __repr__(self):
        return f'{type(self).__name__}({self._edid!r})'
class EDIDParser(EDIDSegmenter):
    """Decodes the raw byte fields sliced out by EDIDSegmenter into values."""
    @property
    def manufacturer_id(self):
        """Three-letter manufacturer code, upper-cased.

        The two raw bytes hold one leading zero bit followed by three 5-bit
        letters (1 = 'a' ... 26 = 'z'); index 0 of the alphabet is a blank.
        """
        alphabet = ' abcdefghijklmnopqrstuvwxyz'
        mid = super().manufacturer_id
        bits = bytes_to_bits(mid)
        bits = ''.join(bits)
        bits = bits[1:] # remove header zero
        letters = [int(''.join(x), 2) for x in grouper(bits, 5)]
        letters = [alphabet[i] for i in letters]
        return ''.join(letters).upper()
    @property
    def manufacture_year(self):
        """Year of manufacture; the raw byte is an offset from 1990."""
        year = super().manufacture_year[0]
        return 1990 + year
    @property
    def edid_version(self):
        """EDID version number as an int."""
        return super().edid_version[0]
    @property
    def edid_revision(self):
        """EDID revision number as an int."""
        return super().edid_revision[0]
    @property
    def horizontal_size(self):
        "Horizontal size in cm"
        return super().horizontal_size[0]
    @property
    def vertical_size(self):
        "Vertical size in cm"
        return super().vertical_size[0]
    @property
    def descriptor1(self):
        """Text payload of descriptor block 1, whitespace-stripped."""
        desc = parse_descriptor(super().descriptor1)
        return desc.strip()
    @property
    def descriptor2(self):
        """Text payload of descriptor block 2, whitespace-stripped."""
        desc = parse_descriptor(super().descriptor2)
        return desc.strip()
    @property
    def descriptor3(self):
        """Text payload of descriptor block 3, whitespace-stripped."""
        desc = parse_descriptor(super().descriptor3)
        return desc.strip()
    @property
    def descriptor4(self):
        """Text payload of descriptor block 4, whitespace-stripped."""
        desc = parse_descriptor(super().descriptor4)
        return desc.strip()
MonInfo = namedtuple('MonInfo', ('mfg', 'year', 'horizontal_size', 'vertical_size', 'descriptor'))


def monitor_info(edid_bytes):
    """Summarise a raw EDID blob as a MonInfo tuple.

    Non-empty descriptor texts are joined with '; ' into one field.
    """
    parsed = EDIDParser(edid_bytes)
    descriptors = (parsed.descriptor1, parsed.descriptor2,
                   parsed.descriptor3, parsed.descriptor4)
    joined = '; '.join(d for d in descriptors if d)
    return MonInfo(parsed.manufacturer_id, parsed.manufacture_year,
                   parsed.horizontal_size, parsed.vertical_size, joined)
if __name__ == '__main__':
    # Demo: read the EDID of the first screen via the local x11read helper
    # and print the decoded summary.
    import x11read
    EXAMPL_EDID = x11read.get_output_edid(i_screen=0)
    x = monitor_info(EXAMPL_EDID)
    print(x)
| Python | 0.000001 | |
9c33c1e7fcaf111e7a590b035974aa288bbe5895 | Create weechat script | czar.py | czar.py | SCRIPT_NAME = 'czar'
SCRIPT_AUTHOR = 'Samuel Damashek <samuel.damashek@gmail.com>'
SCRIPT_VERSION = '1.0'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Run czar commands natively in WeeChat'
SCRIPT_COMMAND = 'czar'
try:
import weechat
except ImportError:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: http://www.weechat.org/')
exit(1)
import hashlib, time
# Option name -> (default value, description); mirrored into czar_settings
# from the WeeChat plugin configuration at load time.
czar_settings_default = {
    'key' : ('', 'key for signing messages'),
}
# Live option values, kept in sync by czar_config_cb.
czar_settings = {}
def commandtoken(nick, command):
    """SHA-1 token binding *nick* and *command* to the shared signing key.

    The current time is quantised to 300-second windows, so a token stays
    valid for at most one window.
    """
    window = str(int(time.time()) // 300)
    payload = "{}{}{}{}".format(window, nick, command, czar_settings['key'])
    return hashlib.sha1(payload.encode()).hexdigest()
def optoken(challenge, nick):
    """SHA-1 response to an op *challenge*, keyed on the shared secret and *nick*."""
    payload = "{}{}{}".format(challenge, czar_settings['key'], nick)
    return hashlib.sha1(payload.encode()).hexdigest()
def czar_config_cb(data, option, value):
    """WeeChat config callback: mirror the changed option into czar_settings.

    The option name arrives fully qualified; only the last dot-separated
    component is used as the settings key.
    """
    global czar_settings
    key = option.rsplit('.', 1)[-1]
    czar_settings[key] = value
    return weechat.WEECHAT_RC_OK
def czar_cmd_cb(data, buffer, args):
    """Handle the /czar command: 'op' requests ops, 'cmd' broadcasts a bot command."""
    parts = args.split(' ')
    subcommand = parts[0]
    if subcommand == 'op':
        servername = weechat.buffer_get_string(buffer, 'name').split('.')[0]
        if len(parts) > 2:
            # An explicit channel was given: look up that buffer on the same plugin.
            plugin = weechat.buffer_get_string(buffer, 'plugin')
            buf_name = weechat.buffer_get_string(buffer, 'name')
            buf_name = '.'.join(buf_name.split('.')[:-1] + [parts[2]])
            target = weechat.buffer_search(plugin, buf_name)
        else:
            target = buffer
        nick = weechat.info_get('irc_nick', servername)
        weechat.command(target, '%s: opme %s' % (parts[1], commandtoken(nick, 'opme:')))
    elif subcommand == 'cmd':
        servername = weechat.buffer_get_string(buffer, 'name').split('.')[0]
        token = commandtoken(weechat.info_get('irc_nick', servername),
                             ':'.join([parts[1], ','.join(parts[2:])]))
        weechat.command(buffer, ';;%s %s %s' % (parts[1], token, ' '.join(parts[2:])))
    else:
        weechat.prnt('', 'Invalid command in czar.')
    return weechat.WEECHAT_RC_OK
if __name__ == '__main__':
    # Register with WeeChat; everything below only runs inside the client.
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
        version = weechat.info_get('version_number', '') or 0
        # Seed czar_settings from saved plugin config, writing defaults back
        # for options that have never been set.
        for option, value in czar_settings_default.items():
            if weechat.config_is_set_plugin(option):
                czar_settings[option] = weechat.config_get_plugin(option)
            else:
                weechat.config_set_plugin(option, value[0])
                czar_settings[option] = value[0]
            if int(version) >= 0x00030500:
                # Option descriptions require WeeChat >= 0.3.5.
                weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
        weechat.hook_config('plugins.var.python.%s.*' % SCRIPT_NAME, 'czar_config_cb', '')
        #weechat.hook_print('', '', 'CHALLENGE', 1, 'czar_msg_cb', '')
        weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC, 'op|cmd',
            ' op <nick> [channel]: request to be opped in the current channel or [channel] if specified, by <nick>\n'
            'cmd <cmd>: issue <cmd> to all bots (do not specify the cmdchar)', 'op|cmd', 'czar_cmd_cb', '')
| Python | 0 | |
23086155315b39e814a1a73b49c80b19cbdb8e12 | 476. Number Complement | p476.py | p476.py | import unittest
class Solution(object):
    def findComplement(self, num):
        """Flip every significant bit of *num* (LeetCode 476).

        :type num: int
        :rtype: int
        """
        # Smear the highest set bit downward to build an all-ones mask that
        # covers num's significant bits, then XOR to flip them.
        mask = num
        for shift in (1, 2, 4, 8, 16):
            mask |= mask >> shift
        return num ^ mask
class Test(unittest.TestCase):
    """Unit tests for Solution.findComplement."""
    def test(self):
        self._test(5, 2)
        self._test(1, 0)
    def _test(self, num, expected):
        # Helper: run one input through the solution and compare.
        actual = Solution().findComplement(num)
        self.assertEqual(actual, expected)
if __name__ == '__main__':
    # Run the tests when executed directly.
    unittest.main()
| Python | 0.999999 | |
af19c62cfa27f27cd37bf558ac77a7845dff7754 | Create generate_chapters.py | sandbox/generate_chapters.py | sandbox/generate_chapters.py | @language python
# Editor-embedded script (the '@language python' directive and the p/c
# globals look like the Leo outliner API — confirm): create one child node
# per chapter of the configured Feynman Lectures volume, putting the
# chapter's URL in the node body.
VOLUME = 'II'
TOTAL_CHAPTERS = 42
URL = "http://www.feynmanlectures.caltech.edu/{0}_{1:02}.html"
copied_position = p.copy()
for index in range(1, TOTAL_CHAPTERS+1):
    new_node = copied_position.insertAsLastChild()
    # Headline carries the chapter label; body carries the chapter URL.
    new_node.h = "@chapter {0} Chapter {1:02} - ".format(VOLUME, index)
    new_node.b = URL.format(VOLUME, index)
c.redraw_now()
| Python | 0.000009 | |
48115d48da43f7f4517d8f55edee95d6c9e7cc45 | Create saveGraphToFile.py | save-load/saveGraphToFile.py | save-load/saveGraphToFile.py | from py2neo import Graph, Node, Relationship
# Query a user's subgraph from Neo4j (py2neo) and dump it, together with
# whole-graph overviews, to graph.txt.
graph = Graph()
# Get username
usr_name = input("Enter username: ")
print ("Username: " + usr_name)
# Get user_id
usr_id = input("Enter user id: ")
print ("User id: " + usr_id)
print("Nodes")
resultsAllNodes = graph.cypher.execute("MATCH (n) RETURN n")
print(resultsAllNodes)
print("Nodes plus relationships")
resultsAll = graph.cypher.execute("START n=node(*) MATCH (n)-[r]->(m) RETURN n,r,m")
print(resultsAll)
# NOTE(review): user input is concatenated directly into the Cypher text,
# which permits query injection. The relationship type cannot be a Cypher
# parameter, so at minimum these inputs should be validated/escaped.
query1 = "MATCH (user { username:'"
query1 = query1 + usr_name
query1 = query1 + "' })-[:"
query1 = query1 + usr_id
query1 = query1 + "]->(n) RETURN n"
results = graph.cypher.execute(query1)
print(results)
query2 = "MATCH (user { username:'"
query2 = query2 + usr_name
query2 = query2 + "' })-[:"
query2 = query2 + usr_id
query2 = query2 + "]->(n)-[r]->(m) RETURN r"
results2 = graph.cypher.execute(query2)
print(results2)
# BUGFIX: write the dump inside a context manager so the file handle is
# closed even if one of the writes raises.
with open('graph.txt', 'w') as f:
    f.write(usr_name+"\n")
    f.write(usr_id+"\n")
    f.write(str(results))
    f.write(str(results2))
| Python | 0.000001 | |
34fff4bf13fa2c4d481a06339981db08239138ae | add test case of petitlyrics | lyric_engine/tests/test_petitlyrics.py | lyric_engine/tests/test_petitlyrics.py | # coding: utf-8
import os
import sys
# Make the sibling ``modules`` directory importable before pulling in the
# module under test.
module_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'modules')
sys.path.append(module_dir)
import unittest
from petitlyrics import PetitLyrics as Lyric
class PetitLyricsTest(unittest.TestCase):
    """Integration tests for the PetitLyrics scraper.

    NOTE: these hit the live petitlyrics.com site, so they depend on network
    access and on the pages (titles, credits, lyric lengths) not changing.
    """
    def test_url_01(self):
        url = 'http://petitlyrics.com/lyrics/34690'
        obj = Lyric(url)
        obj.parse()
        self.assertEqual(obj.title, u'Tune The Rainbow')
        self.assertEqual(obj.artist, u'坂本 真綾')
        self.assertEqual(obj.lyricist, u'岩里 祐穂')
        self.assertEqual(obj.composer, u'菅野 よう子')
        self.assertEqual(len(obj.lyric), 819)
    def test_url_02(self):
        url = 'http://petitlyrics.com/lyrics/936622'
        obj = Lyric(url)
        obj.parse()
        self.assertEqual(obj.title, u'RPG')
        self.assertEqual(obj.artist, u'SEKAI NO OWARI')
        self.assertEqual(obj.lyricist, u'Saori/Fukase')
        self.assertEqual(obj.composer, u'Fukase')
        self.assertEqual(len(obj.lyric), 933)
if __name__ == '__main__':
    # Run the tests when executed directly.
    unittest.main()
| Python | 0.002379 | |
16fd4ba06b6da8ec33a83a8cfe2e38a130fb47b3 | Add a module for common plotting routines that will be used. | plot.py | plot.py | #!/usr/bin/env python
"""
plot.py
State Estimation and Analysis for PYthon
Module with plotting utilities
Written by Brian Powell on 10/18/13
Copyright (c)2013 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import numpy as np
from scipy import ndimage
import os
import re
from matplotlib import pyplot as plt
def stackbar(x, y, colors=None, **kwargs):
    """Draw the rows of *y* as a stacked bar chart over positions *x*.

    Row 0 sits on the axis; each subsequent row is drawn on top of the
    running total of the rows below it.  *colors* may supply one color per
    row; extra keyword arguments are passed through to ``plt.bar``.
    """
    bottom = y[0, :]
    if colors is None:
        colors = [""] * y.shape[0]
    plt.bar(x, y[0, :], color=colors[0], **kwargs)
    for row in range(1, y.shape[0]):
        plt.bar(x, y[row, :], color=colors[row], bottom=bottom, **kwargs)
        bottom = bottom + y[row, :]
| Python | 0 | |
cbbc6d943ebc4f7e1efa84f2c0b5d976de21101d | Add "imported from" source using a hardcoded list of items | scripts/claimit.py | scripts/claimit.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Legoktm
Copyright (C) 2013 Pywikipediabot team
Distributed under the MIT License
Usage:
python claimit.py [pagegenerators] P1 Q2 P123 Q456
You can use any typical pagegenerator to provide with a list of pages
Then list the property-->target pairs to add.
"""
import pywikibot
from pywikibot import pagegenerators
# Data repository (Wikidata) for the configured site; evaluated at import time.
repo = pywikibot.Site().data_repository()
# Per-language Wikipedia items used as the target of the "imported from"
# source claim (e.g. 'en' -> Q328, English Wikipedia).
source_values = {'en': pywikibot.ItemPage(repo, 'Q328'),
                 'sv': pywikibot.ItemPage(repo, 'Q169514'),
                 'de': pywikibot.ItemPage(repo, 'Q48183'),
                 'it': pywikibot.ItemPage(repo, 'Q11920'),
                 'no': pywikibot.ItemPage(repo, 'Q191769'),
                 'ar': pywikibot.ItemPage(repo, 'Q199700'),
                 'es': pywikibot.ItemPage(repo, 'Q8449'),
                 'pl': pywikibot.ItemPage(repo, 'Q1551807'),
                 'ca': pywikibot.ItemPage(repo, 'Q199693'),
                 'fr': pywikibot.ItemPage(repo, 'Q8447'),
                 'nl': pywikibot.ItemPage(repo, 'Q10000'),
                 'pt': pywikibot.ItemPage(repo, 'Q11921'),
                 'ru': pywikibot.ItemPage(repo, 'Q206855'),
                 'vi': pywikibot.ItemPage(repo, 'Q200180'),
                 'be': pywikibot.ItemPage(repo, 'Q877583'),
                 'uk': pywikibot.ItemPage(repo, 'Q199698'),
                 'tr': pywikibot.ItemPage(repo, 'Q58255'),
                 } # TODO: This should include all projects
# P143 = "imported from"; pointed at the current wiki's item when known.
imported_from = pywikibot.Claim(repo, 'p143')
source = source_values.get(pywikibot.Site().language(), None)
if source:
    imported_from.setTarget(source)
def addClaims(page, claims):
    '''
    Add each claim in *claims* to the Wikidata item linked to *page*.

    Returns False when the page has no Wikidata item.  Claims whose property
    already exists on the item are skipped; added claims are sourced with
    the module-level "imported from" reference when one is configured.
    '''
    item = pywikibot.ItemPage.fromPage(page)
    pywikibot.output('Processing %s' % page)
    if not item.exists():
        pywikibot.output('%s doesn\'t have a wikidata item :(' % page)
        #TODO FIXME: We should provide an option to create the page
        return False
    for claim in claims:
        if claim.getID() in item.get().get('claims'):
            pywikibot.output(u'A claim for %s already exists. Skipping' % (claim.getID(),))
            #TODO FIXME: This is a very crude way of dupe checking
        else:
            pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))
            item.addClaim(claim)
            if source:
                claim.addSource(imported_from, bot=True)
            #TODO FIXME: We need to check that we aren't adding a duplicate
def main():
    """Split argv into page-generator options and property/target pairs,
    build the claims, then apply them to every generated page."""
    gen = pagegenerators.GeneratorFactory()
    commandline_claims = list()
    for arg in pywikibot.handleArgs():
        if gen.handleArg(arg):
            continue
        commandline_claims.append(arg)
    if len(commandline_claims) % 2:
        # Claims must come in property/target pairs.
        raise ValueError # or something.
    claims = list()
    for i in xrange (0, len(commandline_claims), 2):
        claim = pywikibot.Claim(repo, commandline_claims[i])
        claim.setTarget(pywikibot.ItemPage(repo, commandline_claims[i+1]))
        claims.append(claim)
    generator = gen.getCombinedGenerator()
    if generator:
        for page in generator:
            addClaims(page, claims)
if __name__ == "__main__":
    # Script entry point.
    main()
| # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Legoktm
Copyright (C) 2013 Pywikipediabot team
Distributed under the MIT License
Usage:
python claimit.py [pagegenerators] P1 Q2 P123 Q456
You can use any typical pagegenerator to provide with a list of pages
Then list the property-->target pairs to add.
"""
import pywikibot
from pywikibot import pagegenerators
# Data repository (Wikidata) for the configured site; evaluated at import time.
repo = pywikibot.Site().data_repository()
def addClaims(page, claims):
    '''
    Add each claim in *claims* to the Wikidata item linked to *page*.

    Returns False when the page has no Wikidata item; claims whose property
    already exists on the item are skipped.
    '''
    item = pywikibot.ItemPage.fromPage(page)
    pywikibot.output('Processing %s' % page)
    if not item.exists():
        pywikibot.output('%s doesn\'t have a wikidata item :(' % page)
        #TODO FIXME: We should provide an option to create the page
        return False
    for claim in claims:
        if claim.getID() in item.get().get('claims'):
            pywikibot.output(u'A claim for %s already exists. Skipping' % (claim.getID(),))
            #TODO FIXME: This is a very crude way of dupe checking
        else:
            pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))
            item.addClaim(claim)
            #TODO FIXME: We should add a source for each claim that is added
            #TODO FIXME: We need to check that we aren't adding a duplicate
def main():
    """Split argv into page-generator options and property/target pairs,
    build the claims, then apply them to every generated page."""
    gen = pagegenerators.GeneratorFactory()
    commandline_claims = list()
    for arg in pywikibot.handleArgs():
        if gen.handleArg(arg):
            continue
        commandline_claims.append(arg)
    if len(commandline_claims) % 2:
        # Claims must come in property/target pairs.
        raise ValueError # or something.
    claims = list()
    for i in xrange (0, len(commandline_claims), 2):
        claim = pywikibot.Claim(repo, commandline_claims[i])
        claim.setTarget(pywikibot.ItemPage(repo, commandline_claims[i+1]))
        claims.append(claim)
    generator = gen.getCombinedGenerator()
    if generator:
        for page in generator:
            addClaims(page, claims)
if __name__ == "__main__":
    # Script entry point.
    main()
| Python | 0.000051 |
a57fcbd436780adfaacfefb426b53458c65e1012 | Create pubnubStreamUnlimited.py | pubnubStreamUnlimited.py | pubnubStreamUnlimited.py | from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
# Variables that contain the user credentials to access the Twitter API.
# NOTE(review): these are checked-in placeholders; real credentials should be
# injected from the environment or a config file, never committed.
access_token = "Twitter_Access_Token"
access_token_secret = "Twitter_Access_Token_Secret"
consumer_key = "Twitter_Consumer_Key"
consumer_secret = "Twitter_Consumer_Secret"
# Configure the personal PubNub subscribe and publish keys.
pnconfigRachel = PNConfiguration()
pnconfigRachel.subscribe_key = 'Pubnub_Subscribe_Key'
pnconfigRachel.publish_key = 'Pubnub_Publish_Key'
pubnubRachel = PubNub(pnconfigRachel)
# Callback for any publish
def my_publish_callback(envelope, status):
    """PubNub publish callback: log whether the publish succeeded."""
    if status.is_error():
        # Publish failed: 'category' on the status describes the cause, and
        # the request could be resent with [status retry].
        print("Publish Error")
    else:
        # Message successfully published to the specified channel.
        print("Published")
# This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Tweepy stream listener: republishes each tweet's text to the
    'sentiment-analysis' PubNub channel with an incrementing session id."""
    # Session-id counter for the downstream (Watson) block.
    # NOTE(review): the first increment in on_data shadows this class
    # attribute with an instance attribute, so ids count per listener.
    session_Id = 0
    def on_data(self, data):
        try:
            tweet = json.loads(data)
            self.session_Id = self.session_Id + 1
            pubnubRachel.publish().channel("sentiment-analysis").message({"session_id":self.session_Id,"text":tweet['text']}).async(my_publish_callback)
            return True
        except KeyError:
            # Payloads without a 'text' field are silently skipped.
            pass
    def on_error(self, status):
        print status
# Callback for sentiment channel
class SentimentSubscribeCallback(SubscribeCallback):
# Your Initial State bucket key
# Make sure to create a bucket in IS with the same key
bucket_key = "pubnubtrump"
def presence(self, pubnub, presence):
pass # handle incoming presence data
def status(self, pubnub, status):
if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
pass # This event happens when radio / connectivity is lost
elif status.category == PNStatusCategory.PNConnectedCategory:
# Connect event. You can do stuff like publish, and know you'll get it.
# Or just use the connected event to confirm you are subscribed for
# UI / internal notifications, etc
pass
elif status.category == PNStatusCategory.PNReconnectedCategory:
pass
# Happens as part of our regular operation. This event happens when
# radio / connectivity is lost, then regained.
elif status.category == PNStatusCategory.PNDecryptionErrorCategory:
pass
# Handle message decryption error. Probably client configured to
# encrypt messages and on live data feed it received plain text.
# Here we construct and publish a payload made up of parameters from sentiment analysis
def message(self, pubnub, message):
previous_message = "placeholder"
if previous_message != message.message:
if 'session_sentiment' in message.message:
payloadMsg = {"key": "Tweet","value": message.message['text']}
if 'positive' in message.message['session_sentiment']:
payloadPos = {"key": "Positive Level","value":message.message['session_sentiment']['positive']['count']}
else:
payloadPos = {"key": "Positive Level","value":0}
if 'negative' in message.message['session_sentiment']:
payloadNeg={"key": "Negative Level","value": message.message['session_sentiment']['negative']['count']}
else:
payloadNeg={"key": "Negative Level","value": 0}
if 'neutral' in message.message['session_sentiment']:
payloadNeut={"key": "Neutral Level","value": message.message['session_sentiment']['neutral']['count']}
else:
payloadNeut={"key": "Neutral Level","value": 0}
payloadScore={"key": "Score","value": message.message['score']}
if message.message['score'] > 0.25:
payloadCoord={"key": "User Location Pos","value":TwitterSubscribeCallback.coord}
elif message.message['score'] < -0.25:
payloadCoord={"key": "User Location Neg","value":TwitterSubscribeCallback.coord}
elif TwitterSubscribeCallback.coord == "none":
payloadCoord={"key": "User Location","value":"No location data"}
else:
payloadCoord={"key": "User Location Neut","value":TwitterSubscribeCallback.coord}
payload=merge(payloadMsg,payloadPos,payloadNeg,payloadNeut,payloadScore,payloadCoord)
print payload
payload = {"events": payload, "bucketKey": self.bucket_key}
pubnubRachel.publish().channel("initial-state-streamer").message(payload).async(my_publish_callback)
previous_message = message.message
pass
else:
print "No sentiment message from Watson"
pass
else:
print "Duplicate Message"
pass
# Function that batches all the events associated with one tweet
def merge(set1, set2, set3, set4, set5, set6):
    """Batch the six event dicts for one tweet into a single list.

    The six positional parameters are kept for caller compatibility; the
    manual append chain was collapsed into a list literal (idiom).
    """
    return [set1, set2, set3, set4, set5, set6]
# This handles Twitter authentication and the connection to the Twitter
# Streaming API.
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
# Filter the Twitter stream to capture tweets matching the keywords below.
# NOTE(review): Stream.filter normally blocks, so the PubNub subscription
# lines below would never run while the stream is alive — confirm intent.
stream.filter(track=['Trump','trump','POTUS','potus'])
# Configure PubNub subscriptions
pubnubRachel.add_listener(SentimentSubscribeCallback())
pubnubRachel.subscribe().channels('sentiment-analysis').execute()
| Python | 0.000001 | |
dcca5e257336b7c28568c1a0535d4c3520836e8b | Create serializekiller.py | serializekiller.py | serializekiller.py | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: SerializeKiller
# Purpose: Finding vulnerable vulnerable servers
#
# Author: (c) John de Kroon, 2015
#-------------------------------------------------------------------------------
import os
import subprocess
import json
import threading
import time
import socket
import sys
import argparse
from datetime import datetime
parser = argparse.ArgumentParser(prog='serializekiller.py', formatter_class=argparse.RawDescriptionHelpFormatter, description="""SerialIceKiller.
Usage:
./serializekiller.py targets.txt
""")
parser.add_argument('file', help='File with targets')
args = parser.parse_args()
def nmap(url, retry = False, *args):
    """Scan *url* with nmap for ports tied to Java-deserialization services
    and hand any open ones to the protocol-specific checks.

    Updates the module-global thread/hit counters; retries once on error.
    NOTE(review): *url* is concatenated into a shell=True command line, so a
    hostile targets file can inject shell commands.
    """
    global num_threads
    global shellCounter
    global threads
    num_threads +=1
    found = False
    cmd = 'nmap --open -p 1099,5005,8880,7001,16200 '+url
    print "Scanning: "+url
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        if "5005" in out:
            if(verify(url, "5005")):
                found = True
        if "8880" in out:
            if(verify(url, "8880")):
                found = True
        if "1099" in out:
            # Port 1099 is reported as possibly vulnerable without a probe.
            print " - (Possibly) Vulnerable "+url+" (1099)"
            found = True
        if "7001" in out:
            if(weblogic(url, 7001)):
                found = True
        if "16200" in out:
            if(weblogic(url, 16200)):
                found = True
        if(found):
            shellCounter +=1
        num_threads -=1
    except:
        # Best effort: shrink the thread budget, wait, then rescan once.
        num_threads -=1
        threads -= 1
        time.sleep(5)
        if(retry):
            print " ! Unable to scan this host "+url
        else:
            nmap(url, True)
def verify(url, port, retry = False):
    """Probe *url*:*port* over HTTPS then HTTP with curl, looking for the
    base64 Java-serialization marker 'rO0AB' in the response body.

    Returns True on a hit, False when the single retry also errors, and
    implicitly None when the service responds without the marker.
    """
    try:
        cmd = 'curl -m 10 --insecure https://'+url+":"+port
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        if "rO0AB" in out:
            print " - Vulnerable Websphere: "+url+" ("+port+")"
            return True
        cmd = 'curl -m 10 http://'+url+":"+port
        # NOTE(review): 'fp' is opened but never used; the Popen output goes
        # to pipes regardless.
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = p.communicate()
        if "rO0AB" in out:
            print " - Vulnerable Websphere: "+url+" ("+port+")"
            return True
    except:
        time.sleep(3)
        if(retry):
            print " ! Unable to verify vulnerablity for host "+url+":"+str(port)
            return False
        return verify(url, port, True)
#Used this part from https://github.com/foxglovesec/JavaUnserializeExploits
def weblogic(url, port):
    """Open a raw T3 handshake to *url*:*port*; a 'HELO' reply indicates a
    WebLogic T3 listener.

    NOTE(review): the socket is only closed on the normal path; an exception
    in connect/sendall/recv leaks it.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (url, port)
    sock.connect(server_address)
    # Send the T3 protocol greeting headers.
    headers='t3 12.2.1\nAS:255\nHL:19\nMS:10000000\nPU:t3://us-l-breens:7001\n\n'
    sock.sendall(headers)
    data = sock.recv(1024)
    sock.close()
    if "HELO" in data:
        print " - Vulnerable Weblogic: "+url+" ("+str(port)+")"
        return True
    return False
def dispatch(url):
try:
threading.Thread(target=nmap, args=(url, False, 1)).start()
except:
print " ! Unable to start thread. Waiting..."
time.sleep(2)
threads -= 2
dispatch(url)
def worker():
    """Feed every target from the input file to dispatch(), throttled by the
    global thread budget, then wait for completion and print a summary."""
    with open(args.file) as f:
        content = f.readlines()
    for url in content:
        # Throttle: wait until a scanner slot frees up.
        while((num_threads > threads)):
            time.sleep(1)
        url = str(url.replace("\r", ''))
        url = str(url.replace("\n", ''))
        url = str(url.replace("/", ''))
        dispatch(url)
    # Wait for the remaining scanner threads to drain.
    while(num_threads > 1):
        time.sleep(1)
    if(shellCounter > 0):
        # Highlight a non-zero hit count in red (ANSI escape).
        shellCounterText = "\033[1;31m"+str(shellCounter)+"\033[1;m"
    else:
        shellCounterText = str(shellCounter)
    print "\r\n => scan done. "+shellCounterText+" vulnerable hosts found."
    print "Execution time: "+str(datetime.now() - startTime)
    exit()
if __name__ == '__main__':
    startTime = datetime.now()
    print "\033[1;31mStart SerializeKiller...\033[1;m"
    print "This could take a while. Be patient.\r\n"
    # Shared scanner state: active thread count, max concurrency, hit count.
    num_threads = 0
    threads = 30
    shellCounter = 0
    # NOTE(review): Thread.start() returns None, so 't' is always None.
    t = threading.Thread(target=worker).start()
| Python | 0.000044 | |
94d40dfcf574d61df7def99a43d5b9fa0c75e244 | Add py solution for 406. Queue Reconstruction by Height | py/queue-reconstruction-by-height.py | py/queue-reconstruction-by-height.py | from collections import defaultdict
class Solution(object):
    """Reconstruct the queue (LeetCode 406) by inserting people tallest-first
    into an order-statistics BST keyed on position.

    Each node's val is ``(person, subtree_size)``; the size field lets
    insert() locate the k-th free slot without walking the whole tree.
    NOTE: TreeNode is assumed to be provided by the judge environment.
    """
    def insert(self, now, p, front):
        # Place p so that `front` of the already-inserted people precede it.
        lsize = 0 if now.left is None else now.left.val[1]
        if front <= lsize:
            if now.left is None:
                now.left = TreeNode((p, 1))
            else:
                self.insert(now.left, p, front)
        else:
            # Skip over the left subtree and the current node.
            if now.right is None:
                now.right = TreeNode((p, 1))
            else:
                self.insert(now.right, p, front - lsize - 1)
        # Every node on the insertion path gains one descendant.
        now.val = (now.val[0], now.val[1] + 1)
    def inOrder(self, cur):
        # Yield the stored people in left-to-right (final queue) order.
        if cur:
            for x in self.inOrder(cur.left):
                yield x
            yield cur.val[0]
            for x in self.inOrder(cur.right):
                yield x
    def reconstructQueue(self, people):
        """
        :type people: List[List[int]]
        :rtype: List[List[int]]
        """
        if not people:
            return people
        # Tallest first, ties by smaller k first: when a person is inserted,
        # their k counts exactly the already-placed (taller-or-equal) people
        # that must stand in front of them.
        people.sort(key=lambda x:(-x[0], x[1]))
        root = TreeNode((people[0], 1))
        for p in people[1:]:
            self.insert(root, p, p[1])
        return list(self.inOrder(root))
| Python | 0.000749 | |
8fb97711dd84512a8a654de3dca2bee24689a2a7 | add a test for pytestmark | pytest_tornado/test/test_fixtures.py | pytest_tornado/test/test_fixtures.py | import pytest
from tornado import gen
# Flag flipped by the 'preparations' fixture; the test below asserts it ran.
_used_fixture = False


@gen.coroutine
def dummy(io_loop):
    """Coroutine that defers once to the IO loop and then resolves to True."""
    yield gen.Task(io_loop.add_callback)
    raise gen.Return(True)
@pytest.fixture(scope='module')
def preparations():
    """Module-scoped fixture: records that fixture setup actually executed."""
    global _used_fixture
    _used_fixture = True
# Module-level mark: applies the 'preparations' fixture to every test here.
pytestmark = pytest.mark.usefixtures('preparations')
@pytest.mark.xfail(pytest.__version__ < '2.7.0',
                   reason='py.test 2.7 adds hookwrapper, fixes collection')
@pytest.mark.gen_test
def test_uses_pytestmark_fixtures(io_loop):
    # The coroutine must resolve and the module-level fixture must have run.
    assert (yield dummy(io_loop))
    assert _used_fixture
| Python | 0.000008 | |
76ce9117ed92a743734cd5ba7e209617a7664ad1 | Add partial benchmarking file for gala | benchmarks/bench_gala.py | benchmarks/bench_gala.py | import os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
    """Load the training watershed, membrane-probability and ground-truth stacks."""
    wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
    prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
    gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
    return wstr, prtr, gttr
def setup_tsdata():
    """Load the test watershed, membrane-probability and ground-truth stacks."""
    wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
    prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
    gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
    return wsts, prts, gtts
def setup_trgraph():
    """Build the region adjacency graph (RAG) over the training data."""
    ws, pr, ts = setup_trdata()
    g = agglo.Rag(ws, pr, feature_manager=em3d)
    return g
def setup_tsgraph():
    """Build the region adjacency graph (RAG) over the test data."""
    ws, pr, ts = setup_tsdata()
    g = agglo.Rag(ws, pr, feature_manager=em3d)
    return g
def setup_trexamples():
    """Generate (features, labels) examples by agglomerating against ground truth."""
    gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
    g = setup_trgraph()
    (X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
    y = y[:, 0]
    return X, y
def setup_classifier():
    """Train the default random forest on the generated training examples."""
    X, y = setup_trexamples()
    rf = classify.DefaultRandomForest()
    rf.fit(X, y)
    return rf
def setup_policy():
    """Build a merge-priority policy from a random forest.
    NOTE(review): this forest is freshly constructed and never fitted --
    confirm whether setup_classifier() was meant to supply it.
    """
    rf = classify.DefaultRandomForest()
    cl = agglo.classifier_probability(em3d, rf)
    return cl
def setup_tsgraph_queue():
    """Attach the policy to the test RAG and build its merge queue."""
    g = setup_tsgraph()
    cl = setup_policy()
    g.merge_priority_function = cl
    g.rebuild_merge_queue()
    return g
| Python | 0 | |
79fb9449da73e70ef7ed9e1e6862da41fc42ab75 | Add finalized source codes | src/hw1.py | src/hw1.py | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pylab
G = nx.Graph()
G.add_edges_from([('1','2'),('1','4'),('2','3'),('2','4'),('3','4'),('4','5'),('5','6'),('5','7'),('6','7')]) # create the graph
print G.nodes(), '\n', G.edges(), '\n', G.degree().values() # check graph is correct
adj = nx.adj_matrix(G)
print "adjency matrix: \n", adj.todense() # print adjency matrix
# Modularity matrix B = A - (k_i * k_j) / (2m), with k the degree vector and
# m the number of edges (Newman's definition).
k_ij = np.outer(G.degree().values(), G.degree().values())
mod = adj - k_ij / (2.*len(G.edges()))
print "modularity matrix: \n", mod
# suppose we only have 2 communities, we can iterate all the possible situations
# and get the optimum partition
modval_opt = -100000
z_opt = np.zeros((len(G.nodes()), 2))
for i in range(0, 2**(len(G.nodes())-1)): # iterate all the possible membership
    partition = np.matrix(map(int, list('{0:07b}'.format(i)))) # get a membership vector directly from the bits of an interger
                                                # e.g. i = 2, list('{0:07b}'.format(i)) will give a list
                                                # ['0', '0', '0', '0', '0','1','0']
                                                # map(int, list) will change it to be a int list [..., 1, 0]
    Z = np.transpose(np.concatenate((partition, 1-partition))) # this is a 7x2 membership matrix
    # print Z, "\n"
    # Modularity value Q = Tr(Z^T B Z) / (2m) for this candidate partition.
    modval_partition = np.trace(Z.transpose() * mod * Z) / (2*len(G.edges()))
    if modval_opt < modval_partition:
        modval_opt = modval_partition
        z_opt = Z
print "\n optimal community membership: \n", z_opt, "\n corresponds to maximum modularity value:\n", modval_opt
# print the graph with community in different color and node size propotional to
# its degree
plt.figure(figsize=(8,6))
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, node_color=np.array(z_opt.transpose())[0],
        node_size=400*np.array(G.degree().values()), alpha=0.8, linewidths=0)
labels = {}
for node in G.nodes():
    labels[node] = r'$'+node+'$'
nx.draw_networkx_labels(G, pos, labels, font_size=24)
nx.draw_networkx_edges(G, pos, width=16, edge_color='g', alpha=0.5)
plt.axis('off')
plt.show()
# for problem 3, to plot your facebook network. For me you can clearly see my
#friends is divided into 2 communities.
import codecs
with codecs.open('myFacebook.gml','r', 'utf8') as f: # don't use networkx.read_gml() functions, it will have
    lines = f.readlines() # UnicodeDecodeError, use these three lines to load gml
    fbG = nx.readwrite.gml.parse_gml(lines, relabel=True) # into a networkx graph
plt.figure(figsize=(18,12))
pos = nx.spring_layout(fbG)
# Per-node attribute vectors collected for styling the drawing below.
gender = []
wallcount = []
ids = {}
deg = []
DegDict = fbG.degree()
for name,attr in fbG.node.iteritems():
    gender.append(0 if attr['sex'] == 'male' else 1)
    wallcount.append(attr['wallcount'])
    ids[name] = attr['id']
    deg.append(DegDict[name])
nx.draw_networkx_nodes(fbG, pos, node_color=gender, node_size=200+40*np.array(deg),
        alpha=0.8, linewidths=0)
nx.draw_networkx_edges(fbG, pos, width=1.8, edge_color='black', alpha=0.3)
nx.draw_networkx_labels(fbG, pos, labels=ids, font_size=14, font_family='DejaVu Sans') # remove labels=ids will use name as labels
plt.axis('off')
plt.show()
180faadb24bf3b4d153f1c46c4883bdcc0b987ff | add a manifest (.cvmfspublished) abstraction class | python/cvmfs/manifest.py | python/cvmfs/manifest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by René Meusel
This file is part of the CernVM File System auxiliary tools.
"""
import datetime
class UnknownManifestField(Exception):
    """Raised when .cvmfspublished contains an unrecognized key character.

    Must subclass Exception: the original plain class could not be raised on
    Python 3 at all, and on Python 2 the `raise` relied on deprecated
    old-style-class exception semantics.
    """
    def __init__(self, key_char):
        Exception.__init__(self, key_char)
        # The one-character manifest key that was not understood.
        self.key_char = key_char
    def __str__(self):
        return self.key_char
class ManifestValidityError(Exception):
    """Raised when .cvmfspublished lacks a mandatory field.

    The original class did not inherit Exception yet called
    Exception.__init__ unbound, which raises TypeError on construction in
    both Python 2 and Python 3; subclassing Exception fixes that and makes
    the class raisable.
    """
    def __init__(self, message):
        Exception.__init__(self, message)
class Manifest:
    """ Wraps information from .cvmfspublished"""
    def __init__(self, manifest_file):
        """ Initializes a Manifest object from a file pointer to .cvmfspublished """
        for line in manifest_file.readlines():
            if len(line) == 0:
                continue
            # "--" begins the signature block; everything after it is ignored.
            if line[0:2] == "--":
                break
            self._read_line(line)
        self._check_validity()
    def __str__(self):
        return "<Manifest for " + self.repository_name + ">"
    def __repr__(self):
        return self.__str__()
    def _read_line(self, line):
        """ Parse lines that appear in .cvmfspublished """
        # Each payload line is "<key char><value>"; [1:-1] strips the key
        # character and the trailing newline.
        key_char = line[0]
        data = line[1:-1]
        if key_char == "C":
            self.root_catalog = data
        elif key_char == "X":
            self.certificate = data
        elif key_char == "H":
            self.history_database = data
        elif key_char == "T":
            # NOTE: fromtimestamp() yields a naive datetime in local time.
            self.last_modified = datetime.datetime.fromtimestamp(int(data))
        elif key_char == "R":
            self.root_hash = data
        elif key_char == "D":
            self.ttl = int(data)
        elif key_char == "S":
            self.revision = int(data)
        elif key_char == "N":
            self.repository_name = data
        elif key_char == "L":
            self.unknown_field1 = data # TODO: ask Jakob what L means
        else:
            raise UnknownManifestField(key_char)
    def _check_validity(self):
        """ Checks that all mandatory fields are found in .cvmfspublished """
        if not hasattr(self, 'root_catalog'):
            raise ManifestValidityError("Manifest lacks a root catalog entry")
        if not hasattr(self, 'root_hash'):
            raise ManifestValidityError("Manifest lacks a root hash entry")
        if not hasattr(self, 'ttl'):
            raise ManifestValidityError("Manifest lacks a TTL entry")
        if not hasattr(self, 'revision'):
            raise ManifestValidityError("Manifest lacks a revision entry")
        if not hasattr(self, 'repository_name'):
            raise ManifestValidityError("Manifest lacks a repository name")
| Python | 0.000001 | |
e044dceeb4f6dd91a1e29228cde7906a114f36ba | add ping-listener.py | src/ping-listener.py | src/ping-listener.py | #!/usr/bin/python
# This tool is for educational use only!
# Description: Listen on a networkinterface for incomming pings (ICMP packets)
# and display this pings on the console
# Requirements: scapy + root privileges
import sys
from scapy.all import *
from pprint import *
def printusage():
    """ Prints usage information """
    print "Usage: {0} <iface>".format(sys.argv[0])
    print " ---> This tool is for educational use only! <---"
# The capture interface is the single required command line argument.
if len(sys.argv) < 2:
    printusage()
    sys.exit(1)
def icmp_callback(packet):
    """Dump one sniffed ICMP packet object to the console."""
    # print the whole networkpacket object on the console
    # TODO: Optimize output...
    pprint(packet)
# Sniff forever: run the callback per ICMP packet; store=0 keeps nothing in RAM.
sniff(prn=icmp_callback, filter="icmp", iface=sys.argv[1], store=0)
| Python | 0 | |
a2dd80d7bcd1096b554c43d085eabe1eb858fec8 | Add code to create test data | code/make_test_data.py | code/make_test_data.py | from __future__ import absolute_import, division, print_function
import nibabel as nib
import numpy as np
import os
# Paths to directories containing the test subject's data
path_data = "../data/ds005/testsub/"
path_BOLD = path_data + "BOLD/task001_testrun/bold.nii.gz"
path_behav = path_data + "behav/task001_testrun/behavdata.txt"
# Create these directories
os.makedirs(path_BOLD)
os.makedirs(path_behav)
# Give the BOLD data the identity affine for simplicity
affine = np.eye(4)
# The fMRI data consists of three volumes of shape (3, 3, 3)
# Corner elements increase by 1 per unit time
# Edge elements increase by 2 per unit time
# Center of face elements increase by 3 per unit time
# The center element increases by 4 per unit time
data = np.array([[[[ 0, 1, 2],
[ 1, 3, 5],
[ 0, 1, 2]],
[[ 1, 3, 5],
[ 2, 5, 8],
[ 1, 3, 5]],
[[ 0, 1, 2],
[ 1, 3, 5],
[ 0, 1, 2]]],
[[[ 1, 3, 5],
[ 2, 5, 8],
[ 1, 3, 5]],
[[ 2, 5, 8],
[ 3, 7, 11],
[ 2, 5, 8]],
[[ 1, 3, 5],
[ 2, 5, 8],
[ 1, 3, 5]]],
[[[ 0, 1, 2],
[ 1, 3, 5],
[ 0, 1, 2]],
[[ 1, 3, 5],
[ 2, 5, 8],
[ 1, 3, 5]],
[[ 0, 1, 2],
[ 1, 3, 5],
[ 0, 1, 2]]]])
# BOLD.nii contains the above two elements
BOLD = nib.nift1.NiftiImage(data, affine)
nib.save(BOLD, path_BOLD)
# The behavioral data consists of four rows: a row of headers, and one row for
# each of three trials that occur at times 0.0, 2.0, and 4.0
behav = "onset\tgain\tloss\tPTval\trespnum\trespcat\tRT\n"
behav = behav + "0.00\t10\t20\t-9.80\t4\t0\t1.077\n"
behav = behav + "2.00\t20\t20\t0.20\t0\t-1\t0.000\n"
behav = behav + "4.00\t30\t20\t10.20\t2\t1\t1.328"
# Create behavdata.txt and open to write
f = open(path_behav + "behavdata.txt")
f.write(behav)
f.close() | Python | 0.000001 | |
f2fb5fc41c78ac7722812aa1cdb54078bcbc70fe | Add Node test cases | coil/test/test_node.py | coil/test/test_node.py | """Tests for coil.struct.Node"""
import unittest
from coil import errors
from coil.struct import Node
class BasicTestCase(unittest.TestCase):
    """Construction behaviour of coil.struct.Node."""
    def testInit(self):
        """A child node records its name, dotted path, container and root."""
        r = Node()
        a = Node(r, "a")
        b = Node(a, "b")
        # assertEqual/assertIs replace the deprecated assertEquals/assert_
        # aliases (removed in Python 3.12); assertIs also gives a clearer
        # failure message for identity checks.
        self.assertEqual(b.node_name, "b")
        self.assertEqual(b.node_path, "@root.a.b")
        self.assertIs(b.container, a)
        self.assertIs(b.tree_root, r)
class PathTestCase(unittest.TestCase):
    """relative_path()/absolute_path() conversions on a small @root.a.b tree."""
    def setUp(self):
        self.r = Node()
        self.a = Node(self.r, "a")
        self.b = Node(self.a, "b")
    # assertEqual replaces the deprecated assertEquals alias throughout
    # (removed in Python 3.12).
    def testRelative(self):
        """Absolute paths rendered relative to each node in the tree."""
        self.assertEqual(self.r.relative_path("@root"), ".")
        self.assertEqual(self.r.relative_path("@root.a"), "a")
        self.assertEqual(self.r.relative_path("@root.a.b"), "a.b")
        self.assertEqual(self.r.relative_path("@root.a.b.c"), "a.b.c")
        self.assertEqual(self.a.relative_path("@root"), "..")
        self.assertEqual(self.a.relative_path("@root.a"), ".")
        self.assertEqual(self.a.relative_path("@root.a.b"), "b")
        self.assertEqual(self.a.relative_path("@root.a.b.c"), "b.c")
        self.assertEqual(self.b.relative_path("@root"), "...")
        self.assertEqual(self.b.relative_path("@root.a"), "..")
        self.assertEqual(self.b.relative_path("@root.a.b"), ".")
        self.assertEqual(self.b.relative_path("@root.a.b.c"), "c")
        self.assertEqual(self.b.relative_path("@root.x.y.z"), "...x.y.z")
        self.assertEqual(self.b.relative_path("@root.a.x.y"), "..x.y")
    def testAbsolute(self):
        """Relative paths resolved against each node; over-ascending raises."""
        self.assertEqual(self.r.absolute_path("."), "@root")
        self.assertEqual(self.r.absolute_path("a"), "@root.a")
        self.assertEqual(self.r.absolute_path(".a"), "@root.a")
        self.assertEqual(self.r.absolute_path("a.b"), "@root.a.b")
        self.assertEqual(self.b.absolute_path("."), "@root.a.b")
        self.assertEqual(self.b.absolute_path(".."), "@root.a")
        self.assertEqual(self.b.absolute_path("..."), "@root")
        self.assertEqual(self.b.absolute_path("x"), "@root.a.b.x")
        self.assertEqual(self.b.absolute_path(".x"), "@root.a.b.x")
        self.assertEqual(self.b.absolute_path("..x"), "@root.a.x")
        self.assertEqual(self.b.absolute_path("...x"), "@root.x")
        # Climbing above @root is an error.
        self.assertRaises(errors.CoilError,
                self.r.absolute_path, "..")
        self.assertRaises(errors.CoilError,
                self.a.absolute_path, "...")
        self.assertRaises(errors.CoilError,
                self.b.absolute_path, "....")
| Python | 0.000001 | |
942b7c519a07a84c7f26077b78c23c60174e1141 | Add VCF precalculator | scripts/precalc.py | scripts/precalc.py | # -*- coding: utf-8 -*-
'''
Earth Engine precalculator for CLASlite
Requires Python 2.6+
Public Domain where allowed, otherwise:
Copyright 2010 Michael Geary - http://mg.to/
Use under MIT, GPL, or any Open Source license:
http://www.opensource.org/licenses/
'''
import cgi, json, os, sys, time, urllib2
sys.path.append( os.path.abspath('../web/app') )
import private
base = private.private['earth-engine-api']
auth = private.private['earth-engine-auth']
sat = 'LANDSAT/L7_L1T'
bbox = '-61.6,-11.4,-60.8,-10.6'
def fetch( api ):
    """Issue an authenticated GET against the Earth Engine API and parse JSON."""
    req = urllib2.Request(
        url = base + api,
        headers = { 'Authorization': 'GoogleLogin auth=' + auth }
    )
    try:
        f = urllib2.urlopen( req, None, 600 )  # 600 s timeout: VCF jobs are slow
        data = f.read()
        f.close()
        return json.loads( data )
    except urllib2.HTTPError, error:
        # On HTTP errors, return the raw error body (a str, not parsed JSON).
        return error.read()
def listImages( sat, bbox ):
    """Return image descriptors for satellite `sat` within bounding box `bbox`."""
    return fetch( 'list?id=%s&bbox=%s' %( sat, bbox ) )['data']
def calcVCF( id ):
    """Run the VCF adjustment computation for a single image id."""
    return fetch( vcfAPI(id) )
def vcfAPI(id):
    """Build the Earth Engine 'value' query that computes the VCF adjustment
    for image *id* (the id appears twice in the nested creator expression)."""
    template = ('value?image={"creator":"CLASLITE/VCFAdjustedImage","args":'
                '[{"creator":"CLASLITE/AutoMCU","args":["%s",'
                '{"creator":"CLASLITE/Reflectance","args":'
                '[{"creator":"CLASLITE/Calibrate","args":["%s"]}]}]},'
                '"MOD44B_C4_TREE_2000"]}&fields=vcf_adjustment')
    return template % (id, id)
def main():
    """Iterate over every image in the region and report forest cover for each."""
    images = listImages( sat, bbox )
    count = len(images)
    n = 0
    for image in images:
        id = image['id']
        n += 1
        print 'Loading %d/%d: %s' %( n, count, id )
        t = time.time()
        vcf = calcVCF( id )
        t = time.time() - t
        report( vcf, t )
def report( vcf, t ):
    """Print elapsed seconds and percent forest for one VCF result.
    NOTE(review): assumes `vcf` is the parsed JSON dict; if fetch() returned
    an HTTP error body (a plain str) the indexing below raises -- confirm.
    """
    adjustment = vcf['data']['properties']['vcf_adjustment']
    forest = adjustment['forest_pixel_count']
    valid = adjustment['valid_pixel_count']
    if valid > 0:
        percent = forest * 100 / valid
    else:
        percent = 0
    print '%d seconds, %d%% forest' %( t, percent )
if __name__ == "__main__":
    main()
| Python | 0.000001 | |
8b9a8f6443c1a5e184ececa4ec03baabca0973de | Add support for Pocket | services/pocket.py | services/pocket.py | from werkzeug.urls import url_decode
import requests
import foauth.providers
class Pocket(foauth.providers.OAuth2):
    """foauth provider for the Pocket (getpocket.com) API.
    Pocket's OAuth2 variant needs a request token obtained *before* the user
    is redirected, so the standard flow is customized below.
    """
    # General info about the provider
    provider_url = 'http://getpocket.com/'
    docs_url = 'http://getpocket.com/developer/docs/overview'
    category = 'News'
    # URLs to interact with the API
    request_token_url = 'https://getpocket.com/v3/oauth/request'
    authorize_url = 'https://getpocket.com/auth/authorize'
    access_token_url = 'https://getpocket.com/v3/oauth/authorize'
    api_domain = 'getpocket.com'
    available_permissions = [
        (None, 'access your saved articles'),
    ]
    # Pocket does not round-trip an OAuth2 `state` parameter.
    supports_state = False
    def get_authorize_params(self, redirect_uri, scopes):
        """Fetch a request token and embed it in the authorize parameters."""
        params = super(Pocket, self).get_authorize_params(redirect_uri, scopes)
        r = requests.post(self.request_token_url, data={
            'consumer_key': params['client_id'],
            'redirect_uri': redirect_uri,
        })
        data = url_decode(r.content)
        # Carry the request token back to us through the redirect URI.
        redirect_uri = '%s&code=%s' % (params['redirect_uri'], data['code'])
        return {
            'request_token': data['code'],
            'redirect_uri': redirect_uri,
        }
    def get_access_token_response(self, redirect_uri, data):
        """Exchange the request token (`code`) for an access token."""
        return requests.post(self.get_access_token_url(), {
            'consumer_key': self.client_id,
            'code': data['code'],
            'redirect_uri': redirect_uri
        })
    def parse_token(self, content):
        """Decode the url-encoded token response; Pocket keys users by username."""
        data = url_decode(content)
        data['service_user_id'] = data['username']
        return data
    def bearer_type(self, token, r):
        # Pocket expects credentials as query parameters, not an auth header.
        r.prepare_url(r.url, {'consumer_key': self.client_id, 'access_token': token})
        return r
| Python | 0 | |
8244d71a41032e41bd79741ec649fa78c6317efa | add mixins for tweaking smartmin behavior more easily | smartmin/mixins.py | smartmin/mixins.py |
# simple mixins that keep you from writing so much code
class PassRequestToFormMixin(object):
    """View mixin that injects the current HTTP request into form kwargs."""
    def get_form_kwargs(self):
        form_kwargs = super(PassRequestToFormMixin, self).get_form_kwargs()
        form_kwargs.update(request=self.request)
        return form_kwargs
| Python | 0 | |
32dd2099f97add61cb31df7af796876a95695bb1 | Add a sample permission plugin for illustrating the check on realm resources, related to #6211. | sample-plugins/public_wiki_policy.py | sample-plugins/public_wiki_policy.py | from fnmatch import fnmatchcase
from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy
class PublicWikiPolicy(Component):
    """Sample permission policy plugin illustrating how to check
    permission on realms.
    Don't forget to integrate that plugin in the appropriate place in the
    list of permission policies:
    {{{
    [trac]
    permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
    }}}
    Then you can configure which pages you want to make public:
    {{{
    [public_wiki]
    view = Public*
    modify = PublicSandbox/*
    }}}
    """
    implements(IPermissionPolicy)
    view = Option('public_wiki', 'view', 'Public*',
        """Case-sensitive glob pattern used for granting view permission on
        all Wiki pages matching it.""")
    modify = Option('public_wiki', 'modify', 'Public*',
        """Case-sensitive glob pattern used for granting modify permissions
        on all Wiki pages matching it.""")
    def check_permission(self, action, username, resource, perm):
        """Grant WIKI_* actions on pages matching the configured globs.
        Returns True to grant; falls through (implicit None) to defer to
        the next policy in the chain.
        """
        if resource: # fine-grained permission check
            if resource.realm == 'wiki': # wiki realm or resource
                if resource.id: # ... it's a resource
                    if action == 'WIKI_VIEW': # (think 'VIEW' here)
                        pattern = self.view
                    else:
                        pattern = self.modify
                    if fnmatchcase(resource.id, pattern):
                        return True
                else: # ... it's a realm
                    return True
                    # this policy ''may'' grant permissions on some wiki pages
        else: # coarse-grained permission check
            #
            # support for the legacy permission checks: no resource specified
            # and realm information in the action name itself.
            #
            if action.startswith('WIKI_'):
                return True
                # this policy ''may'' grant permissions on some wiki pages
| Python | 0.000003 | |
1c39eb113be409ff304a675ef8a85e96a97b1d87 | Add files via upload | basicTwitter.py | basicTwitter.py | '''
RIT SPEX: Twitter posting basic.
Basic python script for posting to twitter.
Pre-Req:
Python3
Tweepy library twitter
Contributors:
Evan Putnam
Henry Yaeger
John LeBrun
Helen O'Connell
'''
import tweepy
#Tweet a picutre
def tweetPicture(api ,picUrl):
    """Publish a tweet carrying the image found at ``picUrl``."""
    api.update_with_media(picUrl)
#Tweet a post
def tweetPost(api, postStr):
    """Publish ``postStr`` as a plain-text status update through ``api``."""
    api.update_status(postStr)
def apiSetUp(conKey, conSec, accTok, accSec):
    '''
    Build an authenticated tweepy API handle.
    :param conKey: consumer key
    :param conSec: consumer secret
    :param accTok: access token
    :param accSec: access token secret
    :return: tweepy.API object ready for posting
    '''
    # Authenticate with both key pairs, then wrap the handler in an API object.
    handler = tweepy.OAuthHandler(conKey, conSec)
    handler.set_access_token(accTok, accSec)
    return tweepy.API(handler)
def main():
    """
    NOTE: Do not send code to others with the consumer keys and access tokens. It will allow them to access your twitter
    application. This program is simple. Enter 1 to post a twitter text post and 2 for an image post...
    :return:
    """
    #REPLACE WITH CONSUMER KEYS
    conKey = ""
    conSec = ""

    #REPLACE WITH ACCESS TOKENS
    accTok = ""
    accSec = ""

    if conKey == "" or conSec == "" or accTok == "" or accSec == "":
        print("WARNING YOU HAVE NOT ENTERERED YOUR INFORMATION")

    # Use the shared helper instead of duplicating the OAuth boilerplate
    # that apiSetUp() already implements.
    api = apiSetUp(conKey, conSec, accTok, accSec)

    print("Press and enter 1 to post a text tweet")
    print("Press and enter 2 to post an image tweet")
    option = int(input("Enter Option(1 or 2):"))
    if option == 1:
        post = (input("Enter Post:"))
        tweetPost(api, post)
    elif option == 2:
        print("Image must be in folder of program")
        imagePath = (input("Enter Image Path:"))
        tweetPicture(api,imagePath)
if __name__ == '__main__':
    main()
| Python | 0 | |
eb170653e64c5a874a773dc37c99dccb4dd42608 | Add tools.color module (#41, #36)) | xentica/tools/color.py | xentica/tools/color.py | """A collection of color conversion helpers."""
def hsv2rgb(hue, sat, val):
    """
    Convert HSV color to RGB format.
    :param hue: Hue value [0, 1]
    :param sat: Saturation value [0, 1]
    :param val: Brightness value [0, 1]
    :returns: tuple (red, green, blue), each component in [0, 1]
    """
    # Delegate to the standard library; colorsys uses the same [0, 1] ranges.
    import colorsys
    return colorsys.hsv_to_rgb(hue, sat, val)
def rgb2hsv(red, green, blue):
    """
    Convert RGB color to HSV format.
    :param red: Red value [0, 1]
    :param green: Green value [0, 1]
    :param blue: Blue value [0, 1]
    :returns: tuple (hue, sat, val), each component in [0, 1]
    """
    # Delegate to the standard library; colorsys uses the same [0, 1] ranges.
    import colorsys
    return colorsys.rgb_to_hsv(red, green, blue)
def genome2rgb(genome):
    """
    Convert genome bit value to RGB color.
    :param genome: Genome as integer (bit) sequence.
    :returns: tuple (red, green, blue)
    """
    # Unimplemented: the mapping from genome bits to a color is not yet
    # specified; callers must expect NotImplementedError for now.
    raise NotImplementedError
| Python | 0 | |
1d5f1576a5f92c1917fa29c457e4b7ad055f41ca | Add info for mono@5.4.0.167 (#5405) | var/spack/repos/builtin/packages/mono/package.py | var/spack/repos/builtin/packages/mono/package.py | ###############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Mono(AutotoolsPackage):
    """Mono is a software platform designed to allow developers to easily
    create cross platform applications. It is an open source
    implementation of Microsoft's .NET Framework based on the ECMA
    standards for C# and the Common Language Runtime.
    """
    homepage = "http://www.mono-project.com/"
    url = "https://download.mono-project.com/sources/mono/mono-5.0.1.1.tar.bz2"
    # /usr/share/.mono/keypairs needs to exist or be able to be
    # created, e.g. https://github.com/gentoo/dotnet/issues/6
    variant('patch-folder-path', default=False,
            description='Point SpecialFolder.CommonApplicationData folder '
            'into Spack installation instead of /usr/share')
    # Spack's openssl interacts badly with mono's vendored
    # "boringssl", don't drag it in w/ cmake
    depends_on('cmake~openssl', type=('build'))
    depends_on('libiconv')
    depends_on('perl', type=('build'))
    version('5.4.0.167', '103c7a737632046a9e9a0b039d752ee1')
    version('5.0.1.1', '17692c7a797f95ee6f9a0987fda3d486')
    version('4.8.0.524', 'baeed5b8139a85ad7e291d402a4bcccb')
    def patch(self):
        """With +patch-folder-path, rewrite mono's hard-coded /usr/share
        CommonApplicationData folder to point inside this install prefix."""
        if '+patch-folder-path' in self.spec:
            before = 'return "/usr/share";'
            after = 'return "{0}";'.format(self.prefix.share)
            f = 'mcs/class/corlib/System/Environment.cs'
            kwargs = {'ignore_absent': False, 'backup': True, 'string': True}
            filter_file(before, after, f, **kwargs)
    def configure_args(self):
        """Point configure at Spack's libiconv installation."""
        args = []
        li = self.spec['libiconv'].prefix
        args.append('--with-libiconv-prefix={p}'.format(p=li))
        return args
| ###############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Mono(AutotoolsPackage):
    """Mono is a software platform designed to allow developers to easily
    create cross platform applications. It is an open source
    implementation of Microsoft's .NET Framework based on the ECMA
    standards for C# and the Common Language Runtime.
    """
    homepage = "http://www.mono-project.com/"
    url = "https://download.mono-project.com/sources/mono/mono-5.0.1.1.tar.bz2"
    # /usr/share/.mono/keypairs needs to exist or be able to be
    # created, e.g. https://github.com/gentoo/dotnet/issues/6
    variant('patch-folder-path', default=False,
            description='Point SpecialFolder.CommonApplicationData folder '
            'into Spack installation instead of /usr/share')
    # Spack's openssl interacts badly with mono's vendored
    # "boringssl", don't drag it in w/ cmake
    depends_on('cmake~openssl', type=('build'))
    depends_on('libiconv')
    depends_on('perl', type=('build'))
    version('5.0.1.1', '17692c7a797f95ee6f9a0987fda3d486')
    version('4.8.0.524', 'baeed5b8139a85ad7e291d402a4bcccb')
    def patch(self):
        """With +patch-folder-path, rewrite mono's hard-coded /usr/share
        CommonApplicationData folder to point inside this install prefix."""
        if '+patch-folder-path' in self.spec:
            before = 'return "/usr/share";'
            after = 'return "{0}";'.format(self.prefix.share)
            f = 'mcs/class/corlib/System/Environment.cs'
            kwargs = {'ignore_absent': False, 'backup': True, 'string': True}
            filter_file(before, after, f, **kwargs)
    def configure_args(self):
        """Point configure at Spack's libiconv installation."""
        args = []
        li = self.spec['libiconv'].prefix
        args.append('--with-libiconv-prefix={p}'.format(p=li))
        return args
| Python | 0 |
d03edd6670c130925fa8b947ebde03f2026602c3 | remove redundant verbose prints | dj/scripts/mk_public.py | dj/scripts/mk_public.py | #!/usr/bin/python
# mk_public.py - flip state on hosts from private to public
# private = not listed, can be seen if you know the url
# the presenters have been emaild the URL,
# they are encouraged to advertise it.
# public = advertised, it is ready for the world to view.
# It will be tweeted at @NextDayVideo
from steve.richardapi import update_video, MissingRequiredData
from steve.restapi import API, get_content
import youtube_uploader
import gdata.youtube
from gdata.media import YOUTUBE_NAMESPACE
from atom import ExtensionElement
import atom
import pw
from process import process
import pprint
from main.models import Show, Location, Episode, Raw_File, Cut_List
class mk_public(process):
    """Flip episodes from private to public on richard and YouTube."""
    # Episodes in this state are picked up by process_ep().
    ready_state = 9
    def up_richard(self, ep):
        """Set the episode's video to state 1 ("live") on the richard host."""
        host = pw.richard[ep.show.client.richard_id]
        endpoint = 'http://{hostname}/api/v1'.format(hostname=host['host'])
        api = API(endpoint)
        # The numeric video id is embedded in the public URL: .../video/<id>/...
        vid = ep.public_url.split('/video/')[1].split('/')[0]
        response = api.video(vid).get(
            username=host['user'], api_key=host['api_key'])
        video_data = get_content(response)
        video_data['state'] = 1
        try:
            update_video(endpoint, host['user'], host['api_key'],
                    vid, video_data)
        except MissingRequiredData, e:
            # this shouldn't happen, prolly debugging something.
            import code
            code.interact(local=locals())
        return True
    def up_youtube(self, ep):
        """Make the already-uploaded YouTube video public."""
        uploader = youtube_uploader.Uploader()
        uploader.user = ep.show.client.youtube_id
        return uploader.set_permission( ep.host_url )
    def process_ep(self, ep):
        """Publish one episode; returns the conjunction of the per-host results."""
        # set youtube to public
        # set richard state to live
        ret = True # if something breaks, this will be false
        # don't make public if there is no host_url (youtube)
        if ep.public_url and ep.host_url:
            ret = ret and self.up_richard(ep)
            if self.options.verbose: print "Richard public."
        if ep.host_url:
            ret = ret and self.up_youtube(ep)
            if self.options.verbose: print "Youtube public."
        return ret
if __name__ == '__main__':
    p=mk_public()
    p.main()
| #!/usr/bin/python
# mk_public.py - flip state on hosts from private to public
# private = not listed, can be seen if you know the url
# the presenters have been emaild the URL,
# they are encouraged to advertise it.
# public = advertised, it is ready for the world to view.
# It will be tweeted at @NextDayVideo
from steve.richardapi import update_video, MissingRequiredData
from steve.restapi import API, get_content
import youtube_uploader
import gdata.youtube
from gdata.media import YOUTUBE_NAMESPACE
from atom import ExtensionElement
import atom
import pw
from process import process
import pprint
from main.models import Show, Location, Episode, Raw_File, Cut_List
class mk_public(process):
    """Flip episodes from private to public on richard and YouTube."""
    # Episodes in this state are picked up by process_ep().
    ready_state = 9
    def up_richard(self, ep):
        """Set the episode's video to state 1 ("live") on the richard host."""
        host = pw.richard[ep.show.client.richard_id]
        endpoint = 'http://{hostname}/api/v1'.format(hostname=host['host'])
        api = API(endpoint)
        # The numeric video id is embedded in the public URL: .../video/<id>/...
        vid = ep.public_url.split('/video/')[1].split('/')[0]
        response = api.video(vid).get(
            username=host['user'], api_key=host['api_key'])
        video_data = get_content(response)
        video_data['state'] = 1
        try:
            update_video(endpoint, host['user'], host['api_key'],
                    vid, video_data)
        except MissingRequiredData, e:
            # this shouldn't happen, prolly debugging something.
            import code
            code.interact(local=locals())
        return True
    def up_youtube(self, ep):
        """Make the already-uploaded YouTube video public."""
        uploader = youtube_uploader.Uploader()
        uploader.user = ep.show.client.youtube_id
        return uploader.set_permission( ep.host_url )
    def process_ep(self, ep):
        """Publish one episode; returns the conjunction of the per-host results."""
        if self.options.verbose: print ep.id, ep.name
        # set youtube to public
        # set richard state to live
        ret = True # if something breaks, this will be false
        # don't make public if there is no host_url (youtube)
        if ep.public_url and ep.host_url:
            ret = ret and self.up_richard(ep)
            if self.options.verbose: print "Richard public."
        if ep.host_url:
            ret = ret and self.up_youtube(ep)
            if self.options.verbose: print "Youtube public."
        return ret
if __name__ == '__main__':
    p=mk_public()
    p.main()
| Python | 0.956212 |
197fb6ec004c0bf47ec7e2fd25b75564a3ecf6c4 | Add tests for logging of rest requests | test/audit_logs/test_audit_log.py | test/audit_logs/test_audit_log.py | import datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
assert record['userId'] == None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
| Python | 0 | |
9e96a7ff9ad715f58d07341bd571e63ef233ffdb | Create fizzbuzz.py | job_interview_algs/fizzbuzz.py | job_interview_algs/fizzbuzz.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Created on 24 02 2016
@author: vlad
'''
def multiple_of_3(number):
    """Return True when ``number`` divides evenly by 3."""
    return not number % 3
def multiple_of_5(number):
    """Return True when *number* is evenly divisible by 5."""
    return not number % 5
# Classic FizzBuzz over 1..99 (Python 2 print statements).
# NOTE(review): range(1, 100) stops at 99 -- confirm whether 100 was intended.
for i in range(1, 100):
    # Plain numbers: divisible by neither 3 nor 5.
    if not multiple_of_3(i) and not multiple_of_5(i):
        print i
        continue
    if multiple_of_3(i) and multiple_of_5(i):
        print "fizzbuzz"
        continue
    else:
        # Exactly one divisor holds here; the bool indexes the word list
        # (False -> "fizz", True -> "buzz").
        print ["fizz", "buzz"][multiple_of_5(i)]
| Python | 0.00001 | |
8089750d5dccadb0603068eefec869df4f8360cc | Add fizzbuzz.py in strings folder | strings/fizzbuzz.py | strings/fizzbuzz.py | """
Wtite a function that returns an array containing the numbers from 1 to N,
where N is the parametered value. N will never be less than 1.
Replace certain values however if any of the following conditions are met:
If the value is a multiple of 3: use the value 'Fizz' instead
If the value is a multiple of 5: use the value 'Buzz' instead
If the value is a multiple of 3 & 5: use the value 'FizzBuzz' instead
"""
"""
There is no fancy algorithm to solve fizz buzz.
Iterate from 1 through n
Use the mod operator to determine if the current iteration is divisible by:
3 and 5 -> 'FizzBuzz'
3 -> 'Fizz'
5 -> 'Buzz'
else -> string of current iteration
return the results
Complexity:
Time: O(n)
Space: O(n)
"""
def fizzbuzz(n):
    """Return the FizzBuzz sequence for 1..n as a list.

    Multiples of 3 become 'Fizz', multiples of 5 become 'Buzz', multiples
    of both become 'FizzBuzz'; every other value stays an int.

    :raises TypeError: if n is None.
    :raises ValueError: if n is less than one.
    """
    # Validate the input. The None check must run first: evaluating
    # `None < 1` raises its own TypeError on Python 3, which made the
    # original explicit None guard unreachable.
    if n is None:
        raise TypeError('n cannot be None')
    if n < 1:
        raise ValueError('n cannot be less than one')

    result = []
    for i in range(1, n+1):
        if i%3 == 0 and i%5 == 0:
            result.append('FizzBuzz')
        elif i%3 == 0:
            result.append('Fizz')
        elif i%5 == 0:
            result.append('Buzz')
        else:
            result.append(i)
    return result
# Alternative solution
def fizzbuzz_with_helper_func(n):
    """Build the FizzBuzz sequence for 1..n by delegating each value to fb()."""
    return list(map(fb, range(1, n + 1)))
def fb(m):
    """Map a single integer to its FizzBuzz token, or the int itself."""
    token = ""
    if m % 3 == 0:
        token += "Fizz"
    if m % 5 == 0:
        token += "Buzz"
    # Empty token means m matched neither divisor; return the number as-is.
    return token if token else m
| Python | 0.000001 | |
9ea3c14983c7b2e32132f1ffe6bbbe7b4d19000c | Add Flyweight.py | Python/Flyweight/Flyweight.py | Python/Flyweight/Flyweight.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
'''
Flyweight Pattern
Author: Kei Nakata
Data: Oct.14.2014
'''
class FlyweightFactory(object):
    """Cache of Flyweight instances keyed by their constructor arguments."""

    def __init__(self):
        self.instances = dict()

    def getInstance(self, a, b):
        """Return the shared Flyweight for (a, b), creating it on first use."""
        key = (a, b)
        try:
            return self.instances[key]
        except KeyError:
            shared = Flyweight(a, b)
            self.instances[key] = shared
            return shared
class Flyweight(object):
    """Shared value object; instances are handed out by FlyweightFactory."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def method(self):
        # Python 2 print statement: shows the intrinsic state of the flyweight.
        print self.a, self.b
if __name__ == '__main__':
    # Demo: a and c share the same key (1, 2), so the factory must return
    # the very same object for both, while b is a distinct instance.
    factory = FlyweightFactory()
    a = factory.getInstance(1, 2)
    b = factory.getInstance(3, 2)
    c = factory.getInstance(1, 2)
    a.method()
    b.method()
    c.method()
    # id(a) == id(c) demonstrates the flyweight sharing (Python 2 prints).
    print id(a)
    print id(b)
    print id(c)
| Python | 0.000005 | |
868a771e0ba049edd55ddf38db852c4d34824297 | Add pod env tests | tests/test_spawner/test_pod_environment.py | tests/test_spawner/test_pod_environment.py | from unittest import TestCase
from scheduler.spawners.templates.pod_environment import (
get_affinity,
get_node_selector,
get_tolerations
)
class TestPodEnvironment(TestCase):
    """Unit tests for pod environment helpers: affinity, node selector,
    and tolerations. Each helper prefers the explicit value over the
    JSON-encoded default."""

    def test_pod_affinity(self):
        """Explicit affinity dict wins over the JSON default."""
        assert get_affinity(None, None) is None
        assert get_affinity({'foo': 'bar'}, None) == {'foo': 'bar'}
        assert get_affinity(None, '{"foo": "bar"}') == {'foo': 'bar'}
        assert get_affinity({'foo': 'bar'}, '{"foo": "moo"}') == {'foo': 'bar'}

    # NOTE(review): missing the "test_" prefix, so unittest never collects
    # or runs this method -- rename to test_pod_node_selector to activate it.
    def get_pod_node_selector(self):
        assert get_node_selector(None, None) is None
        assert get_node_selector({'foo': 'bar'}, None) == {'foo': 'bar'}
        assert get_node_selector(None, '{"foo": "bar"}') == {'foo': 'bar'}
        assert get_node_selector({'foo': 'bar'}, '{"foo": "moo"}') == {'foo': 'bar'}

    # NOTE(review): also never collected (missing "test_" prefix); the final
    # assertion compares a list to a dict and would fail if it ever ran.
    def get_pod_tolerations(self):
        assert get_tolerations(None, None) is None
        assert get_tolerations([{'foo': 'bar'}], None) == [{'foo': 'bar'}]
        assert get_tolerations(None, '[{"foo": "bar"}]') == [{'foo': 'bar'}]
        assert get_tolerations([{'foo': 'bar'}], '[{"foo": "moo"}]') == {'foo': 'bar'}
| Python | 0 | |
d5b95c31cb50762a6aee1b460b377a200b34e249 | Create createCW_readtxt.py | createCW_readtxt.py | createCW_readtxt.py | def readtext(f):
    """Parse the crossword text file at f.loc and populate f's fields.

    The file is a sequence of tag-delimited sections (<TITLE>, <AUTHOR>,
    <COPYRIGHT>, <WIDTH>, <HEIGHT>, <GRID>, <ACROSS>, <DOWN>, <NOTEPAD>).
    Returns f with title/author/cpyrt/notes, width/height, the solution
    grid, and the across/down clue lists filled in.
    """
    with open(f.loc,encoding="ISO-8859-1") as k:
        content = k.read().splitlines()
    # i walks through the file line-by-line; the tag sections are assumed
    # to appear in the fixed order below.
    i=0
    width=0
    height=0
    acc=0
    across=[]
    dwn=0
    down=[]
    solnblock=[]
    title=""
    author=""
    cpyrt=""
    notes=""
    Encoding_2 = "ISO-8859-1"
    # <TITLE> ... </TITLE>: concatenate every line between the tags.
    while(content[i]!="<TITLE>"):
        i=i+1
    i=i+1
    temp=""
    while(content[i]!="</TITLE>"):
        temp=temp+content[i]
        i=i+1
    title=temp
    i=i+1
    # Optional <AUTHOR> section.
    if(content[i]=="<AUTHOR>"):
        i=i+1
        temp=""
        while(content[i]!="</AUTHOR>"):
            temp=temp+content[i]
            i=i+1
        author=temp
        i=i+1
    # Optional <COPYRIGHT> section.
    if(content[i]=="<COPYRIGHT>"):
        i=i+1
        temp=""
        while(content[i]!="</COPYRIGHT>"):
            temp=temp+content[i]
            i=i+1
        cpyrt=temp
        i=i+1
    # Grid width in cells.
    if(content[i]=="<WIDTH>"):
        i=i+1
        temp=""
        while(content[i]!="</WIDTH>"):
            temp=temp+content[i]
            i=i+1
        width=int(temp)
        i=i+1
    # Grid height in cells.
    if(content[i]=="<HEIGHT>"):
        i=i+1
        temp=""
        while(content[i]!="</HEIGHT>"):
            temp=temp+content[i]
            i=i+1
        height=int(temp)
        i=i+1
    # <GRID>: each line is a comma-terminated row of cell values; r indexes
    # rows in solnblock, j scans characters within the current line.
    if(content[i]=="<GRID>"):
        i=i+1
        r=0
        while(content[i]!="</GRID>"):
            j=0
            solnblock.append([])
            while(True):
                temp=""
                while(content[i][j]!=","):
                    temp=temp+content[i][j]
                    j=j+1
                solnblock[r].append(temp)
                j=j+1
                if(j==len(content[i])):
                    break
            r=r+1
            i=i+1
        i=i+1
    # <ACROSS>: one clue per line; "<NULL>" marks an empty clue.
    if(content[i]=="<ACROSS>"):
        i=i+1
        while(content[i]!="</ACROSS>"):
            if(content[i]!="<NULL>"):
                across.append(content[i])
            else:
                across.append("")
            i=i+1
        acc=len(across)
        i=i+1
    # <DOWN>: same format as <ACROSS>.
    if(content[i]=="<DOWN>"):
        i=i+1
        while(content[i]!="</DOWN>"):
            if(content[i]!="<NULL>"):
                down.append(content[i])
            else:
                down.append("")
            i=i+1
        dwn=len(down)
        i=i+1
    # Optional free-form <NOTEPAD> section.
    if(content[i]=="<NOTEPAD>"):
        i=i+1
        temp=""
        while(content[i]!="</NOTEPAD>"):
            temp=temp+content[i]
            i=i+1
        notes=temp
    # Copy the parsed values onto the puzzle object and clear its location.
    f.title=title
    f.author=author
    f.cpyrt=cpyrt
    f.notes=notes
    f.width=width
    f.height=height
    f.solnblock=solnblock
    f.acc=acc
    f.dwn=dwn
    f.across=across
    f.down=down
    f.loc=""
    return f
def writetext(f):
    """Serialize the puzzle object f back into its tag-delimited text format.

    Writes to f.loc as ISO-8859-1 bytes, emitting the same sections that
    readtext() parses (<TITLE>, <AUTHOR>, ..., <NOTEPAD>).

    NOTE(review): the clue lists are read as f.across[i][1] / f.down[i][1]
    (i.e. (number, text) pairs), while readtext() stores them as flat
    strings -- confirm which representation callers actually provide.
    """
    Encoding_2 = "ISO-8859-1"
    ofile=open(f.loc,mode='wb')
    ofile.write(("<TITLE>"+"\n").encode(Encoding_2))
    msg=f.title+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</TITLE>"+"\n").encode(Encoding_2))
    ofile.write(("<AUTHOR>"+"\n").encode(Encoding_2))
    msg=f.author+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</AUTHOR>"+"\n").encode(Encoding_2))
    ofile.write(("<COPYRIGHT>"+"\n").encode(Encoding_2))
    msg=f.cpyrt+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</COPYRIGHT>"+"\n").encode(Encoding_2))
    ofile.write(("<WIDTH>"+"\n").encode(Encoding_2))
    msg=str(f.width)+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</WIDTH>"+"\n").encode(Encoding_2))
    ofile.write(("<HEIGHT>"+"\n").encode(Encoding_2))
    msg=str(f.height)+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</HEIGHT>"+"\n").encode(Encoding_2))
    # Grid rows: every cell is followed by a comma, one row per line.
    ofile.write(("<GRID>"+"\n").encode(Encoding_2))
    for i in range(0,f.height):
        for j in range(0,f.width):
            msg=f.solnblock[i][j]+","
            ofile.write(msg.encode(Encoding_2))
        ofile.write(("\n").encode(Encoding_2))
    ofile.write(("</GRID>"+"\n").encode(Encoding_2))
    # Empty clues are written back as the "<NULL>" sentinel.
    ofile.write(("<ACROSS>"+"\n").encode(Encoding_2))
    for i in range(0,f.acc):
        if(f.across[i][1]==""):
            msg="<NULL>"+"\n"
        else:
            msg=f.across[i][1]+"\n"
        ofile.write(msg.encode(Encoding_2))
    ofile.write(("</ACROSS>"+"\n").encode(Encoding_2))
    ofile.write(("<DOWN>"+"\n").encode(Encoding_2))
    for i in range(0,f.dwn):
        if(f.down[i][1]==""):
            msg="<NULL>"+"\n"
        else:
            msg=f.down[i][1]+"\n"
        ofile.write(msg.encode(Encoding_2))
    ofile.write(("</DOWN>"+"\n").encode(Encoding_2))
    ofile.write(("<NOTEPAD>"+"\n").encode(Encoding_2))
    msg=f.notes+"\n"
    ofile.write(msg.encode(Encoding_2))
    ofile.write(("</NOTEPAD>"+"\n").encode(Encoding_2))
    ofile.close()
| Python | 0.000004 | |
9ea17a6d3eb5f9af246be964651f5bef522f2d95 | Map out the TrustChain data structure | src/trustchain.py | src/trustchain.py | class TrustChain:
"""
Node maintains one TrustChain object and interacts with it either in in the reactor process or some other process.
If it's the latter, there needs to be some communication mechanism.
type System = Map<Node, Chain>;
// height (sequence number) should match the index
type Chain = List<Block>;
struct Node {
pk: [u8; 32],
addr: SocketAddr,
// ...
}
struct Signature {
pk: [u8; 32],
sig: [u8, 32],
}
enum Block {
TxBlock,
CpBlock,
}
struct TxBlock {
prev: Digest,
h_s: u64,
h_r: u64,
s_s: Signature,
s_r: Signature,
m: String,
// items below are not a part of the block digest
validity: Valid | Invalid | Unknown
}
struct Cons {
round: u64,
blocks: List<CpBlock>,
ss: List<Signature>,
}
struct CpBlock {
prev: Digest,
round: u64, // of the Cons
con: Digest, // of the Cons
p: bool, // promoter registration
s: Signature,
}
"""
    def __init__(self):
        """Create an empty TrustChain with no local node and no system map."""
        # myself: the local Node; system: Map<Node, Chain> (see class docstring).
        self.myself = None
        self.system = None
    def new_tx(self, tx):
        """
        Verify the transaction block and, if valid, mutate the state to add it.

        :param tx: the TxBlock to insert.
        :return: None
        """
        pass
    def new_cp(self, cp):
        """
        Verify the checkpoint block, follow the rule and mutate the state to add it.

        :param cp: the CpBlock to insert.
        :return: None
        """
        pass
    def pieces(self, tx):
        """
        tx must exist; return the pieces of tx.

        :param tx: the transaction block to look up.
        :return: List<Block>
        """
        pass
    def verify(self, tx, resp):
        """
        Verify a transaction against a received response.

        :param tx: the transaction block to validate.
        :param resp: the response received for tx (contents TBD -- stub).
        :return: None
        """
        pass
    def _enclosure(self, tx):
        """
        :param tx: the transaction block.
        :return: (CpBlock, CpBlock)
        """
        # NOTE(review): name suggests the pair of checkpoint blocks that
        # enclose tx in its chain -- confirm once implemented.
        pass
1c1e933fa9c6af1aa9d73f276ac7b79c2b86bdc3 | add svn-clean-external-file.py | scripts/svn-clean-external-file.py | scripts/svn-clean-external-file.py | # written by Thomas Watnedal
# http://stackoverflow.com/questions/239340/automatically-remove-subversion-unversioned-files
import os
import re
def removeall(path):
    """Recursively delete *path*: a file is removed directly, a directory
    has its contents deleted depth-first before the directory itself."""
    if not os.path.isdir(path):
        os.remove(path)
        return
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            removeall(child)
        elif os.path.isfile(child):
            os.remove(child)
    os.rmdir(path)
# Matches the status column of `svn status -v` for unversioned (?),
# ignored (I) and deleted-but-present (D) entries; group 1 is the path.
unversionedRex = re.compile('^ ?[\?ID] *[1-9 ]*[a-zA-Z]* +(.*)')
# Remove every path svn reports as unversioned/ignored in the working copy.
for l in os.popen('svn status --no-ignore -v').readlines():
    match = unversionedRex.match(l)
    if match: removeall(match.group(1))
| Python | 0.000002 | |
3ef1e39d476a8b3e41ff0b06dcd6f700c083682d | Add an ABC for all sub classes of `DataController` | data_controller/abc.py | data_controller/abc.py | from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
    """
    An ABC for all classes that deal with database read/write.

    Subclasses implement the synchronous accessors below; the async
    convenience method ``get_medium_data`` composes them via ``await_func``.
    """
    __slots__ = ()

    def get_identifier(self, query: str,
                       medium: Medium) -> Optional[Dict[Site, str]]:
        """
        Get the identifier of a given search query.

        :param query: the search query.
        :param medium: the medium type.
        :return: A dict of all identifiers for this search query for all sites,
                 None if nothing is found.
        """
        raise NotImplementedError

    def set_identifier(self, name: str, medium: Medium,
                       site: Site, identifier: str) -> None:
        """
        Set the identifier for a given name.

        :param name: the name.
        :param medium: the medium type.
        :param site: the site.
        :param identifier: the identifier.
        """
        raise NotImplementedError

    def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
        """
        Get a MAL title by its id.

        :param id_: the MAL id.
        :param medium: the medium type.
        :return: The MAL title if it's found.
        """
        raise NotImplementedError

    def set_mal_title(self, id_: str, medium: Medium, title: str) -> None:
        """
        Set the MAL title for a given id.

        :param id_: the MAL id.
        :param medium: The medium type.
        :param title: The MAL title for the given id.
        """
        raise NotImplementedError

    def medium_data_by_id(self, id_: str, medium: Medium,
                          site: Site) -> Optional[dict]:
        """
        Get data by id.

        :param id_: the id.
        :param medium: the medium type.
        :param site: the site.
        :return: the data for that id if found.
        """
        raise NotImplementedError

    def set_medium_data(self, id_: str, medium: Medium, site: Site,
                        data: dict) -> None:
        """
        Set the data for a given id.

        :param id_: the id.
        :param medium: the medium type.
        :param site: the site.
        :param data: the data for the id.
        """
        raise NotImplementedError

    async def get_medium_data(self, query: str,
                              medium: Medium, loop=None) -> Optional[dict]:
        """
        Get the cached data for the given search query.

        :param query: the search query.
        :param medium: the medium type.
        :param loop: the asyncio event loop, optional. If None is provided,
                     will use the default event loop.
        :return: the cached data, for all sites that have the data.
        """
        id_dict = await await_func(
            self.get_identifier, loop, query, medium
        )
        # No identifiers cached for this query -> implicitly return None.
        if not id_dict:
            return
        # Fetch per-site data and drop sites whose lookup returned nothing.
        return {site: data for site, data in {
            site: await await_func(self.medium_data_by_id, loop,
                                   id_, medium, site)
            for site, id_ in id_dict.items()}.items() if data}
| Python | 0.000003 | |
2ce67897ade1ce8ae8b0fd00671fe61f4164a2bc | Add missing migration | oidc_apis/migrations/0002_add_multiselect_field_ad_groups_option.py | oidc_apis/migrations/0002_add_multiselect_field_ad_groups_option.py | # Generated by Django 2.0.9 on 2018-10-15 08:08
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Add the 'ad_groups' choice to the Api.required_scopes multiselect."""

    dependencies = [
        ('oidc_apis', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='api',
            name='required_scopes',
            field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('email', 'E-mail'), ('profile', 'Profile'), ('address', 'Address'), ('github_username', 'GitHub username'), ('ad_groups', 'AD Groups')], help_text='Select the scopes that this API needs information from. Information from the selected scopes will be included to the API Tokens.', max_length=1000, verbose_name='required scopes'),
        ),
    ]
| Python | 0.0002 | |
785a5767ee3482fddee37327b4bf3edeed94ff46 | Add shootout attempt item definition | db/shootout_attempt.py | db/shootout_attempt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
    """ORM mapping for a single shootout attempt (goal, miss, or saved shot)."""

    __tablename__ = 'shootout_attempts'
    __autoload__ = True

    # Attributes copied verbatim from the source data dict, when present.
    STANDARD_ATTRS = [
        "team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
        "attempt_type", "shot_type", "miss_type", "distance", "on_goal",
        "scored"
    ]

    def __init__(self, event_id, data_dict):
        """Build an attempt from the parent event id and a raw data dict.

        Missing attributes default to False for the boolean flags
        ('scored', 'on_goal') and to None for everything else.
        """
        # Synthetic primary key (URN form of a random UUID4).
        self.shootout_attempt_id = uuid.uuid4().urn
        self.event_id = event_id
        for attr in self.STANDARD_ATTRS:
            if attr in data_dict:
                setattr(self, attr, data_dict[attr])
            else:
                if attr in ['scored', 'on_goal']:
                    setattr(self, attr, False)
                else:
                    setattr(self, attr, None)

    def __str__(self):
        """Human-readable one-line summary of the attempt.

        NOTE(review): implicitly returns None when attempt_type is not one
        of 'GOAL'/'MISS'/'SHOT', which would make str() raise -- confirm
        that those three values are exhaustive in the source data.
        """
        player = Player.find_by_id(self.player_id)
        goalie = Player.find_by_id(self.goalie_id)
        plr_team = Team.find_by_id(self.team_id)
        goalie_team = Team.find_by_id(self.goalie_team_id)
        if self.attempt_type == 'GOAL':
            return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
                player.name, plr_team.abbr, self.shot_type, self.distance,
                goalie.name, goalie_team.abbr)
        elif self.attempt_type == 'MISS':
            return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
                player.name, plr_team.abbr, self.shot_type, self.distance,
                self.miss_type, goalie.name, goalie_team.abbr)
        elif self.attempt_type == 'SHOT':
            return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
                player.name, plr_team.abbr, self.shot_type, self.distance,
                goalie.name, goalie_team.abbr)
| Python | 0 | |
1dcf698a286dcdf0f2c5a70d3e9bb2b32d046604 | add TestBEvents, currently skipped | tests/unit/Events/test_BEvents.py | tests/unit/Events/test_BEvents.py | from AlphaTwirl.Events import BEvents as Events
from AlphaTwirl.Events import Branch
import unittest
import ROOT
##____________________________________________________________________________||
inputPath = '/Users/sakuma/work/cms/c150130_RA1_data/c150130_01_PHYS14/20150331_SingleMu/TTJets/treeProducerSusyAlphaT/tree.root'
treeName = 'tree'
##____________________________________________________________________________||
@unittest.skip("skip BEvents")
class TestBEvents(unittest.TestCase):
    """Integration-style test for BEvents/Branch against a real ROOT tree.

    Skipped by default: it depends on a hard-coded, machine-specific input
    file (see inputPath above) and the expected values below are tied to
    that exact dataset.
    """

    def test_branch(self):
        inputFile = ROOT.TFile.Open(inputPath)
        tree = inputFile.Get(treeName)
        events = Events(tree)
        jet_pt = events.jet_pt
        met_pt = events.met_pt
        self.assertIsInstance(jet_pt, Branch)
        self.assertIsInstance(met_pt, Branch)
        # Before any GetEntry call the branches are empty/zeroed.
        self.assertEqual(0, len(jet_pt))
        self.assertEqual(1, len(met_pt))
        self.assertEqual(0.0, met_pt[0])
        tree.GetEntry(0)
        self.assertEqual(2, len(jet_pt))
        self.assertEqual(1, len(met_pt))
        self.assertEqual(124.55626678466797, jet_pt[0])
        self.assertEqual(86.90544128417969, jet_pt[1])
        self.assertAlmostEqual(43.783382415771484, met_pt[0])
        tree.GetEntry(1)
        self.assertEqual(3, len(jet_pt))
        self.assertEqual(1, len(met_pt))
        self.assertEqual(112.48554992675781, jet_pt[0])
        self.assertEqual(52.32780075073242, jet_pt[1])
        self.assertEqual(48.861289978027344, jet_pt[2])
        self.assertAlmostEqual(20.483951568603516, met_pt[0])
##____________________________________________________________________________||
| Python | 0 | |
05a8129787d32bb605fee9b85c1c11e8c582c43e | Add messages utility tests | tests/unit/utils/test_messages.py | tests/unit/utils/test_messages.py | import mock
import sys
import unittest
from bin.commands.utils import messages
class TestMessages(unittest.TestCase):
    """Unit tests for bin.commands.utils.messages.

    NOTE(review): patching '__builtin__.print' is Python 2 specific; on
    Python 3 the target would be 'builtins.print'.
    """

    def setUp(self):
        # store private methods so they can be restored after tests that mock them
        self._print = messages._print

    def tearDown(self):
        messages._print = self._print

    @mock.patch('__builtin__.print')
    def test__print(self, mock_print):

        # given
        message = 'message'
        prefix = 'a prefix:'
        quiet = False
        exit_ = False
        file_ = sys.stdout

        # when
        messages._print(message, prefix, quiet, exit_, file_)

        # then
        mock_print.assert_called_once_with(prefix + ' ' + message, file=file_)

    @mock.patch('__builtin__.print')
    def test__print_quiet(self, mock_print):

        # given
        message = 'message'
        prefix = 'a prefix:'
        quiet = True
        exit_ = False
        file_ = sys.stdout

        # when
        messages._print(message, prefix, quiet, exit_, file_)

        # then
        mock_print.assert_not_called()

    @mock.patch('__builtin__.print')
    def test__print_andExit(self, mock_print):

        # given
        message = 'message'
        prefix = 'a prefix:'
        quiet = False
        exit_ = True
        file_ = sys.stdout

        # when
        try:
            messages._print(message, prefix, quiet, exit_, file_)
            self.fail('expected to exit but did not')  # pragma: no cover
        except SystemExit:
            pass

        # then
        mock_print.assert_called_once_with(prefix + ' ' + message, file=file_)

    def test__print_messageNotAStr(self):

        # when
        with self.assertRaises(AssertionError) as context:
            messages._print(123)

        # then
        self.assertEqual(context.exception.message, 'message must be a str')

    def test__print_prefixNotAStr(self):

        # when
        with self.assertRaises(AssertionError) as context:
            messages._print('message', prefix=123)

        # then
        self.assertEqual(context.exception.message, 'prefix must be a str')

    def test__print_quietNotABool(self):

        # when
        with self.assertRaises(AssertionError) as context:
            messages._print('message', quiet='false')

        # then
        self.assertEqual(context.exception.message, 'quiet must be a bool')

    def test__print_exitNotABool(self):

        # when
        with self.assertRaises(AssertionError) as context:
            messages._print('message', exit_='false')

        # then
        self.assertEqual(context.exception.message, 'exit must be a bool')

    @mock.patch('bin.commands.utils.messages._print')
    def test_error(self, mock_print):

        # given
        message = 'the message'
        prefix = 'error prefix'
        exit_ = False

        # when
        messages.error(message, prefix=prefix, exit_=exit_)

        # then
        mock_print.assert_called_once_with(message, prefix=prefix, exit_=exit_, file_=sys.stderr)

    @mock.patch('bin.commands.utils.messages._print')
    def test_warn(self, mock_print):

        # given
        message = 'the message'

        # when
        messages.warn(message)

        # then
        mock_print.assert_called_once_with(message, prefix='warn:')

    @mock.patch('bin.commands.utils.messages._print')
    def test_usage(self, mock_print):

        # given
        message = 'the message'

        # when
        messages.usage(message)

        # then
        mock_print.assert_called_once_with(message, prefix='usage:')

    @mock.patch('bin.commands.utils.messages._print')
    def test_info(self, mock_print):

        # given
        message = 'the message'
        quiet = True

        # when
        messages.info(message, quiet)

        # then
        mock_print.assert_called_once_with(message, quiet=quiet)
adf65027521124ea89e9c6c5ee2baf7366b2da46 | Add example settings file for makam extractors | compmusic/extractors/makam/settings.example.py | compmusic/extractors/makam/settings.example.py | token = "" # Dunya API Token
| Python | 0 | |
9722bc3fc0a3cf8c95e91571b4b085e07e5a124c | Create 6kyu_message_from_aliens.py | Solutions/6kyu/6kyu_message_from_aliens.py | Solutions/6kyu/6kyu_message_from_aliens.py | import re
from collections import Counter
# Symbol table: each ASCII-art glyph maps to one plaintext character.
d={
'|-|':'h',
'[-':'e',
'()':'o',
'3]':'b',
'_|':'l',
'|':'i',
'^|':'p',
'/`':'y',
')(':'o',
'?/':'r',
'\/':'a',
'|\|':'n',
'</':'k',
'~|~':'t',
'=/':'f',
')|':'d',
'|_|':'u',
'(':'c',
'-[':'e',
'~\_':'s',
'-[':'e',
']3':'b',
'_/~':'z',
'/\\/\\':'w',
'<>':'x',
'/\\':'v',
'|/\|':'m',
'_)(':'q',
'T_':'j',
',_(':'g',
'__':' '
}

def decode(m):
    """Decode an alien message.

    The message is written right-to-left, its symbols separated by runs of
    a separator character (the single most frequent character in m).

    :param m: the encoded, non-empty message string.
    :return: the decoded plaintext.
    """
    # The separator is whichever character occurs most often overall.
    splitter = Counter(m).most_common(1)[0][0]
    r = []
    # Reverse the message, then split on one-or-more separator characters;
    # the capture group also yields separator fragments, which we skip.
    for symbol in re.split('(' + re.escape(splitter) + ')+', m[::-1]):
        try:
            r.append(d[symbol])
        except KeyError:
            # Separator fragments and unknown symbols are simply dropped.
            continue
    return ''.join(r)
| Python | 0.000027 | |
125c75ea246c2d95f0addbb31b2d82dde588f21d | Add a unit test for KaggleKernelCredentials. | tests/test_kaggle_kernel_credentials.py | tests/test_kaggle_kernel_credentials.py | import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
    """Tests for KaggleKernelCredentials defaults."""

    def test_default_target(self):
        """A freshly constructed credential should target BigQuery by default."""
        creds = KaggleKernelCredentials()
        self.assertEqual(GcpTarget.BIGQUERY, creds.target)
| Python | 0 | |
5e7f29a66e440e440b1f7a4848d17bb7ae01139b | Update from template. | ci/bootstrap.py | ci/bootstrap.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from os.path import exists
from os.path import join
if __name__ == "__main__":
    # Render the CI/tox config templates from setup.cfg's build matrix,
    # bootstrapping a dedicated virtualenv with jinja2 + matrix on first run.
    base_path = join(".tox", "configure")
    # Windows virtualenvs put executables under Scripts/, POSIX under bin/.
    if sys.platform == "win32":
        bin_path = join(base_path, "Scripts")
    else:
        bin_path = join(base_path, "bin")
    if not exists(base_path):
        import subprocess
        print("Bootstrapping ...")
        try:
            subprocess.check_call(["virtualenv", base_path])
        except Exception:
            # Fall back to invoking virtualenv as a module of this interpreter.
            subprocess.check_call([sys.executable, "-m", "virtualenv", base_path])
        print("Installing `jinja2` and `matrix` into bootstrap environment ...")
        subprocess.check_call([join(bin_path, "pip"), "install", "jinja2", "matrix"])
    # Activate the bootstrap environment inside this very process so the
    # imports below resolve against it.
    activate = join(bin_path, "activate_this.py")
    exec(compile(open(activate, "rb").read(), activate, "exec"), dict(__file__=activate))

    import jinja2
    import matrix

    jinja = jinja2.Environment(
        loader=jinja2.FileSystemLoader(join("ci", "templates")),
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True
    )

    # Translate each matrix entry into the dict the templates consume.
    tox_environments = {}
    for (alias, conf) in matrix.from_file("setup.cfg").items():
        python = conf["python_versions"]
        deps = conf["dependencies"]
        if "coverage_flags" in conf:
            cover = {"false": False, "true": True}[conf["coverage_flags"].lower()]
        if "environment_variables" in conf:
            env_vars = conf["environment_variables"]
        tox_environments[alias] = {
            "python": "python" + python if "py" not in python else python,
            "deps": deps.split(),
        }
        if "coverage_flags" in conf:
            tox_environments[alias].update(cover=cover)
        if "environment_variables" in conf:
            tox_environments[alias].update(env_vars=env_vars.split())

    # NOTE(review): output files are written to the current working
    # directory (e.g. tox.ini at the repo root), not back into ci/templates
    # -- this appears intentional but is worth confirming.
    for name in os.listdir(join("ci", "templates")):
        with open(name, "w") as fh:
            fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
        print("Wrote {}".format(name))
    print("DONE.")
| Python | 0 | |
db76495b4f41021e2613d79b6f5cb30c96fb4290 | Add PriorityStore and PriorityItem | desmod/prioritystore.py | desmod/prioritystore.py | from collections import namedtuple
from heapq import heappush, heappop
from simpy import Store
class PriorityItem(namedtuple('PriorityItem', 'priority item')):
    """A (priority, item) pair ordered solely by its priority field, so the
    payload never has to be comparable."""

    def __lt__(self, other):
        mine, theirs = self.priority, other.priority
        return mine < theirs
class PriorityStore(Store):
    """A simpy Store whose items are kept in a heap, so gets yield the
    smallest (highest-priority) item first instead of FIFO order."""

    def _do_put(self, event):
        # Accept the item only while there is spare capacity; otherwise the
        # put event is left pending for simpy to retry later.
        if len(self.items) < self._capacity:
            heappush(self.items, event.item)
            event.succeed()

    def _do_get(self, event):
        # Succeed with the smallest item; an empty store leaves the get pending.
        if self.items:
            event.succeed(heappop(self.items))
| Python | 0 | |
def giveup_on_http_4xx_except_429(error):
    """Decide whether a retry loop should give up on an HTTP error.

    Returns True (give up) for client errors, except 429 (rate limiting),
    which -- like every 5xx server error and a missing response -- should
    be retried.
    """
    response = error.response
    if response is None:
        return False
    status = response.status_code
    # Retry on 429 and on any 5xx; give up on every other status.
    return status != 429 and status < 500
| Python | 0 | |
b59745e72a3c0a98da517f00e95fbafcff0cee3d | Remove unnecessary import. | tests/test_charts_snep_input.py | tests/test_charts_snep_input.py | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase, use_vcr
class TestChartsSnepInput(FlexGetBase):
__yaml__ = """
tasks:
test:
charts_snep_input: radio
"""
    @use_vcr
    def test_input(self):
        """Run the 'test' task (recorded via VCR) and check the SNEP radio
        chart yields exactly 60 entries."""
        self.execute_task('test')
        assert len(self.task.entries) == 60, 'Produces %i entries, expected 60' % len(self.task.entries)
f8f28580df477ec353e5faf7b1da2404c3691a7f | test ddns module | tests/unit/modules/ddns_test.py | tests/unit/modules/ddns_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
mock_open,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import ddns
try:
import dns.query
import dns.update
import dns.tsigkeyring
dns_support = True
except ImportError as e:
dns_support = False
# Import salt libs
import salt.utils
import json
# Globals
ddns.__grains__ = {}
ddns.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DDNSTestCase(TestCase):
    '''
    TestCase for the salt.modules.ddns module.

    All network and filesystem interaction is mocked out; the dns.* and
    salt.utils entry points are patched so no real DNS traffic occurs.
    '''
    @patch('salt.modules.ddns.update')
    def test_add_host(self, ddns_update):
        '''
        Test cases for Add, replace, or update the A
        and PTR (reverse) records for a host.
        '''
        # add_host simply mirrors the result of the underlying update().
        ddns_update.return_value = False
        self.assertFalse(ddns.add_host(zone='A',
                                       name='B',
                                       ttl=1,
                                       ip='172.27.0.0'))

        ddns_update.return_value = True
        self.assertTrue(ddns.add_host(zone='A',
                                      name='B',
                                      ttl=1,
                                      ip='172.27.0.0'))

    @patch('salt.modules.ddns.delete')
    def test_delete_host(self, ddns_delete):
        '''
        Tests for delete the forward and reverse records for a host.
        '''
        ddns_delete.return_value = False
        with patch.object(dns.query, 'udp') as mock:
            mock.answer = [{'address': 'localhost'}]
            self.assertFalse(ddns.delete_host(zone='A', name='B'))

    def test_update(self):
        '''
        Test to add, replace, or update a DNS record.
        '''
        # NOTE(review): the local name `mock` is reused for several different
        # MagicMock objects below, shadowing each previous one.
        file_data = json.dumps({'A': 'B'})
        with patch('dns.message.make_query', return_value=True):
            with patch('dns.rdatatype.from_text', return_value=True):
                with patch('dns.rdata.from_text', return_value=True):
                    mock = MagicMock(return_value=True)
                    with patch.dict(ddns.__salt__, {'config.option': mock}):
                        mock = MagicMock(return_value=True)
                        with patch.dict(ddns.__salt__, {'file.file_exists': mock}):
                            with patch('salt.utils.fopen',
                                       mock_open(read_data=file_data),
                                       create=True):
                                with patch.object(dns.tsigkeyring, 'from_text', return_value=True):
                                    with patch.object(dns.query, 'udp') as mock:
                                        mock.answer = [{'address': 'localhost'}]
                                        self.assertFalse(ddns.update(zone='A',
                                                                     name='B',
                                                                     ttl=1,
                                                                     rdtype='C',
                                                                     data='D'))

    def test_delete(self):
        '''
        Test to delete a DNS record.
        '''
        file_data = json.dumps({'A': 'B'})
        with patch.object(dns.query, 'udp') as mock:
            mock.answer = [{'address': 'localhost'}]
            mock = MagicMock(return_value=True)
            with patch.dict(ddns.__salt__, {'config.option': mock}):
                mock = MagicMock(return_value=True)
                with patch.dict(ddns.__salt__, {'file.file_exists': mock}):
                    with patch('salt.utils.fopen',
                               mock_open(read_data=file_data),
                               create=True):
                        with patch.object(dns.tsigkeyring, 'from_text', return_value=True):
                            self.assertFalse(ddns.delete(zone='A', name='B'))
if __name__ == '__main__':
    # Allow running this module directly through the salt integration runner.
    from integration import run_tests
    run_tests(DDNSTestCase, needs_daemon=False)
ce01cc61ea62c717503d991826e6b9915b23900b | Fix usage print | check-bugs.py | check-bugs.py | #!/usr/bin/python
import sys
import re
import pprint
import collections
Head = collections.namedtuple("Head", "file line")

def parse(pn):
    """Parse a bug-report file into {Head(file, line): description}.

    Lines starting with '#' are ignored; a "[file:line] ..." header starts
    a new entry, and all following lines accumulate as its description
    (stripped of surrounding whitespace).
    """
    descriptions = collections.defaultdict(str)
    current = None
    with open(pn) as fp:
        for line in fp:
            # ignore comments
            if line.startswith("#"):
                continue
            # a "[file:line]" header starts a new bug entry
            header = re.match("^\[(\S+):(\d+)\]+.*", line)
            if header is not None:
                current = Head._make(header.groups())
            elif current is not None:
                # collect description lines under the current header
                descriptions[current] += line
    # chomp the accumulated text
    return {head: desc.strip() for head, desc in descriptions.items()}
def say_pass(reason):
    """Print a green PASS tag followed by *reason* (Python 2 print statement)."""
    print "\033[1;32mPASS\033[m", reason
def say_fail(reason):
    """Print a red FAIL tag followed by *reason* (Python 2 print statement)."""
    print "\033[1;31mFAIL\033[m", reason
def stat_summary(ans):
    """Print one summary line per found bug, then pass/fail on the count."""
    print("Summary:")
    for (h, d) in ans.items():
        # Only the first line of the description is shown in the table.
        desc = d.split("\n")[0]
        print(" %-8s %+4s | %-30s .." % (h.file, h.line, desc))

    # The assignment requires at least five reported bugs.
    if len(ans) >= 5:
        say_pass("found enough bugs")
    else:
        say_fail("found %s bugs, but need at least 5" % len(ans))
if __name__ == "__main__":
    # Expect exactly one argument: the path to the bugs file.
    if len(sys.argv) != 2:
        print("usage: %s [bugs.txt]" % sys.argv[0])
        exit(1)

    ans = parse(sys.argv[1])
    stat_summary(ans)
| #!/usr/bin/python
import sys
import re
import pprint
import collections
Head = collections.namedtuple("Head", "file line")
def parse(pn):
    """Parse a bug-report file into {Head(file, line): description}.

    Lines starting with '#' are ignored; a "[file:line] ..." header starts
    a new entry and the following lines accumulate as its description.
    """
    ans = collections.defaultdict(str)
    head = None
    for l in open(pn):
        # ignore comments
        if l.startswith("#"):
            continue
        # found a header
        m = re.match("^\[(\S+):(\d+)\]+.*", l)
        if m:
            head = Head._make(m.groups())
            continue
        # collect descriptions
        if head:
            ans[head] += l
    # chomp
    return dict((h, d.strip()) for (h, d) in ans.items())
def say_pass(reason):
    """Print a green PASS tag followed by *reason* (Python 2 print statement)."""
    print "\033[1;32mPASS\033[m", reason
def say_fail(reason):
    """Print a red FAIL tag followed by *reason* (Python 2 print statement)."""
    print "\033[1;31mFAIL\033[m", reason
def stat_summary(ans):
    """Print one summary line per found bug, then pass/fail on the count."""
    print("Summary:")
    for (h, d) in ans.items():
        # Only the first line of the description is shown in the table.
        desc = d.split("\n")[0]
        print(" %-8s %+4s | %-30s .." % (h.file, h.line, desc))

    if len(ans) >= 5:
        say_pass("found enough bugs")
    else:
        say_fail("found %s bugs, but need at least 5" % len(ans))
if __name__ == "__main__":
    # Expect exactly one argument: the path to the bugs file.
    if len(sys.argv) != 2:
        # Interpolate with %, not as a second print() argument: passing the
        # tuple printed "usage: %s [bugs.txt] <script>" with the placeholder
        # left unexpanded.
        print("usage: %s [bugs.txt]" % sys.argv[0])
        exit(1)

    ans = parse(sys.argv[1])
    stat_summary(ans)
| Python | 0.000255 |
d6fa7713556582bab54efc3ba53d27b411d8e23c | update import statements | tamil/__init__.py | tamil/__init__.py | # -*- coding: utf-8 -*-
#
# (C) 2013 Muthiah Annamalai <ezhillang@gmail.com>
# Library provides various encoding services for Tamil libraries
#
import utf8
import tscii
import txt2unicode
import txt2ipa
def printchar(letters):
    """Print every character in *letters* next to its escaped code point."""
    for ch in letters:
        code = u"\\u%04x" % ord(ch)
        print(ch, code)
P = lambda x: u" ".join(x)
| # -*- coding: utf-8 -*-
#
# (C) 2013 Muthiah Annamalai <ezhillang@gmail.com>
# Library provides various encoding services for Tamil libraries
#
from . import utf8
from . import tscii
from . import txt2unicode
from . import txt2ipa
def printchar( letters ):
    """Print every character in *letters* next to its escaped code point."""
    for c in letters:
        print(c, u"\\u%04x"%ord(c))
P = lambda x: u" ".join(x)
| Python | 0 |
97a8c8c2baffdeaf6b710cf875d2f8641b999338 | Create adapter_16mers.py | select_random_subset/adapter_16mers.py | select_random_subset/adapter_16mers.py |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 21 14:15:18 2017
@author: nikka.keivanfar
"""
#see also: adapters.fa
# Illumina adapter/primer sequences whose 16-mers we want to enumerate.
P5 = 'AATGATACGGCGACCACCGA'
P7 = 'CAAGCAGAAGACGGCATACGAGAT'
read1 = 'GATCTACACTCTTTCCCTACACGACGCTC'
read2 = 'GTGACTGGAGTTCAGACGTGT'
adapters = [P5, P7, read1, read2] #to do: streamline loops for all adapters combined
# Per-sequence k-mer count dictionaries.
P5_kmers = {}
P7_kmers = {}
read1_kmers = {}
read2_kmers = {}
# k-mer length.
k = 16
#P5 16mers
# Slide a k-wide window over the sequence and count each k-mer
# (Python 2: dict.has_key).
for i in range(len(P5) - k + 1):
    kmer = P5[i:i+k]
    if P5_kmers.has_key(kmer):
        P5_kmers[kmer] += 1
    else:
        P5_kmers[kmer] = 1
for kmer, count in P5_kmers.items():
    print kmer + "\t" + str(count)
# NOTE(review): set(kmer) builds a set of the *characters* of whichever
# k-mer the loop variable last held, not the set of k-mers -- presumably
# set(P5_kmers) was intended. Same issue for P7mers/read1mers/read2mers.
P5mers = set(kmer)
#P7 16mers
for i in range(len(P7) - k + 1):
    kmer = P7[i:i+k]
    if P7_kmers.has_key(kmer):
        P7_kmers[kmer] += 1
    else:
        P7_kmers[kmer] = 1
for kmer, count in P7_kmers.items():
    print kmer + "\t" + str(count)
P7mers = set(kmer)
#read1 16mers
for i in range(len(read1) - k + 1):
    kmer = read1[i:i+k]
    if read1_kmers.has_key(kmer):
        read1_kmers[kmer] += 1
    else:
        read1_kmers[kmer] = 1
for kmer, count in read1_kmers.items():
    print kmer + "\t" + str(count)
read1mers = set(kmer)
#read2 16mers
for i in range(len(read2) - k + 1):
    kmer = read2[i:i+k]
    if read2_kmers.has_key(kmer):
        read2_kmers[kmer] += 1
    else:
        read2_kmers[kmer] = 1
for kmer, count in read2_kmers.items():
    print kmer + "\t" + str(count)
read2mers = set(kmer)
| Python | 0.000001 | |
7c6d3e65b21431e61093b76d52efb9af68ae5f36 | initialize final model | code/final.py | code/final.py | # # Test set predictions using best final model (using all training data)
from sklearn.model_selection import TimeSeriesSplit, train_test_split
from keras.utils.visualize_util import plot
from keras.models import Sequential
from keras.layers import GRU, Dense, Masking, Dropout, Activation
from keras.callbacks import EarlyStopping
import numpy as np
from itertools import product
import cPickle as pkl
from scipy.sparse import csr_matrix
from utils import set_trace, plot_ROC
from sklearn.metrics import roc_curve, auc, roc_auc_score
import tensorflow as tf
tf.python.control_flow_ops = tf  # compatibility shim for this Keras/TF pairing

# ---------------------------------------------------------------------------
# Load saved data
# ---------------------------------------------------------------------------
print('Load saved data')
X_train = pkl.load(open('data/X_train.np', 'rb'))
X_test = pkl.load(open('data/X_test.np', 'rb'))
y_train = pkl.load(open('data/y_train.np', 'rb'))
y_test = pkl.load(open('data/y_test.np', 'rb'))

X_train = X_train[1:X_train.shape[0]] # drop first sample so batch size is divisible
y_train = y_train[1:y_train.shape[0]]

# Label shift
lahead = 0 # number of days ahead that are used to make the prediction
if lahead != 0:
    y_train = np.roll(y_train, -lahead, axis=0)
    y_test = np.roll(y_test, -lahead, axis=0)

# Define network structure
epochs = 25
nb_timesteps = 1
nb_classes = 2
nb_features = X_train.shape[1]
output_dim = 1

# Cross-validated model parameters
batch_size = 14
dropout = 0.25
activation = 'sigmoid'
nb_hidden = 128
initialization = 'glorot_normal'
nb_stacked_gru = 12  # total GRU layers; was eleven hand-copied blocks + one

# Reshape X to three dimensions: (batch_size, nb_timesteps, nb_features)
X_train = csr_matrix.toarray(X_train) # convert from sparse matrix to N dimensional array
X_train = np.resize(X_train, (X_train.shape[0], nb_timesteps, X_train.shape[1]))
print('X_train shape:', X_train.shape)

X_test = csr_matrix.toarray(X_test) # convert from sparse matrix to N dimensional array
X_test = np.resize(X_test, (X_test.shape[0], nb_timesteps, X_test.shape[1]))
print('X_test shape:', X_test.shape)

# Reshape y to two dimensions: (batch_size, output_dim)
y_train = np.resize(y_train, (X_train.shape[0], output_dim))
print('y_train shape:', y_train.shape)
y_test = np.resize(y_test, (X_test.shape[0], output_dim))
print('y_test shape:', y_test.shape)

# Initiate sequential model
print('Initializing model')
model = Sequential()
# Masking handles variable-length inputs by skipping all-zero timesteps.
# The full batch_input_shape is required because the network is stateful:
# the sample of index i in batch k is the follow-up for sample i in batch k-1.
model.add(Masking(mask_value=0., batch_input_shape=(batch_size, nb_timesteps, nb_features)))
model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization,
              batch_input_shape=(batch_size, nb_timesteps, nb_features)))
model.add(Dropout(dropout))
# Middle GRU layers (the original wrote these out by hand, one pair per layer).
for _ in range(nb_stacked_gru - 2):
    model.add(GRU(nb_hidden, return_sequences=True, stateful=True, init=initialization))
    model.add(Dropout(dropout))
# Final GRU emits only the last output of the sequence for the classifier head.
model.add(GRU(nb_hidden, stateful=True, init=initialization))
model.add(Dropout(dropout))
model.add(Dense(output_dim, activation=activation))

# Configure learning process
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['binary_accuracy'])
plot(model, to_file='results/final_model.png', # Plot graph of model
     show_shapes = True,
     show_layer_names = False)

# Training: one manual epoch per iteration so states can be reset in between.
early_stopping = EarlyStopping(monitor='loss', patience=1)
print('Training')
for i in range(epochs):
    print('Epoch', i+1, '/', epochs)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              verbose=1,
              nb_epoch=1,
              shuffle=False, # turn off shuffle to ensure training data patterns remain sequential
              callbacks=[early_stopping]) # stop early if training loss not improving after 1 epoch
    model.reset_states() # clear recurrent state between epochs

# Evaluation
print('Evaluating results in terms of classification accuracy')
loss = model.evaluate(X_test, y_test, batch_size=batch_size) # compute loss on test data, batch-by-batch
print("%s: %.2f%%" % (model.metrics_names[1], loss[1]*100))

print('Evaluating results in terms of AUC')
y_probs = model.predict_proba(X_test, batch_size=batch_size, verbose=1)
print('AUC ' + str(roc_auc_score(y_test, y_probs)))

y_pred = model.predict(X_test, batch_size=batch_size, verbose=1) # generate output predictions for test samples, batch-by-batch
# Plot ROC curve
plot_ROC(y_test, y_pred) | Python | 0.000016 | |
faf10aa246414e4f5be735da6b1c058ec7d546b9 | remove like indexes from smsbillables | corehq/apps/smsbillables/migrations/0011_remove_like_indexes.py | corehq/apps/smsbillables/migrations/0011_remove_like_indexes.py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Drop the redundant '*_like' varchar-pattern indexes on smsbillables tables."""

    # Names of the '*_like' indexes to drop; the original spelled out one
    # db.execute per index.
    LIKE_INDEXES = (
        "smsbillables_smsbillable_direction_like",
        "smsbillables_smsbillable_domain_like",
        "smsbillables_smsgatewayfeecriteria_backend_api_id_like",
        "smsbillables_smsgatewayfeecriteria_backend_instance_like",
        "smsbillables_smsgatewayfeecriteria_direction_like",
        "smsbillables_smsgatewayfeecriteria_prefix_like",
        "smsbillables_smsusagefeecriteria_direction_like",
        "smsbillables_smsusagefeecriteria_domain_like",
    )

    def forwards(self, orm):
        # IF EXISTS keeps the migration idempotent on databases where an
        # index was never created.
        for index_name in self.LIKE_INDEXES:
            db.execute("DROP INDEX IF EXISTS %s" % index_name)

    def backwards(self, orm):
        # Intentionally a no-op: the indexes are not recreated.
        pass

    models = {
        u'accounting.currency': {
            'Meta': {'object_name': 'Currency'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
            'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '20', 'decimal_places': '9'}),
            'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'smsbillables.smsbillable': {
            'Meta': {'object_name': 'SmsBillable'},
            'api_response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_sent': ('django.db.models.fields.DateField', [], {}),
            'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
            'gateway_fee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsGatewayFee']", 'null': 'True'}),
            'gateway_fee_conversion_rate': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'null': 'True', 'max_digits': '20', 'decimal_places': '9'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'log_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'usage_fee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsUsageFee']", 'null': 'True'})
        },
        u'smsbillables.smsgatewayfee': {
            'Meta': {'object_name': 'SmsGatewayFee'},
            'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '4'}),
            'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsGatewayFeeCriteria']"}),
            'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
            'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'smsbillables.smsgatewayfeecriteria': {
            'Meta': {'object_name': 'SmsGatewayFeeCriteria'},
            'backend_api_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'backend_instance': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'country_code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'db_index': 'True', 'blank': 'True'})
        },
        u'smsbillables.smsusagefee': {
            'Meta': {'object_name': 'SmsUsageFee'},
            'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '10', 'decimal_places': '4'}),
            'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['smsbillables.SmsUsageFeeCriteria']"}),
            'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'smsbillables.smsusagefeecriteria': {
            'Meta': {'object_name': 'SmsUsageFeeCriteria'},
            'direction': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['smsbillables']
| Python | 0.000097 | |
b405264320f258d4ea41cf32ef5f4956faa82f3f | Add subfigure | sphinxpp/subfig.py | sphinxpp/subfig.py | """
Adds subfigure functionality
"""
from docutils import nodes
import docutils.parsers.rst.directives as directives
from sphinx.util.compat import Directive
from sphinx import addnodes
class subfig(nodes.General, nodes.Element):
    # Docutils node for one sub-figure inside a compound figure; created by
    # doctree_read() from ordinary figure nodes.
    pass
def skip_visit(self, node):
    # Visitor used by output formats (e.g. plain text) that should drop the
    # node and its children entirely.
    raise nodes.SkipNode
def visit_subfig_tex(self, node):
    """Begin LaTeX rendering of a subfig node.

    Stashes the translator's current output list and swaps in a fresh one so
    that depart_subfig_tex() can post-process only this figure's LaTeX.
    """
    self.__body, self.body = self.body, []
def depart_subfig_tex(self, node):
    """Finish LaTeX rendering of a subfig node.

    Joins the LaTeX buffered since visit_subfig_tex(), gives the figure a
    top-aligned box of the requested relative width, renames every figure
    environment to subfigure, then restores the outer output buffer and
    appends the rewritten snippet.
    """
    latex = ''.join(self.body)
    latex = latex.replace('[htbp]', '[t]{%s\\linewidth}' % node['width'])
    latex = latex.replace('figure', 'subfigure')
    self.body = self.__body
    self.body.append(latex)
def visit_subfig_html(self, node):
    """Begin HTML rendering of a subfig node.

    Stashes the translator's current output list and swaps in a fresh one so
    that depart_subfig_html() can post-process only this figure's markup.
    """
    self.__body, self.body = self.body, []
def depart_subfig_html(self, node):
    """Finish HTML rendering of a subfig node.

    Converts the node's fractional width (e.g. '0.5') into an inline
    percentage style, swaps the CSS class from 'figure' to 'subfigure', then
    restores the outer output buffer and appends the rewritten markup.
    """
    html = ''.join(self.body)
    percent = float(node['width']) * 100
    html = html.replace('class="figure',
                        'style="width: %g%%" class="subfigure' % percent)
    self.body = self.__body
    self.body.append(html)
class subfigstart(nodes.General, nodes.Element):
    # Marker node produced by the 'subfigstart' directive: opens a compound
    # figure.  doctree_read() stores the matching subfigend node under the
    # 'subfigend' key.
    pass
def visit_subfigstart_tex(self, node):
    # Open the outer LaTeX figure environment that will hold the subfigures.
    self.body.append('\n\\begin{figure}\n\\centering\n\\capstart\n')
def depart_subfigstart_tex(self, node):
    pass
def visit_subfigstart_html(self, node):
    # Open the wrapping <div>; the start tag is generated from the matching
    # subfigend node stored by doctree_read().
    atts = {'class': 'figure compound align-center'}
    self.body.append(self.starttag(node['subfigend'], 'div', **atts))
def depart_subfigstart_html(self, node):
    pass
class subfigend(nodes.General, nodes.Element):
    # Marker node produced by the 'subfigend' directive: closes the compound
    # figure and carries its caption/label/width options.
    pass
def visit_subfigend_tex(self, node):
    pass
def depart_subfigend_tex(self, node):
    # Close the outer LaTeX figure environment opened by visit_subfigstart_tex.
    self.body.append('\n\n\\end{figure}\n\n')
def visit_subfigend_html(self, node):
    pass
def depart_subfigend_html(self, node):
    # Close the wrapping <div> opened by visit_subfigstart_html.
    self.body.append('</div>')
class SubFigEndDirective(Directive):
    # Implements the 'subfigend' reST directive; its optional content is
    # parsed and the first paragraph reused as the compound figure's caption.
    has_content = True
    optional_arguments = 3
    final_argument_whitespace = True
    option_spec = {'label': directives.uri,
                   'alt': directives.unchanged,
                   'width': directives.unchanged_required}
    def run(self):
        # All three options are optional; only present ones are copied onto
        # the node.
        label = self.options.get('label', None)
        width = self.options.get('width', None)
        alt = self.options.get('alt', None)
        node = subfigend('', ids=[label] if label is not None else [])
        if width is not None:
            node['width'] = width
        if alt is not None:
            node['alt'] = alt
        if self.content:
            # Parse the directive body into a throwaway container.
            anon = nodes.Element()
            self.state.nested_parse(self.content, self.content_offset, anon)
            # NOTE(review): anon[0] raises IndexError if parsing produced no
            # child nodes -- confirm this cannot happen for non-empty content.
            first_node = anon[0]
            if isinstance(first_node, nodes.paragraph):
                caption = nodes.caption(first_node.rawsource, '',
                                        *first_node.children)
                node += caption
        if label is not None:
            # Add an explicit target so the figure can be cross-referenced.
            targetnode = nodes.target('', '', ids=[label])
            node.append(targetnode)
        return [node]
class SubFigStartDirective(Directive):
    # Implements the 'subfigstart' reST directive: emits a bare marker node
    # that doctree_read() later pairs with the following 'subfigend'.
    has_content = False
    optional_arguments = 0
    def run(self):
        node = subfigstart()
        return [node]
def doctree_read(app, doctree):
    # Called on the 'doctree-read' event: pairs every subfigstart marker with
    # the next subfigend marker among its siblings and rewrites the figure
    # nodes in between into subfig nodes.
    secnums = app.builder.env.toc_secnumbers  # NOTE(review): unused -- confirm it can be removed
    for node in doctree.traverse(subfigstart):
        parentloc = node.parent.children.index(node)
        # Scan forward through the siblings for the matching subfigend node.
        subfigendloc = parentloc
        while subfigendloc < len(node.parent.children):
            n = node.parent.children[subfigendloc]
            if isinstance(n, subfigend):
                break
            subfigendloc += 1
        if subfigendloc == len(node.parent.children):
            # No matching subfigend found: abort (this also skips any
            # subsequent subfigstart nodes in the document).
            return
        between_nodes = node.parent.children[parentloc:subfigendloc]
        subfigend_node = node.parent.children[subfigendloc]
        # Remember the end marker so visit_subfigstart_html can build the
        # wrapping start tag from its attributes.
        node['subfigend'] = subfigend_node
        for i, n in enumerate(between_nodes):
            if isinstance(n, nodes.figure):
                children = [n]
                # Pull a target placed directly before the figure into the
                # subfig so its anchor survives the rewrite.
                # NOTE(review): for i == 0 this reads between_nodes[-1], the
                # last sibling of the span -- confirm that is intended.
                prevnode = between_nodes[i-1]
                if isinstance(prevnode, nodes.target):
                    node.parent.children.remove(prevnode)
                    children.insert(0, prevnode)
                nodeloc = node.parent.children.index(n)
                node.parent.children[nodeloc] = subfig('', *children)
                node.parent.children[nodeloc]['width'] = subfigend_node['width']
                # NOTE(review): ['ids'][0] raises IndexError when the subfigend
                # directive was given no :label: option -- confirm.
                node.parent.children[nodeloc]['mainfigid'] = subfigend_node['ids'][0]
def setup(app):
    # Sphinx extension entry point: register the three custom nodes with
    # their per-format visitors, the two directives, and the doctree hook.
    app.add_node(subfigstart,
                 html=(visit_subfigstart_html, depart_subfigstart_html),
                 singlehtml=(visit_subfigstart_html, depart_subfigstart_html),
                 text=(skip_visit, None),
                 latex=(visit_subfigstart_tex, depart_subfigstart_tex))
    app.add_node(subfig,
                 html=(visit_subfig_html, depart_subfig_html),
                 singlehtml=(visit_subfig_html, depart_subfig_html),
                 text=(skip_visit, None),
                 latex=(visit_subfig_tex, depart_subfig_tex))
    app.add_node(subfigend,
                 html=(visit_subfigend_html, depart_subfigend_html),
                 singlehtml=(visit_subfigend_html, depart_subfigend_html),
                 text=(skip_visit, None),
                 latex=(visit_subfigend_tex, depart_subfigend_tex))
    app.add_directive('subfigstart', SubFigStartDirective)
    app.add_directive('subfigend', SubFigEndDirective)
    # Rewrite figures into subfig nodes once each source file is parsed.
    app.connect('doctree-read', doctree_read)
| Python | 0.000002 | |
cfabe41b0d2f0cea143360fa1610baaaa87f8946 | add python wrapper for network builder | side_project/network_builder/MNM_nb.py | side_project/network_builder/MNM_nb.py | import os
import numpy as np
import networkx as nx
DLINK_ENUM = ['CTM', 'LQ', 'LTM', 'PQ']
DNODE_ENUM = ['FWJ', 'GRJ', 'DMOND', 'DMDND']
class MNM_dlink():
  """A directed roadway link and its flow-model parameters."""
  # NOTE: the original defined a zero-argument __init__ immediately shadowed
  # by this one (Python keeps only the last def); the dead copy was removed.
  def __init__(self, ID, length, typ, ffs, cap, rhoj, lanes):
    self.ID = ID
    self.length = length #mile
    self.typ = typ #type, one of DLINK_ENUM
    self.ffs = ffs #free-flow speed, mile/h
    self.cap = cap #capacity, v/hour
    self.rhoj = rhoj #jam density, v/miles
    self.lanes = lanes #num of lanes
  def is_ok(self, unit_time = 5):
    """Assert the parameters are physically consistent.

    unit_time is the simulation step in seconds; the last check requires the
    link to be traversable within one step at free-flow speed.
    """
    assert(self.length > 0.0)
    assert(self.ffs > 0.0)
    assert(self.cap > 0.0)
    assert(self.rhoj > 0.0)
    assert(self.lanes > 0)
    assert(self.typ in DLINK_ENUM)
    assert(self.cap / self.ffs < self.rhoj)  # critical density below jam density
    assert(unit_time * self.ffs / 3600 >= self.length)  # mile/h -> mile per step
class MNM_dnode():
  """A node of the network with an identifier and a junction type."""
  # NOTE: removed the dead zero-argument __init__ that this definition
  # shadowed in the original source.
  def __init__(self, ID, typ):
    self.ID = ID
    self.typ = typ  # one of DNODE_ENUM
  def is_ok(self):
    """Assert the node type is one of the supported kinds."""
    assert(self.typ in DNODE_ENUM)
class MNM_demand():
  """OD demand container: demand_dict[O][D] is a 1-D numpy array."""
  def __init__(self):
    self.demand_dict = dict()
  def add_demand(self, O, D, demand, overwriting = False):
    """Record the demand array for OD pair (O, D).

    Raises ValueError when the pair already exists and overwriting is False.
    """
    assert(type(demand) is np.ndarray)
    assert(len(demand.shape) == 1)  # demand must be a 1-D time series
    if O not in self.demand_dict:
      self.demand_dict[O] = dict()
    if (not overwriting) and (D in self.demand_dict[O]):
      # Bug fix: the original did raise("...") which raises a plain string
      # (itself a TypeError at runtime); raise a real exception instead.
      raise ValueError("Error, exists OD demand already")
    else:
      self.demand_dict[O][D] = demand
class MNM_od():
  """Maps origin/destination identifiers to their network node IDs."""
  def __init__(self):
    self.O_dict = dict()
    self.D_dict = dict()
  def add_origin(self, O, Onode_ID, overwriting = False):
    """Register origin O at node Onode_ID; ValueError on duplicate unless overwriting."""
    if (not overwriting) and (O in self.O_dict):
      # Bug fix: raise("...") raised a bare string (a TypeError at runtime).
      raise ValueError("Error, exists origin node already")
    else:
      self.O_dict[O] = Onode_ID
  def add_destination(self, D, Dnode_ID, overwriting = False):
    """Register destination D at node Dnode_ID; ValueError on duplicate unless overwriting."""
    if (not overwriting) and (D in self.D_dict):
      raise ValueError("Error, exists destination node already")
    else:
      self.D_dict[D] = Dnode_ID
class MNM_graph():
  """Thin wrapper around networkx.DiGraph that also tracks link IDs."""
  def __init__(self):
    self.graph = nx.DiGraph()
    # Bug fix: this was initialised as a second nx.DiGraph(); it is only ever
    # used as a mapping from edge ID to its (start, end) pair, so use a dict.
    self.edgeID_dict = dict()
  def add_edge(self, s, e, ID, create_node = False, overwriting = False):
    """Insert edge s->e with identifier ID.

    When create_node is False both endpoints must already exist; when
    overwriting is False the edge must not already exist.  Violations raise
    ValueError.
    """
    if (not overwriting) and ((s, e) in self.graph.edges()):
      # Bug fix (here and below): raise("...") raised a plain string.
      raise ValueError("Error, exists edge in graph")
    # Bug fix: the original raised when the endpoint *did* exist; the intent
    # of create_node=False is that missing endpoints are an error.
    elif (not create_node) and (s not in self.graph.nodes()):
      raise ValueError("Error, start node of edge not in graph")
    elif (not create_node) and (e not in self.graph.nodes()):
      # Bug fix: this branch called the undefined name 'raige' (NameError).
      raise ValueError("Error, end node of edge not in graph")
    else:
      self.graph.add_edge(s, e, ID = ID)
      self.edgeID_dict[ID] = (s, e)
  def add_node(self, node, overwriting = True):
    """Insert a node; ValueError on duplicates when overwriting is False."""
    if (not overwriting) and (node in self.graph.nodes()):
      raise ValueError("Error, exists node in graph")
    else:
      self.graph.add_node(node)
class MNM_routing():
  """Base class for routing configurations."""
  def __init__(self):
    # print() as a function call so the module also parses under Python 3;
    # output is identical under Python 2 for a single string argument.
    print("MNM_routing")
class MNM_routing_fixed(MNM_routing):
  """Fixed-path routing: travellers follow pre-computed paths."""
  def __init__(self):
    super(MNM_routing_fixed, self).__init__()
    self.required_items = ['num_path', 'choice_portion', 'route_frq', 'path_table']
class MNM_routing_adaptive(MNM_routing):
  """Adaptive routing: paths are re-evaluated at the given frequency."""
  def __init__(self):
    super(MNM_routing_adaptive, self).__init__()
    self.required_items = ['route_frq']
class MNM_routing_hybrid(MNM_routing):
  """Hybrid routing: mixes fixed and adaptive behaviour; no extra config yet."""
  def __init__(self):
    super(MNM_routing_hybrid, self).__init__()
    self.required_items = []
class MNM_config():
  """Placeholder for the simulation configuration (not yet implemented)."""
  def __init__(self):
    print("MNM_config")
class MNM_network_builder():
def __init__(self):
print "Test" | Python | 0.000001 | |
8a4931dd810079c1f78858c5058df74b0e696f72 | Correct IrFilters_eval_domain when the third item starts with 'object.' | smile_action_rule/models/ir_filters.py | smile_action_rule/models/ir_filters.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import api, models, fields
from openerp.tools import ustr
from openerp.tools.safe_eval import safe_eval as eval
from ..tools import unquote
class ActionFilter(models.Model):
    # Extend ir.filters: filters whose domain values reference the current
    # record ("object") are only meaningful inside action rules, so they are
    # flagged and excluded from the regular filter menus.
    _inherit = 'ir.filters'
    @api.one
    @api.depends('domain')
    def _get_action_rule(self):
        # Evaluate the stored domain text with placeholder objects whose
        # repr() is the bare expression text (unquote), so 'object.x' style
        # expressions survive evaluation unevaluated.
        localdict = {'object': unquote('object'), 'time': time,
                     'active_id': unquote("active_id"), 'uid': self._uid}
        eval_domain = eval(self.domain, localdict)
        # In the repr of an evaluated leaf tuple the third item is preceded
        # by ", ", so ', object.' matches a right-hand side that *starts*
        # with 'object.' (the case this revision was written to handle).
        # NOTE(review): values like 'not object.x' or '[object.x]' would not
        # match this substring -- confirm that is acceptable.
        self.action_rule = ', object.' in repr(eval_domain)
    action_rule = fields.Boolean('Only for action rules', compute='_get_action_rule', store=True)
    def get_filters(self, cr, uid, model, action_id=None):
        # Same contract as the standard ir.filters.get_filters(), but filters
        # reserved for action rules are excluded from the result.
        action_domain = self._get_action_domain(cr, uid, action_id)
        return self.search_read(cr, uid, action_domain + [
            ('model_id', '=', model),
            ('user_id', 'in', (uid, False)),
            ('action_rule', '=', False),
        ], ['name', 'is_default', 'domain', 'context', 'user_id'])
    @api.multi
    def _eval_domain(self, record_ids=None):
        # Expand the filter's domain for the given records: each leaf whose
        # value mentions 'object' is evaluated once per record and the
        # results OR-ed together with prefix '|' operators.
        self.ensure_one()
        domain = []
        eval_domain = eval(self.domain, {'object': unquote('object')})
        for cond in eval_domain:
            # NOTE(review): 'object' in cond[2] raises TypeError when the
            # leaf value is not iterable (e.g. an int) -- confirm all leaves
            # here carry string/unquote values.
            if isinstance(cond, tuple) and 'object' in cond[2]:
                subdomain = []
                records = self.env[self.model_id].browse(record_ids)
                for record in records:
                    # ustr() guards against non-unicode expression text.
                    new_cond = (cond[0], cond[1], eval(ustr(cond[2]), {'object': record}))
                    subdomain.append(new_cond)
                subdomain = list(set(subdomain))  # de-duplicate identical leaves
                subdomain = ['|'] * (len(subdomain) - 1) + subdomain
                domain.extend(subdomain)
            else:
                domain.append(cond)
        return domain
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import api, models, fields
from ..tools import unquote
class ActionFilter(models.Model):
_inherit = 'ir.filters'
@api.one
@api.depends('domain')
def _get_action_rule(self):
localdict = {'object': unquote('object'), 'time': time,
'active_id': unquote("active_id"), 'uid': self._uid}
eval_domain = eval(self.domain.replace(' ', ''), localdict)
self.action_rule = ',object.' in eval_domain
action_rule = fields.Boolean('Only for action rules', compute='_get_action_rule', store=True)
def get_filters(self, cr, uid, model, action_id=None):
action_domain = self._get_action_domain(cr, uid, action_id)
filter_ids = self.search(cr, uid, action_domain + [
('model_id', '=', model),
('user_id', 'in', (uid, False)),
('action_rule', '=', False),
])
my_filters = self.read(cr, uid, filter_ids, ['name', 'is_default', 'domain', 'context', 'user_id'])
return my_filters
@api.multi
def _eval_domain(self, record_ids=None):
self.ensure_one()
domain = []
eval_domain = eval(self.domain, {'object': unquote('object')})
for cond in eval_domain:
if isinstance(cond, tuple) and 'object' in cond[2]:
subdomain = []
records = self.env[self.model_id].browse(record_ids)
for record in records:
new_cond = (cond[0], cond[1], eval(cond[2], {'object': record}))
subdomain.append(new_cond)
subdomain = list(set(subdomain))
subdomain = ['|'] * (len(subdomain) - 1) + subdomain
domain.extend(subdomain)
else:
domain.append(cond)
return domain
| Python | 0 |
ca043c2d3fe5fbdd37f372e8a3ad8bfd1b501c89 | Correct Workitem._execute monkeypatch | smile_action_rule/workflow/workitem.py | smile_action_rule/workflow/workitem.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import registry
from openerp.workflow.workitem import WorkflowItem
# Keep a reference to the stock implementation so the wrapper can delegate.
native_execute = WorkflowItem._execute
def new_execute(self, activity, stack):
    # Monkey-patched replacement for WorkflowItem._execute that runs the
    # base.action.rule pre/post-condition machinery around each workflow
    # activity execution.
    if not registry(self.session.cr.dbname).get('base.action.rule'):
        # Module not installed in this database: behave exactly like stock.
        return native_execute(self, activity, stack)
    cr, uid, ids = self.session.cr, self.session.uid, [self.record.id]
    # Retrieve the action rules to possibly execute
    rule_obj = registry(self.session.cr.dbname)['base.action.rule']
    rules = rule_obj._get_action_rules_on_wkf(cr, uid, activity['id'])
    # Check preconditions
    pre_ids = {}
    for rule in rules:
        if rule.kind not in ('on_create', 'on_create_or_write'):
            pre_ids[rule] = rule_obj._filter(cr, uid, rule, rule.filter_pre_id, ids)
    # Call original method
    result = native_execute(self, activity, stack)
    # Check postconditions, and execute actions on the records that satisfy them
    for rule in rules:
        if rule.kind != 'on_unlink':
            # NOTE(review): pre_ids has no entry for 'on_create' /
            # 'on_create_or_write' rules, so this lookup would raise KeyError
            # if _get_action_rules_on_wkf ever returned such a kind -- confirm.
            post_ids = rule_obj._filter(cr, uid, rule, rule.filter_id, pre_ids[rule])
        else:
            post_ids = pre_ids[rule]
        if post_ids:
            rule_obj._process(cr, uid, rule, post_ids)
    return result
# Install the wrapper in place of the original implementation.
WorkflowItem._execute = new_execute
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import registry
from openerp.workflow.workitem import WorkflowItem
native_execute = WorkflowItem._execute
def new_execute(self, activity, stack):
cr, uid, ids = self.session.cr, self.session.uid, [self.record.id]
# Retrieve the action rules to possibly execute
rule_obj = registry(self.session.cr.dbname)['base.action.rule']
rules = rule_obj._get_action_rules_on_wkf(cr, uid, activity['id'])
# Check preconditions
pre_ids = {}
for rule in rules:
if rule.kind not in ('on_create', 'on_create_or_write'):
pre_ids[rule] = rule_obj._filter(cr, uid, rule, rule.filter_pre_id, ids)
# Call original method
result = native_execute(self, activity, stack)
# Check postconditions, and execute actions on the records that satisfy them
for rule in rules:
if rule.kind != 'on_unlink':
post_ids = rule_obj._filter(cr, uid, rule, rule.filter_id, pre_ids[rule])
else:
post_ids = pre_ids[rule]
if post_ids:
rule_obj._process(cr, uid, rule, post_ids)
return result
WorkflowItem._execute = new_execute
| Python | 0.000004 |
107e33ab3982f3f7fb56a1a2ac2b0eec0b67091b | Use universal newlines in gyp_helper. | build/gyp_helper.py | build/gyp_helper.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file helps gyp_chromium and landmines correctly set up the gyp
# environment from chromium.gyp_env on disk
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.dirname(SCRIPT_DIR)
def apply_gyp_environment_from_file(file_path):
  """Reads in a *.gyp_env file and applies the valid keys to os.environ."""
  # Silently do nothing when the env file is absent.
  if not os.path.exists(file_path):
    return
  # 'rU' opens with universal newlines so a file written on another platform
  # still evals cleanly.  NOTE(review): 'rU' and the 'except E, e' syntax
  # below are Python-2-only -- this module targets Python 2.
  with open(file_path, 'rU') as f:
    file_contents = f.read()
  try:
    # The file must contain a single Python dict literal; evaluate it with
    # builtins disabled so it cannot call functions.
    file_data = eval(file_contents, {'__builtins__': None}, None)
  except SyntaxError, e:
    # Point the traceback at the env file rather than this module.
    e.filename = os.path.abspath(file_path)
    raise
  # Whitelist of environment variables the env file is allowed to set.
  supported_vars = (
      'CC',
      'CHROMIUM_GYP_FILE',
      'CHROMIUM_GYP_SYNTAX_CHECK',
      'CXX',
      'GYP_DEFINES',
      'GYP_GENERATOR_FLAGS',
      'GYP_GENERATOR_OUTPUT',
      'GYP_GENERATORS',
  )
  for var in supported_vars:
    file_val = file_data.get(var)
    if file_val:
      if var in os.environ:
        # A variable already exported in the shell wins over the env file.
        print 'INFO: Environment value for "%s" overrides value in %s.' % (
            var, os.path.abspath(file_path)
        )
      else:
        os.environ[var] = file_val
def apply_chromium_gyp_env():
  """Apply chromium.gyp_env from the directory above CHROME_SRC,
  unless SKIP_CHROMIUM_GYP_ENV is set in the environment."""
  if 'SKIP_CHROMIUM_GYP_ENV' in os.environ:
    return
  # Update the environment based on chromium.gyp_env
  env_file = os.path.join(os.path.dirname(CHROME_SRC), 'chromium.gyp_env')
  apply_gyp_environment_from_file(env_file)
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file helps gyp_chromium and landmines correctly set up the gyp
# environment from chromium.gyp_env on disk
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.dirname(SCRIPT_DIR)
def apply_gyp_environment_from_file(file_path):
"""Reads in a *.gyp_env file and applies the valid keys to os.environ."""
if not os.path.exists(file_path):
return
with open(file_path) as f:
file_contents = f.read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = (
'CC',
'CHROMIUM_GYP_FILE',
'CHROMIUM_GYP_SYNTAX_CHECK',
'CXX',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT',
'GYP_GENERATORS',
)
for var in supported_vars:
file_val = file_data.get(var)
if file_val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = file_val
def apply_chromium_gyp_env():
if 'SKIP_CHROMIUM_GYP_ENV' not in os.environ:
# Update the environment based on chromium.gyp_env
path = os.path.join(os.path.dirname(CHROME_SRC), 'chromium.gyp_env')
apply_gyp_environment_from_file(path)
| Python | 0.000007 |
e6b9b471dba3d7ef0bac94454136b07ef3247093 | Add 'tools/translation/' from commit '83e3aa43dd35a8c0ef0658303806e355204c7100' | tools/translation/nb_code_sync.py | tools/translation/nb_code_sync.py | #!/usr/bin/env python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keep translated notebook code in sync with the source-of-truth notebook.
This tool attempts to make it easier to keep the community translation *code*
in sync with the en/ source-or-truth notebooks. It intentionally ignores
Markdown cells and only compares code cells. There must be the same amount of
code cells in source notebook and translation notebook.
Usage: nb_code_sync.py [--lang=en] site/lang/notebook.ipynb [...]
Useful when used with interactive git workflow to selectively add hunks:
git add --patch site/lang/notebook.ipynb
Commands:
y: stage this hunk
n: do not stage this hunk
s: split this hunk
e: edit this hunk
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import json
import os
import pathlib
import re
import sys
from absl import app
from absl import flags
# Command-line flags shared by the notebook code-sync tool.
flags.DEFINE_enum("lang", "en", ["en", "js", "ko", "pt", "ru", "tr", "zh-cn"],
                  "Language directory to import from.")
flags.DEFINE_string("src", None, "Source file or parent directory of source.")
flags.DEFINE_boolean("stdout", False, "Write to stdout instead of file.")
flags.DEFINE_string("site_root", None, "Root directory of site docs.")
class Notebook(object):
    """Represents a parsed .ipynb notebook file.
    Attributes:
      path: Path to the notebook file.
      data: All cells parsed from notebook.
      code_cells: Only code cells parsed from notebook.
    """
    # Set by from_path(); stays None for notebooks built from raw data.
    path = None

    def __init__(self, data):
        """Inits Notebook from parsed .ipynb notebook data."""
        self.data = data
        self.code_cells = self._load_code_cells(self.data)

    @classmethod
    def from_path(cls, path):
        """Inits Notebook using path to .pynb file."""
        pth = Notebook._check_path(path)
        with open(pth) as json_data:
            data = json.load(json_data)
        nb = Notebook(data)
        nb.path = pth
        return nb

    @staticmethod
    def is_notebook(path):
        """Test of a file is an .ipynb file based on extension."""
        if not os.path.isfile(path):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
        return os.path.splitext(path)[-1].lower() == ".ipynb"

    @staticmethod
    def _check_path(pth):
        # Validate the extension first; is_notebook already raises for
        # missing files, so the exists() check below is a second guard.
        if not Notebook.is_notebook(pth):
            raise Exception("Notebook must be an .ipynb file: {}".format(pth))
        path = pathlib.Path(pth)
        if not path.exists():
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
        return path

    def _load_code_cells(self, data):
        # parse code cells
        code_cells = [c for c in data["cells"] if c["cell_type"] == "code"]
        # Discard last cell if empty
        # NOTE(review): assumes at least one code cell exists; an empty list
        # would raise IndexError here -- confirm inputs always contain code.
        cell_source = code_cells[-1]["source"]
        # remove empty strings, then test if anything is left
        if not any(cell_source):
            del code_cells[-1]
        return code_cells

    @staticmethod
    def _strip_line(line):
        """Remove comments and any trailing whitespace."""
        line = re.sub(r"^(.*?)#(.*)$", r"\1", line)
        return line.rstrip()

    @staticmethod
    def _is_source_code_equal(x_list, y_list):
        """Scrub lines of comments, remove empty lines, then compare."""
        x_list = [Notebook._strip_line(line) for line in x_list if line]
        y_list = [Notebook._strip_line(line) for line in y_list if line]
        return x_list == y_list

    def _set_cell_source(self, cell_id, source):
        # Replace the source of the cell whose metadata id matches cell_id.
        for i, cell in enumerate(self.data["cells"]):
            if cell["metadata"]["id"] == cell_id:
                self.data["cells"][i]["source"] = source
                break
        else:
            # for-loop exhausted
            raise Exception("Did not find cell id '{}' in notebook.".format(cell_id))

    def update(self, notebook):
        """Update code cells that differ from the provided notebook."""
        if len(self.code_cells) != len(notebook.code_cells):
            raise Exception("Notebooks must have same amount of code cells.")
        # Iterate all cells for destination reference
        for i, src_cell in enumerate(notebook.code_cells):
            dest_cell = self.code_cells[i]
            # Compare source code after scrubbing comments.
            # Ensures translated comments are preserved until the code changes.
            if not Notebook._is_source_code_equal(src_cell["source"],
                                                  dest_cell["source"]):
                self._set_cell_source(dest_cell["metadata"]["id"], src_cell["source"])

    def write(self, use_stdout=False):
        """Write notebook to file or print to screen."""
        def print_file(outfile):
            json.dump(self.data, outfile, indent=2, ensure_ascii=False)
            outfile.write("\n")  # add trailing newline
        if use_stdout:
            print_file(sys.stdout)
        else:
            with open(self.path, "w") as outfile:
                print_file(outfile)
            print("Wrote: {}".format(self.path))
def get_src_path(user_flags, notebook):
    """Get path of source notebook based on user flags or the destination file.
    Args:
      user_flags: Command-line arguments
      notebook: Destination notebook used to select source notebook.
    Returns:
      A Path of the source-of-truth notebook.
    Raises:
      FileNotFoundError: If user args for site_root or src are invalid locations.
    """
    if user_flags.site_root:
        site_dir = pathlib.Path(user_flags.site_root)
    else:
        site_dir = pathlib.Path(__file__).parent.parent.joinpath("site")
    if not site_dir.is_dir():
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), site_dir)
    src_flag = user_flags.src
    if not src_flag:
        # No explicit source: mirror the destination's relative path under
        # the configured source-language directory.
        rel = notebook.path.relative_to(site_dir)
        rel = pathlib.Path(*rel.parts[1:])  # drop the language component
        return site_dir.joinpath(user_flags.lang, rel)
    if os.path.isdir(src_flag):
        return pathlib.Path(src_flag) / notebook.path.name
    if os.path.isfile(src_flag):
        return pathlib.Path(src_flag)
    raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                            src_flag)
def main(argv):
    # argv[0] is the program name; require at least one notebook argument.
    if len(argv) < 2:
        raise app.UsageError("Missing command-line arguments.")
    for dest_path in argv[1:]:
        if not Notebook.is_notebook(dest_path):
            print("Not a notebook file, skipping: {}".format(dest_path),
                  file=sys.stderr)
            continue
        dest_notebook = Notebook.from_path(dest_path)
        src_path = get_src_path(flags.FLAGS, dest_notebook)
        src_notebook = Notebook.from_path(src_path)
        # Copy changed code cells from the source-of-truth notebook, then
        # write the result back (or to stdout with --stdout).
        dest_notebook.update(src_notebook)
        dest_notebook.write(flags.FLAGS.stdout)
# absl entry point: parses flags, then calls main(argv).
if __name__ == "__main__":
    app.run(main)
| Python | 0.000036 | |
fbbaa3fc5b99eed88e039c232f129aaeab0a6f54 | Bring table test coverage to 100% | tests/test_table.py | tests/test_table.py | #!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
    """should initialize table with required parameters and default values"""
    table = Table(num_cols=5, width=78)
    expected = {
        'num_cols': 5,
        'width': 78,
        'alignment': 'left',
        'title': None,
        'header': [],
        'rows': [],
    }
    for attr, value in expected.items():
        nose.assert_equal(getattr(table, attr), value)
def test_init_optional():
    """should initialize table with optional parameters if supplied"""
    table = Table(num_cols=5, width=78, alignment='right', title='Cache')
    for attr, value in (('num_cols', 5), ('width', 78),
                        ('alignment', 'right'), ('title', 'Cache')):
        nose.assert_equal(getattr(table, attr), value)
def test_get_separator():
    """should return the correct ASCII separator string"""
    tbl = Table(num_cols=5, width=78)
    nose.assert_equal(tbl.get_separator(), '-' * 78)
def test_str_title():
    """should correctly display title"""
    table = Table(num_cols=5, width=12, title='Cache')
    # NOTE(review): nose.assert_regexp_matches(text, regexp) expects the text
    # first and the pattern second; here the built expectation string is
    # passed as the text and str(table) as the pattern -- confirm the
    # arguments are not swapped.
    nose.assert_regexp_matches(
        ''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
    """should not display title if not originally supplied"""
    rendered = str(Table(num_cols=5, width=12))
    nose.assert_equal(rendered.strip(), '')
class TestAlignment(object):
    # Shared helper: build a 2x2 table with the given alignment and check the
    # rendered output against the matching str justification function.
    def _test_str_align(self, alignment, just):
        table_width = 16
        num_cols = 2
        col_width = table_width // num_cols
        table = Table(
            num_cols=num_cols, width=table_width, alignment=alignment)
        table.header = ['First', 'Last']
        table.rows.append(['Bob', 'Smith'])
        table.rows.append(['John', 'Earl'])
        # Expected layout: header row, separator, then one line per data row.
        nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
            just('First', col_width), just('Last', col_width),
            '-' * table_width,
            just('Bob', col_width), just('Smith', col_width),
            just('John', col_width), just('Earl', col_width)))

    def test_str_align_left(self):
        """should correctly display table when left-aligned"""
        self._test_str_align(
            alignment='left', just=str.ljust)

    def test_str_align_center(self):
        """should correctly display table when center-aligned"""
        self._test_str_align(
            alignment='center', just=str.center)

    def test_str_align_right(self):
        """should correctly display table when right-aligned"""
        self._test_str_align(
            alignment='right', just=str.rjust)
| Python | 0 | |
f392a90ae12a5f9aab04b22e82d493d0f93db9fd | Add first test | tests/test_utils.py | tests/test_utils.py | def test_ok():
assert True
| Python | 0.000029 | |
e56a9781f4e7e8042c29c9e54966659c87c5c05c | Add a test for our more general views. | tests/test_views.py | tests/test_views.py | import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
    # `client` is the pytest-django test client fixture.
    response = client.get(reverse('site-home'))
    assert response.status_code == 200
    # The site landing template must be among those used for the render.
    assert 'landings/home_site.html' in [template.name for template in response.templates]
| Python | 0 | |
675b7fc917b5f99120ca4d6dcb79b3e821dbe72a | add Olin specific script | downloadOlin.py | downloadOlin.py | import os
from downloadMailmanArchives import main
class Struct:
    """Ad-hoc attribute container built from keyword arguments."""

    def __init__(self, **fields):
        # Expose every keyword argument as an instance attribute.
        for name, value in fields.items():
            setattr(self, name, value)
if __name__ == '__main__':
    # Credentials come from the environment.  BUG FIX: the original mapped
    # ARCHIVE_LOGIN to 'password' and ARCHIVE_PASS to 'username' -- the two
    # env var names were swapped relative to the dict keys.
    args = {
        'archive_root_url': ['https://lists.olin.edu/mailman/private/carpediem/', 'https://lists.olin.edu/mailman/private/helpme/'],
        'password': os.environ.get('ARCHIVE_PASS', "fail"),
        'username': os.environ.get('ARCHIVE_LOGIN', "fail"),
        'force': True,
        'dest': './archives'
    }
    main(Struct(**args))
| Python | 0 | |
53e0bb6c88d64cd1ee1717e19f51c8aed1ba5cdd | implement expect column values valid MBTI (#4702) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_mbti.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_mbti.py | import re
from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
# Case-insensitive MBTI type code, e.g. "INTP" or "esfj".
# BUG FIX: the global (?i) flag must be the very first element of the
# pattern; "^(?i)..." raises re.error on Python 3.11+ (it was only a
# DeprecationWarning before).
MBTI_REGEX = r"(?i)^([EI][NS][FT][PJ])$"
# Metric supporting the Expectation below; for most ColumnMapExpectations the
# main calculation logic lives in a class like this one.
class ColumnValuesToBeValidMbti(ColumnMapMetricProvider):
    """Metric provider that flags values matching the MBTI type pattern."""

    # Id string used to reference this metric.
    condition_metric_name = "column_values.valid_mbti"

    # Core logic for the PandasExecutionEngine.
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        def is_valid_mbti(value):
            return re.match(MBTI_REGEX, str(value)) is not None

        # Falsy values (None, empty string, ...) are treated as invalid.
        return column.apply(lambda value: is_valid_mbti(value) if value else False)

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidMbti(ColumnMapExpectation):
    """This Expectation validates data as conforming to the valid Myers–Briggs Type Indicator (MBTI)."""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "well_formed_mbti": [
                    "INTP",
                    "intp",
                    "ESFJ",
                    "INFP",
                ],
                "malformed_mbti": [
                    "",
                    "INTP-",
                    "IETP",
                    "This is not a valid MBTI",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "well_formed_mbti"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "malformed_mbti"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_mbti"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        # Base-class validation covers the standard kwargs (column, mostly).
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",
        "tags": ["experimental", "hackathon", "typed-entities"],
        "contributors": [
            "@voidforall",
        ],
    }
if __name__ == "__main__":
ExpectColumnValuesToBeValidMbti().print_diagnostic_checklist()
| Python | 0.000109 | |
a46f0a709747dbe90f0495e7e6b12c7b511baa7f | Delete duplicate wapp images | dupe_deleter.py | dupe_deleter.py | """
# Install QPython3 for android
# https://github.com/qpython-android/qpython3/releases
# Execute the below script in QPython3
"""
import os, hashlib
from operator import itemgetter
from itertools import groupby
image_list = []
folder_list = [r'/storage/emulated/0/whatsapp/media/whatsapp images/',
               r'/storage/emulated/0/whatsapp/media/whatsapp images/Sent']
# Collect (path, sha1-of-contents) pairs for every regular file in the folders.
for folder in folder_list:
    file_list = os.listdir(folder)
    for img_file in file_list:
        file_path = os.path.join(folder, img_file)
        if os.path.isfile(file_path):
            try:
                # NOTE(review): the file handle from open() is never closed
                # explicitly; it is only reclaimed by garbage collection.
                image_list.append([file_path, hashlib.sha1(open(file_path, 'rb').read()).hexdigest()])
            except IOError:
                raise Exception('Error reading the file')
# Group files by identical content hash; groupby requires the list to be
# sorted by the same key.
image_list.sort(key=itemgetter(1))
groups = groupby(image_list, itemgetter(1))
for (img_hash, img_list_same_hash) in groups:
    z = [img for img in img_list_same_hash]
    i = 1
    # Keep the first file of each hash group, delete the remaining duplicates.
    while i < len(z):
        os.remove(z[i][0])
        print('Deleted ' + z[i][0])
        i += 1
| Python | 0.000001 | |
9f4452983e38d002e141ed0d2a9c865656a553ce | add todo stuff | main.py | main.py | #!/usr/bin/env python
# __author__ = 'Dmitry Shihaleev'
# __version__= '0.2'
# __email__ = 'padavankun@gmail.com'
# __license__ = 'MIT License'
| Python | 0 | |
ab7c0d05a6bcf8be83409ccd96f2ed4a6fe65a73 | Create main.py | main.py | main.py | #!/usr/bin/env python3
import sys
import socket
import string
HOST = "chat.freenode.net" # You can change this to whatever you want
PORT = 6667
NICK = "Your Nick Name"
IDENT = "Your Identity"
REALNAME = "Your REAL Name"
MASTER = "The Master of this particular Slave"
CHANNEL = "The Channel To join"
readbuffer = ""
s = socket.socket()
s.connect((HOST, PORT))
# sets the nickname inside IRC channel
s.send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
# Connects to the server using the provided inforamtion above. The 'bla' is irrelevent
s.send(bytes("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME), "UTF-8"))
# Joins the Channel
s.send(bytes("JOIN #%s \r\n" % (CHANNEL), "UTF-8"))
# starts a conversation with the 'master' when joining
s.send(bytes("PRIVMSG %s :Hello Master\r\n" % MASTER, "UTF-8"))
while True:
readbuffer = readbuffer+s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
line = str.rstrip(line)
line = str.split(line)
if(line[0] == "PING"):
s.send(bytes("PONG %s\r\n" % line[1], "UTF-8"))
if(line[1] == "PRIVMSG"):
sender = ""
for char in line[0]:
if(char == "!"):
break
if(char != ":"):
sender += char
size = len(line)
i = 3
message = ""
while(i < size):
message += line[i] + " "
i = i + 1
message.lstrip(":")
s.send(bytes("PRIVMSG %s %s \r\n" % (sender, message), "UTF-8"))
for index, i in enumerate(line):
print(line[index])
| Python | 0 | |
12490fa92e54becca77c70d124d807b19d71afa1 | Create main.py | main.py | main.py | # waypointviewer.py Waypoint Viewer Google Maps/Google AppEngine application
# Copyright (C) 2011 Tom Payne
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from google.appengine.api.urlfetch import fetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import os.path
import waypoint
class MainPage(webapp.RequestHandler):
    """Serves the main viewer page rendered from templates/index.html."""

    def get(self):
        # Forward only the whitelisted query parameters into the template.
        param_names = ('kml', 'logo', 'tsk', 'title', 'wpt')
        template_values = {key: self.request.get(key) for key in param_names}
        path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
        self.response.out.write(template.render(path, template_values))
class WaypointviewerJs(webapp.RequestHandler):
    """Serves templates/waypointviewer.js with query parameters substituted."""

    def get(self):
        param_names = ('kml', 'logo', 'tsk', 'wpt')
        template_values = {key: self.request.get(key) for key in param_names}
        path = os.path.join(os.path.dirname(__file__), 'templates', 'waypointviewer.js')
        self.response.headers['content-type'] = 'text/javascript'
        self.response.out.write(template.render(path, template_values))
class Wpt2json(webapp.RequestHandler):
    # Fetches a waypoint file from the `wpt` URL parameter and returns it as
    # GeoJSON; the `debug` parameter adds fetch diagnostics and pretty-prints.
    def get(self):
        debug = self.request.get('debug')
        wpt = self.request.get('wpt')
        response = fetch(wpt)
        # Waypoint files are decoded as Latin-1 -- presumably the format's
        # expected encoding; confirm against the waypoint module.
        content = response.content.decode('latin_1')
        feature_collection = waypoint.feature_collection(content.splitlines(), debug=debug)
        if debug:
            # NOTE(review): `feature_collection_properties` is never defined
            # in this handler, so this branch raises NameError whenever debug
            # is set -- it presumably should target a properties dict on
            # `feature_collection`; confirm the intended structure.
            feature_collection_properties['content'] = content
            feature_collection_properties['content_was_truncated'] = response.content_was_truncated
            feature_collection_properties['final_url'] = response.final_url
            headers = dict((key, response.headers[key]) for key in response.headers)
            feature_collection_properties['headers'] = headers
            feature_collection_properties['status_code'] = response.status_code
            keywords = {'indent': 4, 'sort_keys': True}
        else:
            keywords = {}
        self.response.headers['content-type'] = 'application/json'
        self.response.out.write(json.dumps(feature_collection, **keywords))
# Route table mapping URL paths to their request handlers.
app = webapp.WSGIApplication([('/', MainPage), ('/waypointviewer.js', WaypointviewerJs), ('/wpt2json.json', Wpt2json)], debug=True)


def main():
    """Entry point: hand the WSGI application to the App Engine runtime."""
    run_wsgi_app(app)


if __name__ == '__main__':
    main()
7e4cfdbdb6bcfd385bcbf9ddcca99b4e9800efaf | Create musy.py | musy.py | musy.py | import os
def crear_carpeta(carpeta):
    """Create the directory (including parents) unless it already exists."""
    if os.path.exists(carpeta):
        return
    os.makedirs(carpeta)
def generar_datos_post(plantilla, post):
    """Fill the template, substituting each *x placeholder with post data.

    Placeholders: *t title, *d summary, *e link, *c content.  Substitutions
    run in that order, exactly like the original chained replace() calls.
    """
    substitutions = (
        ("*t", post["titulo"]),
        ("*d", post["resumen"]),
        ("*e", post["enlace"]),
        ("*c", post["contenido"]),
    )
    rendered = plantilla
    for marker, value in substitutions:
        rendered = rendered.replace(marker, value)
    return rendered
def grabar_fichero(carpeta, nombre, datos):
    """Write `datos` to the file `nombre` inside directory `carpeta`."""
    destino = os.path.join(carpeta, nombre)
    out = open(destino, "w")
    try:
        out.write(datos)
    finally:
        out.close()
plantilla = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>*t</title>
<meta name="description" content="*d">
<link rel="stylesheet" href="/css/main.css">
<link rel="stylesheet" href="/css/custom.css">
<link rel="canonical" href="http://sabiavida.com/*e/">
<link rel="alternate" type="application/rss+xml" title="Sabia Vida" href="http://sabiavida.com/feed.xml" />
</head>
<body>
<div class="page-content">
<div class="wrapper">
<div class="post">
<div class="container">
<div class="row">
<div class="col-md-11 col-lg-9">
<header class="post-header">
<h1><small><a href="http://sabiavida.com">SabiaVida</a></small> <br><br><br><div class="post_title">*t</div></h1>
<p class="lead">de Miguel de Luis</p>
</header>
<article class="post-content">
<div class="container">
<div class="row">
<div class="col-md-11 col-lg-9">
<div class="jumbotron">
*c
</div>
</div>
</div>
</div>
</article>
</div>
</div>
</div>
</div>
</div>
</div>
<footer class="site-footer">
<div class="container">
<nav class="navbar navbar-default navbar-fixed-bottom hidden-xs" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="http://sabiavida.com">SabiaVida</a>
</div>
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav"><li id="menu-item-10934" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-10934"><a href="http://sabiavida.com/contacto/">Contacto</a></li>
<li id="menu-item-10935" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-10935"><a href="/archivos/">Archivos</a></li>
<li id="menu-item-12525" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-12525"><a href="http://sabiavida.us2.list-manage1.com/subscribe?u=f89f0f12785e9891d2a51bbc2&id=a3ff4967cf">Suscríbete</a></li>
</div>
</div>
</nav>
<div class="visible-xs">
<a class="btn btn-default btn-lg btn-block" href="/">inicio</a>
<a class="btn btn-default btn-lg btn-block" href="http://sabiavida.com/contacto/">contacto</a>
<a href="http://sabiavida.us2.list-manage1.com/subscribe?u=f89f0f12785e9891d2a51bbc2&id=a3ff4967cf">Suscríbete</a>
</div>
</div>
</footer>
</body>
</html>
'''
posts = [
{
"titulo":"Uno",
"enlace":"uno-enlace",
"resumen":"primer post",
"contenido":'''
<p>Este es el primer post</p>
<p>¡Bien!</p>
''',
},
{
"titulo":"Dos",
"enlace":"dos-enlace",
"resumen":"segundo post",
"contenido":'''
<p>Este es el primer post</p>
<p>¡Bien!</p>
''',
}
]
# Generate one directory per post containing its rendered index.html.
for post in posts:
    crear_carpeta(post["enlace"])
    datos = generar_datos_post(plantilla, post)
    grabar_fichero(post["enlace"], 'index.html', datos)
| Python | 0.000001 | |
34847fbe0e04a2da8957a0ba5de92856ca73c8cc | Add missing migration | osf/migrations/0054_auto_20170823_1555.py | osf/migrations/0054_auto_20170823_1555.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 20:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens the `type` choices on AbstractNode to
    # include the quick files node type introduced in 0053_add_quickfiles.

    dependencies = [
        ('osf', '0053_add_quickfiles'),
    ]

    operations = [
        migrations.AlterField(
            model_name='abstractnode',
            name='type',
            field=models.CharField(choices=[('osf.node', 'node'), ('osf.collection', 'collection'), ('osf.registration', 'registration'), ('osf.quickfilesnode', 'quick files node')], db_index=True, max_length=255),
        ),
    ]
| Python | 0.0002 | |
08df517cad06279120dc2ee9a4942bf86afc8323 | add plot.py | plot.py | plot.py | #!/usr/bin/env python
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
import csv
# Shared layout constants for all bar charts below.
bar_width = 0.6
xstick_offset = 0.3
def to_float(text):
    """Parse a number string, tolerating thousands separators ("1,234" -> 1234.0).

    The parameter was renamed from `str`, which shadowed the builtin.
    """
    return float(text.replace(',', ''))
def plot_wc_time(algebras, time, std, output_name):
    """
    Plot wall clock time

    One bar per physical algebra with error bars from `std`; the chart is
    written to `output_name` as a PDF.
    """
    assert len(algebras) == len(time)
    assert len(std) == len(time)
    ind = np.arange(len(time))  # the x locations for the groups
    fig, ax = plt.subplots()
    ax.bar(ind, time, width=bar_width, color='m', yerr=std)
    ax.set_ylabel('Time (sec)')
    ax.set_xlabel('Physical Algebra')
    ax.set_title('Wall clock time')
    ax.set_xticks(ind+xstick_offset)
    ax.set_xticklabels(algebras)
    print "outputing {}".format(output_name)
    plt.savefig(output_name, format='pdf')
def plot_cpu_time(algebras, time, output_name):
"""
Plot cpu time
"""
assert len(algebras) == len(time)
ind = np.arange(len(time)) # the x locations for the groups
fig, ax = plt.subplots()
ax.bar(ind, time, width=bar_width, color='m')
ax.set_ylabel('Time (sec)')
ax.set_xlabel('Physical Algebra')
ax.set_title('Total CPU time')
ax.set_xticks(ind+xstick_offset)
ax.set_xticklabels(algebras)
print "outputing {}".format(output_name)
plt.savefig(output_name, format='pdf')
def plot_shuffle_skew(algebras, skews, output_name):
    """
    Plot max skew among shuffles

    Skew is expressed as max/avg per algebra; one bar each, saved to PDF.
    """
    assert len(algebras) == len(skews)
    ind = np.arange(len(skews))  # the x locations for the groups
    fig, ax = plt.subplots()
    ax.bar(ind, skews, width=bar_width, color='m')
    ax.set_ylabel('Skew (max/avg)')
    ax.set_xlabel('Physical Algebra')
    ax.set_title('Maximal skew in shuffle(s)')
    ax.set_xticks(ind+xstick_offset)
    ax.set_xticklabels(algebras)
    print "outputing {}".format(output_name)
    plt.savefig(output_name, format='pdf')
def plot_shuffle_size(algebras, shuffle_size, output_name):
    """
    Plot shuffle size
    """
    # NOTE(review): the y-label claims units of millions, but the values are
    # plotted exactly as passed in -- confirm callers pre-scale the counts.
    assert len(algebras) == len(shuffle_size)
    ind = np.arange(len(shuffle_size))  # the x locations for the groups
    fig, ax = plt.subplots()
    ax.bar(ind, shuffle_size, width=bar_width, color='m')
    ax.set_ylabel('Number of tuples shuffled (million)')
    ax.set_xlabel('Physical Algebra')
    ax.set_title('Number of tuples shuffled')
    ax.set_xticks(ind+xstick_offset)
    ax.set_xticklabels(algebras)
    print "outputing {}".format(output_name)
    plt.savefig(output_name, format='pdf')
def plot_output_skew(algebras, skews, output_name):
    """
    Plot max skew among output
    """
    # NOTE(review): plot() below reuses plot_shuffle_skew for the output-skew
    # chart, so this function appears to be unused -- confirm before removing.
    assert len(algebras) == len(skews)
    ind = np.arange(len(skews))  # the x locations for the groups
    fig, ax = plt.subplots()
    ax.bar(ind, skews, width=bar_width, color='m')
    ax.set_ylabel('Skew (max/avg)')
    ax.set_xlabel('Physical Algebra')
    ax.set_title('Maximal skew in output')
    ax.set_xticks(ind+xstick_offset)
    ax.set_xticklabels(algebras)
    print "outputing {}".format(output_name)
    plt.savefig(output_name, format='pdf')
def plot_hashtable_size(algebras, htsizes, output_name):
    """
    Plot memory usage (MB) per physical algebra as a bar chart PDF.
    """
    assert len(algebras) == len(htsizes)
    ind = np.arange(len(htsizes))  # the x locations for the groups
    fig, ax = plt.subplots()
    ax.bar(ind, htsizes, width=bar_width, color='m')
    ax.set_ylabel('Memory usage (MB)')
    ax.set_xlabel('Physical Algebra')
    ax.set_title('Memory usage')
    ax.set_xticks(ind+xstick_offset)
    ax.set_xticklabels(algebras)
    print "outputing {}".format(output_name)
    plt.savefig(output_name, format='pdf')
def plot():
    """Read the experiment summary CSV and emit one PDF per metric and query."""
    fname = "SIGMOD Experiment - summary.csv"
    agbrs = ('RS_HJ', 'HC_HJ', 'BR_HJ', 'RS_LFJ', 'HC_LFJ', 'BR_LFJ')
    queries = ('triangle', 'clique', 'fb_q1')
    # NOTE(review): mode "rU" is Python 2 universal-newlines; it was removed
    # in Python 3.11 (use plain "r" there).
    with open(fname, "rU") as f:
        csvreader = csv.reader(f)
        data = [r for r in csvreader]
    # output wall clock time
    for query in queries:
        time = []
        std = []
        for row in data:
            if row[0] == query:
                time.append(to_float(row[2]))
                std.append(to_float(row[3]))
        plot_wc_time(agbrs, time, std, "{}_wall_time.pdf".format(query))
    # output cpu time
    for query in queries:
        time = []
        for row in data:
            if row[0] == query:
                time.append(to_float(row[8]))
        plot_cpu_time(agbrs, time, "{}_cpu_time.pdf".format(query))
    # output shuffle skew
    for query in queries:
        skews = []
        for row in data:
            if row[0] == query:
                skews.append(to_float(row[7]))
        plot_shuffle_skew(
            agbrs, skews, "{}_shuffle_skew.pdf".format(query))
    # output number of tuples shuffled in total
    for query in queries:
        shuffle_sizes = []
        for row in data:
            if row[0] == query:
                shuffle_sizes.append(to_float(row[10]))
        plot_shuffle_size(
            agbrs, shuffle_sizes, "{}_shuffle_size.pdf".format(query))
    # output output skew
    for query in queries:
        skews = []
        for row in data:
            if row[0] == query:
                skews.append(to_float(row[11]))
        # NOTE(review): this reuses plot_shuffle_skew (with its shuffle title)
        # even though the output file is named output_skew; plot_output_skew
        # above is never called -- confirm which was intended.
        plot_shuffle_skew(
            agbrs, skews, "{}_output_skew.pdf".format(query))
    # output hash table size
    for query in queries:
        htsizes = []
        for row in data:
            if row[0] == query:
                htsizes.append(to_float(row[9]))
        plot_hashtable_size(
            agbrs, htsizes, "{}_memory.pdf".format(query))


if __name__ == '__main__':
    plot()
| Python | 0.000012 | |
4102ab8fc24265aaee1ecbf673bec260b3b3e5df | add max sub arr impl | bioinformatics/dynamic_programming/max_sum_sub_arr1.py | bioinformatics/dynamic_programming/max_sum_sub_arr1.py | def max_sum_sub_arr(arr):
score_vector = [0 for _ in range(len(arr))]
def max_sum_sub_arr_detail(beg_idx):
if beg_idx >= len(arr):
return 0
elif arr[beg_idx] >= 0:
score_vector[beg_idx] = arr[beg_idx] + max_sum_sub_arr_detail(beg_idx + 1)
return score_vector[beg_idx]
else:
score_vector[beg_idx] = max(0, arr[beg_idx] + max_sum_sub_arr_detail(beg_idx + 1))
return score_vector[beg_idx]
max_sum_sub_arr_detail(0)
print score_vector
return max(score_vector)
if __name__ == '__main__':
    # print() call form works on both Python 2 and 3; the original Python 2
    # print statement is a syntax error under Python 3.
    print(max_sum_sub_arr([1, -2, 3, 10, -4, 7, 2, -5]))
| Python | 0.000001 | |
1043acdfe324e02bc2a8629ef8a47d6ae9befd7c | Add python script to get ECC608 Public Key | src/aiy/_drivers/_ecc608_pubkey.py | src/aiy/_drivers/_ecc608_pubkey.py | #!/usr/bin/env python3
import base64
import ctypes
import sys
# I2C address of the crypto chip on each supported AIY bonnet; main() probes
# these in turn until one responds.
CRYPTO_ADDRESS_DICT = {
    'Vision Bonnet': 0x60,
    'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
    # Presumably mirrors cryptoauthlib's ATCAIfaceCfg struct for I2C devices
    # (confirm against the library headers); the field order and ctypes types
    # must match the C layout exactly.
    _fields_ = (
        ('iface_type', ctypes.c_ulong),
        ('devtype', ctypes.c_ulong),
        ('slave_address', ctypes.c_ubyte),
        ('bus', ctypes.c_ubyte),
        ('baud', ctypes.c_ulong)
    )
def main():
    # Load the hardware crypto library; fall back to software auth if absent.
    try:
        cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
    except Exception:
        print('Unable to load crypto library, SW authentication required')
        sys.exit()
    try:
        # Probe each known bonnet address until a crypto chip responds.
        for name, addr in CRYPTO_ADDRESS_DICT.items():
            cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
            # The library expects the device address shifted left by one bit.
            cfg.slave_address = addr << 1
            cfg.bus = 1  # ARM I2C
            cfg.devtype = 3  # ECC608
            status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
            if status == 0:
                # Found a valid crypto chip.
                break
            else:
                cryptolib.atcab_release()
        if status:
            raise Exception
        # Read and report the 9-byte device serial number (to stderr).
        serial = ctypes.create_string_buffer(9)
        status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
        if status:
            raise Exception
        serial = ''.join('%02X' % x for x in serial.raw)
        print('Serial Number: %s\n' % serial, file=sys.stderr)
        # Obtain the 64-byte raw public key from key slot 0.
        pubkey = ctypes.create_string_buffer(64)
        status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
        if status:
            raise Exception
        # Prepend a fixed DER SubjectPublicKeyInfo header (presumably the
        # EC/P-256 header -- confirm) and wrap the result as PEM.
        public_key = bytearray.fromhex(
            '3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
        public_key = '-----BEGIN PUBLIC KEY-----\n' + \
            base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
        print(public_key)
        status = cryptolib.atcab_release()
        if status:
            raise Exception
    except Exception:
        print('Unable to communicate with crypto, SW authentication required')
# Script entry point: print the device's public key (or a fallback notice).
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
b04503bddfa3b0d737308ac8ecb7f06ac866e6eb | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
0b891e401bf0e671d3bc6f0347a456f1cc5b07b3 | add __init__.py for root package | __init__.py | __init__.py | import sysdic
| Python | 0.000018 | |
14f175c294ec6b5dcd75887a031386c1c9d7060d | add __main__ | __main__.py | __main__.py | from . import parser
import sys
# Entry point: compile yarh input (files, an inline string after "--", or
# stdin) to HTML on stdout.
if len(sys.argv) == 1:
    print("compile yarh to html")
    print("usage: yarh [YARH_FILE]...")
    print(" yarh -- [YARH_STRING]")
    print(" yarh [YARH_FILE]... -- [YARH_STRING]")
    sys.exit(1)
# `fromfile` tracks whether the next argument is a file path; a "--"
# argument switches to inline-string mode for the following argument.
fromfile = True
for arg in sys.argv[1:]:
    if arg == "--":
        fromfile = False
        continue
    if fromfile:
        # NOTE(review): the file handle is never closed explicitly.
        f = open(arg, "r")
        print(parser.parseyarh(f.read()).html())
    else:
        # Inline string given after "--": compile it and stop processing.
        print(parser.parseyarh(arg).html())
        fromfile = True
        break
# A trailing "--" with no following string means: compile from stdin.
if not fromfile:
    print(parser.parseyarh(sys.stdin.read()).html())
| Python | 0.000247 | |
e29c8e2d55464ac765db60a5cc213bb943b60742 | add [[Portal:Beer]] and [[Portal:Wine]] tagging request script | trunk/portalbeer.py | trunk/portalbeer.py | #!usr/bin/python
import sys, os, re
sys.path.append(os.environ['HOME'] + '/stuffs/pywiki/pywikipedia')
import wikipedia as wiki
site = wiki.getSite()
page = 'Portal:Beer/Selected picture/'
# get page list
pages = []
num = 0
# Blank "Selected picture" template used to (re)initialise the subpages.
content = """\
{{Selected picture
| image =
| size =
| caption =
| text =
| credit =
| link =
}}
"""
# NOTE(review): builds subpages 1..51 (num runs 0..50 before incrementing),
# while the wine loop below builds 1..50 -- confirm the off-by-one is wanted.
while num <=50:
    num +=1
    pages.append(wiki.Page(site,page + str(num)))
# NOTE(review): the beer pages/content above are currently unused -- the
# tagging loop below is commented out.
#for page in pages:
#    print page
#    wiki.showDiff(page.get(), content)
#    page.put(content, 'Updating per [[WP:BOTREQ]]')
raw_content = """\
{{Selected picture
| image =
| size =
| caption =
| text =
| credit =
| link =
}}
[[Category:Wine Portal]]
"""
pagewine = 'Portal:Wine/Selected picture/'
pages1 = []
num = 0
while num <50:
    num +=1
    pages1.append(wiki.Page(site,pagewine + str(num)))
# Retarget each wine subpage's layout reference; create missing pages from
# the blank template instead.
for page in pages1:
    print page
    try:
        wikitext = page.get()
        newtext = re.sub('Portal:Wine/Selected picture/Layout','Selected picture', wikitext)
        wiki.showDiff(wikitext, newtext)
        page.put(newtext, 'Updating per [[WP:BOTREQ]]')
    except wiki.NoPage:
        page.put(raw_content, 'Updating per [[WP:BOTREQ]]')
b56525d084ccbf1fe569900338f00a37e763d7dd | Add test_runtime_performance test | congress/tests/policy/test_runtime_performance.py | congress/tests/policy/test_runtime_performance.py | # Copyright (c) 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from congress.openstack.common import log as logging
from congress.policy import base
from congress.policy.compile import Literal
from congress.policy import runtime
from congress.tests import base as testbase
from congress.tests import helper
LOG = logging.getLogger(__name__)
NREC_THEORY = 'non-recursive theory'
DB_THEORY = 'database'
class TestRuntimePerformance(testbase.TestCase):
"""Tests for Runtime performance that are not specific to any theory.
To run one test:
nosetests -v \
congress/tests/policy/test_runtime_performance.py:TestRuntimePerformance.test_foo
To collect profiling data:
python -m cProfile -o profile.out `which nosetests` -v \
congress/tests/policy/test_runtime_performance.py:TestRuntimePerformance.test_foo
To parse and sort profiling data in different ways:
import pstats
pstats.Stats('profile.out').strip_dirs().sort_stats("cum").print_stats()
pstats.Stats('profile.out').strip_dirs().sort_stats("time").print_stats()
pstats.Stats('profile.out').strip_dirs().sort_stats("calls").print_stats()
"""
def setUp(self):
super(TestRuntimePerformance, self).setUp()
self._runtime = runtime.Runtime()
self._runtime.create_policy(NREC_THEORY,
kind=base.NONRECURSIVE_POLICY_TYPE)
self._runtime.create_policy(DB_THEORY, kind=base.DATABASE_POLICY_TYPE)
self._runtime.debug_mode()
self._runtime.insert('', target=NREC_THEORY)
def _create_event(self, table, tuple_, insert, target):
return runtime.Event(Literal.create_from_table_tuple(table, tuple_),
insert=insert, target=target)
def test_insert_nonrecursive(self):
MAX = 100
th = NREC_THEORY
for i in range(MAX):
self._runtime.insert('r(%d)' % i, th)
def test_insert_database(self):
MAX = 100
th = DB_THEORY
for i in range(MAX):
self._runtime.insert('r(%d)' % i, th)
def test_update_nonrecursive(self):
MAX = 10000
th = NREC_THEORY
updates = [self._create_event('r', (i,), True, th)
for i in range(MAX)]
self._runtime.update(updates)
def test_update_database(self):
MAX = 1000
th = DB_THEORY
updates = [self._create_event('r', (i,), True, th)
for i in range(MAX)]
self._runtime.update(updates)
def test_indexing(self):
MAX = 100
th = NREC_THEORY
for table in ('a', 'b', 'c'):
updates = [self._create_event(table, (i,), True, th)
for i in range(MAX)]
self._runtime.update(updates)
# With indexing, this query should take O(n) time where n is MAX.
# Without indexing, this query will take O(n^3).
self._runtime.insert('d(x) :- a(x), b(x), c(x)', th)
ans = ' '.join(['d(%d)' % i for i in range(MAX)])
self.assertTrue(helper.datalog_equal(self._runtime.select('d(x)',
th), ans))
def test_select(self):
# with different types of policies (exercise indexing, large sets,
# many joins, etc)
pass
def test_simulate(self):
# We're interested in latency here. We think the cost will be the sum
# of the simulate call + the cost to do and undo the evaluation, so
# this test should focus on the cost specific to the simulate call, so
# the the test should do a minimal amount of evaluation.
pass
def test_runtime_initialize_tables(self):
MAX = 1000
formulas = [('p', 1, 2, 'foo', 'bar', i) for i in range(MAX)]
th = NREC_THEORY
self._runtime.initialize_tables(['p'], formulas, th)
| Python | 0.000016 | |
525a6d214d3f6f9731c16bbf10ed150d1fa24021 | Create betweenness.py | src/betweenness.py | src/betweenness.py | '''
Created on Feb 20, 2019
@author: Victor
'''
from neo4j.v1 import GraphDatabase, basic_auth
driver = GraphDatabase.driver("bolt://localhost")
session = driver.session()
query = '''CALL algo.betweenness.sampled.stream(null, null,
{strategy:'random', probability:1.0, maxDepth:1, direction: 'both'})
YIELD nodeId, centrality
MATCH (actor) WHERE id(actor) = nodeId and exists(actor.name)
RETURN actor.name AS actor,centrality
ORDER BY centrality DESC LIMIT 10;'''
result = session.run(query)
print("Top Central Nodes")
print("-----------------")
for record in result:
print("%s %s" % (record["actor"].ljust(25, " "), record["centrality"]))
session.close()
| Python | 0.000139 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.