commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
fd2cc81feab4b24b276c8f4a0a8efc16cacef60b | Add template for lab 09 class A | giovanism/TarungLab,laymonage/TarungLab | Lab/09/Template_09_A.py | Lab/09/Template_09_A.py |
class Bangunan:
def __init__(self, nama, lama_sewa, harga_sewa):
self.nama = nama
self.lama_sewa = lama_sewa
self.harga_sewa = harga_sewa
def getHargaSewa(self):
return self.harga_sewa
class Restoran(Object):
def __init__(self, nama, lama_sewa):
Bangunan.__init__(self, nama, lama_sewa, 30000000)
# Silahkan ditambahkan class-class lainnya atau jika ingin memodifikasi
daftar_bangunan = None
while True:
masukan = input().split()
if(masukan[0] == "BANGUN"):
# dapatkan nilai ini dari masukan_split sesuai indexnya (lihat format input)
nama = None
jenis_bangunan = None
# lakukan selection untuk menentukan tipe Pegawai
if(jenis_bangunan == "HOTEL"):
bangunan = Hotel(nama) #instansiasi objek
elif(jenis_bangunan == "RESTORAN"):
bangunan = None
elif(jenis_bangunan == "RUMAHSAKIT"):
bangunan = None
# masukan bangunan yang sudah dibuat ke dalam dictionary
# cetak pesan sesuai format
elif(masukan[0] == "INFO"):
elif(masukan[0] == "JUALMAKANAN"):
elif(masukan[0] == "TERIMATAMU")
elif(masukan[0] == "OBATIPASIEN"):
elif(masukan[0] == "HITUNGUANG"):
| mit | Python | |
5bf67ac445da7b69dd4f883b8d4ed89bd17f8274 | add urlinfo with basic youtube parsing | desaster/uusipuu | modules/urlinfo.py | modules/urlinfo.py | from twisted.web.client import getPage
from twisted.internet.defer import inlineCallbacks
from core.Uusipuu import UusipuuModule
import re
import lxml.html
class Module(UusipuuModule):
def startup(self):
self.log('urlinfo.py loaded')
def privmsg(self, user, target, msg):
if target != self.channel:
return
urls = self.parse_urls(msg)
if not len(urls):
return
re_youtube = re.compile(
'^(https?\:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$')
for url in urls:
if re_youtube.match(url):
d = getPage(url)
d.addCallback(self.show_youtube)
def show_youtube(self, output):
if output is None or not len(output):
print('Received empty youtube data!')
return
data = self.parse_youtube(output)
print(data['title'])
self.chanmsg('%s' % (data['title'],))
def parse_youtube(self, output):
foo = lxml.html.fromstring(output)
title = None
for result in foo.iterfind('.//meta'):
prop = result.get('property')
if prop is None:
continue
if prop != 'og:title':
continue
title = result.get('content')
break
if not title:
return None
return {
'title': title,
}
def parse_urls(self, s):
# TODO: http://www.google.com/asdasd)
re_url = re.compile(
'(https?:\/\/(?:www\.|(?!www))[^\s\.]+\.[^\s]{2,}|www\.[^\s]+\.[^\s]{2,})')
matches = re_url.findall(s)
ret = []
for match in matches:
if match is None:
continue
if not match.startswith('http'):
ret.append('http://' + match)
else:
ret.append(match)
return ret
| bsd-2-clause | Python | |
fdb2dc8b54c5d7194639457444c32c20d5e2bfca | Create launch.py | MomsFriendlyRobotCompany/mote,MomsFriendlyRobotCompany/mote | launch.py | launch.py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import division
import pygecko
import opencvutils as cvu
def run():
pass
if __name__ == '__main__':
run()
| mit | Python | |
cee5313906b2ee7e4fb01fc772e2afc6c4de1072 | Add simple lauch script without configuration options | mswart/openvpn2dns,mswart/openvpn2dns | launch.py | launch.py | from twisted.application import internet, service
from twisted.names import dns
from twisted.names import server
from openvpnzone import OpenVpnStatusAuthority, extract_status_file_path
def createOpenvpn2DnsService():
zones = [OpenVpnStatusAuthority(extract_status_file_path('server.conf'))]
f = server.DNSServerFactory(zones, None, None, 100)
p = dns.DNSDatagramProtocol(f)
f.noisy = 0
m = service.MultiService()
for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]:
s = klass(53535, arg)
s.setServiceParent(m)
return m
application = service.Application("OpenVPN2DNS")
createOpenvpn2DnsService().setServiceParent(application)
| mit | Python | |
281e328711b9724027eb6b64939bf9795fe86ac4 | Create linter.py | ipernet/yaml-linter-python-wrapper,KRDS/yaml-linter-python-wrapper | linter.py | linter.py | #!/usr/bin/python
import yaml, sys, getopt, os.path
def main(argv):
try:
opts, args = getopt.getopt(argv,"hi:")
except getopt.GetoptError:
print 'linter.py -i <inputfile.yml>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'linter.py -i <inputfile.yml>'
sys.exit()
elif opt == '-i':
if os.path.isfile(arg):
stream = open(arg, 'r')
try:
yaml.safe_load(stream)
sys.exit()
except yaml.scanner.ScannerError:
sys.exit(1)
else:
print "Input file is missing or not readable"
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| mit | Python | |
5d7574728290fd1afba39769bb933b12b6044ee9 | Create massinvandring_streamer.py | ArVID220u/invandringsbot | massinvandring_streamer.py | massinvandring_streamer.py | # the MassinvandringStreamer is a subclass of TwythonStreamer
from twython import TwythonStreamer
# the MassinvandringStreamer class will use the streaming api to find tweets containing the word 'massinvandring'
# This class could technically be used to reply to all kinds of tweets.
class MassinvandringStreamer(TwythonStreamer):
# this function will be called when a tweet is received
def on_success(self, data):
# generate a reply
print("should generate a reply; not implemented yet though")
# when an error is caught
def on_error(self, status_code, data):
print("STREAMING API ERROR!")
print("Status code:")
print(status_code)
print("Other data:")
print(data)
print("END OF ERROR MESSAGE")
| mit | Python | |
ced30f90907909090c0da0e468c855f400d9da92 | Add shallow tests for spin-1/2 general drudge | tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge | tests/spin_one_half_gen_test.py | tests/spin_one_half_gen_test.py | """Tests for the general model with explicit one-half spin."""
import pytest
from drudge import UP, DOWN, SpinOneHalfGenDrudge
@pytest.fixture(scope='module')
def dr(spark_ctx):
"""The fixture with a general spin one-half drudge."""
return SpinOneHalfGenDrudge(spark_ctx)
def test_spin_one_half_general_drudge_has_properties(dr):
"""Test the basic properties of the drudge."""
assert dr.spin_vals == [UP, DOWN]
assert dr.orig_ham.n_terms == 2 + 4
assert dr.ham.n_terms == 2 + 3
| mit | Python | |
251e88398541124555b0c87edf83a59c4ea0347a | add testing framework for new announcer | IanDCarroll/xox | tests/test_announcer_2_chair.py | tests/test_announcer_2_chair.py | import unittest
from source.announcer_2_chair import *
class AnnouncerTestCase(unittest.TestCase):
def setUp(self):
pass
def test_announcer_is_a_class(self):
pass
def test_announcer_has_a_show_method(self):
pass
def test_announcer_has_an_ask_human_method(self):
pass
| mit | Python | |
733dc300dff354312fdfa7588bcd7636117ac0c7 | Create SpatialFieldRetrieve.py | mapping-glory/SpatialFieldRetrieval | SpatialFieldRetrieve.py | SpatialFieldRetrieve.py | #-------------------------------------------------------------------------------
# Name: Spatial Field Retrieval
# Purpose: Retrieve a field from the source dataset and use it to populate
# the target field. Honors selections.
# Author: Andy Bradford
#
# Created: 25/02/2016
# Copyright: (c) andy.bradford 2016
#-------------------------------------------------------------------------------
import arcpy
from arcpy import env
env.overwriteOutput = True
#parameters
#Layer to be calculated
InLayer = arcpy.GetParameterAsText(0)
#InField: Layer which will receive final data
InField = arcpy.GetParameterAsText(1)
#SourceLayer: Layer which contributes data.
SourceLayer = arcpy.GetParameterAsText(2)
#SourceField: source field
SourceField = arcpy.GetParameterAsText(3)
#SpatShip = spatial relationship - same as Spatial Join tool
SpatShip = arcpy.GetParameterAsText(4)
#MergeRule: How to handle one-to-many relationships
MergeRule = arcpy.GetParameterAsText(5)
#SearchDist: search distance
SearchDist = arcpy.GetParameterAsText(6)
#Create field map a la forrestchev
#thanks to forrestchev on GIS StackExchange
#this field mapping code sets up the Spatial Join code later
#to create an output with only the Target_FID and the source field.
ScratchFMS = arcpy.FieldMappings()
ScratchFMS.addTable(SourceLayer)
SourceIndex = ScratchFMS.findFieldMapIndex(SourceField)
SourceFM = ScratchFMS.getFieldMap(SourceIndex)
ScratchFMS = arcpy.FieldMappings()
SourceFM.addInputField(SourceLayer, SourceField)
SourceFM.mergeRule = MergeRule
ScratchFMS.addFieldMap(SourceFM)
#spatial join to scratch features
arcpy.SpatialJoin_analysis(InLayer, SourceLayer, "ScratchSJ", "JOIN_ONE_TO_ONE",
"KEEP_ALL", ScratchFMS, SpatShip, SearchDist)
arcpy.AddMessage("Spatial Join completed.")
#create dictionary object for join purposes.
#the key will be the Target FID, and the value is the target field value.
JoinDict = {}
with arcpy.da.SearchCursor("ScratchSJ", ("TARGET_FID", SourceField)) as cursor:
for row in cursor:
fid = row[0]
val = row[1]
JoinDict[fid] = val
arcpy.AddMessage("Dictionary created.")
#Update cursor, hinges on dictionary
with arcpy.da.UpdateCursor(InLayer, ("OID@", InField)) as cursor:
#reach into dictionary using FID values
for row in cursor:
#Search for dictionary item with feature's FID as key
val = JoinDict[row[0]]
row[1] = str(val)
cursor.updateRow(row)
#delete ScratchSJ file.
arcpy.Delete_management("ScratchSJ")
| mit | Python | |
69ca53841e830c582dde304578ba40d3833ab920 | add script to performance of trace processor ingestion am: c893b17841 | google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto | tools/measure_tp_performance.py | tools/measure_tp_performance.py | #!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import signal
import sys
import subprocess
import psutil
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REGEX = re.compile(
'.*Trace loaded: ([0-9.]+) MB in ([0-9.]+)s \(([0-9.]+) MB/s\)')
def run_tp_until_ingestion(args, env):
tp_args = [os.path.join(args.out, 'trace_processor_shell'), args.trace_file]
if not args.ftrace_raw:
tp_args.append('--no-ftrace-raw')
tp = subprocess.Popen(
tp_args,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
universal_newlines=True,
env=env)
lines = []
while True:
line = tp.stderr.readline()
lines.append(line)
match = REGEX.match(line)
if match:
break
if tp.poll():
break
ret = tp.poll()
fail = ret is not None and ret > 0
if fail:
print("Failed")
for line in lines:
sys.stderr.write(line)
return tp, fail, match[2]
def heap_profile_run(args, dump_at_max: bool):
profile_args = [
os.path.join(ROOT_DIR, 'tools', 'heap_profile'), '-i', '1', '-n',
'trace_processor_shell', '--print-config'
]
if dump_at_max:
profile_args.append('--dump-at-max')
config = subprocess.check_output(
profile_args,
stderr=subprocess.DEVNULL,
)
out_file = os.path.join(
args.result, args.result_prefix + ('max' if dump_at_max else 'rest'))
perfetto_args = [
os.path.join(args.out, 'perfetto'), '-c', '-', '--txt', '-o', out_file
]
profile = subprocess.Popen(
perfetto_args,
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
profile.stdin.write(config)
profile.stdin.close()
env = {
'LD_PRELOAD': os.path.join(args.out, 'libheapprofd_glibc_preload.so'),
'TRACE_PROCESSOR_NO_MMAP': '1',
'PERFETTO_HEAPPROFD_BLOCKING_INIT': '1'
}
(tp, fail, _) = run_tp_until_ingestion(args, env)
profile.send_signal(signal.SIGINT)
profile.wait()
tp.stdin.close()
tp.wait()
if fail:
os.remove(out_file)
def regular_run(args):
env = {'TRACE_PROCESSOR_NO_MMAP': '1'}
(tp, fail, time) = run_tp_until_ingestion(args, env)
p = psutil.Process(tp.pid)
mem = 0
for m in p.memory_maps():
mem += m.anonymous
tp.stdin.close()
tp.wait()
print(f'Time taken: {time}s, Memory: {mem / 1024.0 / 1024.0}MB')
def only_sort_run(args):
env = {
'TRACE_PROCESSOR_NO_MMAP': '1',
'TRACE_PROCESSOR_SORT_ONLY': '1',
}
(tp, fail, time) = run_tp_until_ingestion(args, env)
tp.stdin.close()
tp.wait()
print(f'Time taken: {time}s')
def main():
parser = argparse.ArgumentParser(
description="This script measures the running time of "
"ingesting a trace with trace processor as well as profiling "
"trace processor's memory usage with heapprofd")
parser.add_argument('--out', type=str, help='Out directory', required=True)
parser.add_argument(
'--result', type=str, help='Result directory', required=True)
parser.add_argument(
'--result-prefix', type=str, help='Result file prefix', required=True)
parser.add_argument(
'--ftrace-raw',
action='store_true',
help='Whether to ingest ftrace into raw table',
default=False)
parser.add_argument('trace_file', type=str, help='Path to trace')
args = parser.parse_args()
traced = subprocess.Popen([os.path.join(args.out, 'traced')],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Heap profile dump at max')
heap_profile_run(args, dump_at_max=True)
print('Heap profile dump at resting')
heap_profile_run(args, dump_at_max=False)
print('Regular run')
regular_run(args)
print('Only sort run')
only_sort_run(args)
traced.send_signal(signal.SIGINT)
traced.wait()
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
4791122d34cbf4eaf6bc118c5e7e78346dee7010 | add cost_ensemble | madjelan/CostSensitiveClassification,albahnsen/CostSensitiveClassification | costcla/models/cost_ensemble.py | costcla/models/cost_ensemble.py | __author__ = 'al'
| bsd-3-clause | Python | |
d099466e604c77b3f16676aec9dd1a04fa22ba98 | Set point settimanali | raspibo/ThermoRed,raspibo/ThermoRed | cgi-bin/writedayssetpoints.py | cgi-bin/writedayssetpoints.py | #!/usr/bin/env python3
import os
import json
import cgi
import cgitb
cgitb.enable()
# Mi serve il file dei setpoins
# Se il file esiste lo apro, se no, genero entrambi SetPoints e file
with open("dayssetpointarray.json") as JsonFileDays:
DaysSetPoints = json.load(JsonFileDays)
# Intestazione HTML
print("<!DOCTYPE html>")
print("""
<html>
<head>
<title>ThermoRed</title>
<meta name="GENERATOR" content="Midnight Commander (mcedit)">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="Keywords" content="termoregolatore, thermo, temperatura, python">
<meta name="Author" content="Davide">
</head>
<body>
""") # End / Start body
form=cgi.FieldStorage()
Error = "No" # Serve per il calcolo/verifica di errore
for j in range(len(DaysSetPoints)):
for k in range(len(DaysSetPoints[j]["hours"])):
StrName = str(DaysSetPoints[j]["day"])+str(k)
#print (StrName)
if StrName not in form:
#if str(var) not in form:
#print("Error")
print("<br/>Errore:", StrName)
Error = "Error"
else:
DaysSetPoints[j]["hours"][k]["temperature"] = cgi.escape(form[StrName].value)
# Se non c'e` stato nessun errore, test e` uguale alla lunghezza
# dei dati prodotti e posso sovrascrivere il file
if Error == "No":
with open('dayssetpointarray.json', 'w') as outfile:
# Stampo a video la matrice se viene validata e inserita
print("""
<br/>
<h4>Dati correttamente inseriti</h4>
<br/>
<p>Questo e` il risultato della matrice inserita:</p>
""")
print(DaysSetPoints)
json.dump(DaysSetPoints, outfile, indent=4)
"""
--------------------------------------------------
# Per tutta la lunghezza/voci contenute nell'array .. (3)
for j in range(len(DaysSetPoints)):
for i in range(len(SetPoints)):
print("<tr><td>",SetPoints[i]["display"],": </td>")
# Per tutta la lunghezza/voci contenute nell'array .. giorni/ore
for k in range(len(DaysSetPoints[j]["hours"])):
# Concatenamento
print("<td><input type=\"radio\" name=\"",,"\" value=\"",SetPoints[i]["name"],"\"> </td>")
print("</tr>")
print("<tr><td>")
print("<b>",DaysSetPoints[j]["day"],"</b><hr></br>")
print("<td></tr>")
print("<td><input type=\"submit\" value=\"Submit\"></td>")
print("</table>")
print("</form>") # END form
"""
# End body/End html
print("""
</body>
</html>
""")
| mit | Python | |
abd6fab2000d8af016a0251ab9fb912c359a77ed | add atom for eLisp | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | compiler/eLisp2/eLisp/atom.py | compiler/eLisp2/eLisp/atom.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from error import UnimplementedFunctionError
from interface import Eval, Egal
from seq import Sequence, List
class Atom(Eval, Egal):
def __init__(self, data):
self.data = data
self.hint = 'atom'
def __eq__(self, rhs):
if isinstance(rhs, Atom):
return self.data == rhs.data
else:
return False
class Symbol(Atom):
def __init__(self, symbol):
super(Symbol, self).__init__(symbol)
def __repr__(self):
return self.data
def __hash__(self):
return hash(self.data)
def eval(self, env, args=None):
return env.get(self.data)
TRUE = Symbol('#t')
FALSE = List()
class String(Atom, Sequence):
def __init__(self, str):
Atom.__init__(self, str)
def __repr__(self):
return repr(self.data)
def eval(self, env, args=None):
return self
def cons(self, e):
if e.__class__ != self.__class__ and
e.__class__ != Symbol.__class__:
raise UnimplementedFunctionError(
'Cannot cons a string and a ',
e.__class__.__name__)
return String(e.data + self.data)
def car(self):
"""
`car` is roughly the same as `first` in linear eLisp
"""
return Symbol(self.data[0])
def cdr(self):
"""
`cdr` is roughly the same as 'rest' in linear eLisp
"""
return String(self.data[1:])
| bsd-2-clause | Python | |
59f9e552d16e7d4dca73b1232c0804d4ef3154a7 | Add functioning code for training sequence | KT12/hands_on_machine_learning | training_sequence_classifier.py | training_sequence_classifier.py | import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
with tf.variable_scope('rnn', initializer=tf.contrib.layers.variance_scaling_initializer()):
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
logits = tf.layers.dense(states, n_outputs)
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('/tmp/data/')
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
n_epochs = 100
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print(epoch, 'Train acc: ', acc_train, 'Test acc: ', acc_test) | mit | Python | |
6ea0d957a49f734151605c952768d15183d3a285 | Create __init__.py | CyberTriber/python | CyberGuard_v2/secrets/__init__.py | CyberGuard_v2/secrets/__init__.py | mit | Python | ||
9e38386947ba01effcf5908adad264aa77a688e5 | Add basic auth module | alorence/django-modern-rpc,alorence/django-modern-rpc | modernrpc/auth.py | modernrpc/auth.py | # coding: utf-8
def user_pass_test(func=None, test_function=None, params=None):
def decorated(function):
function.modernrpc_auth_check_function = test_function
function.modernrpc_auth_check_params = params
return function
# If @rpc_method is used without any argument nor parenthesis
if func is None:
def decorator(f):
return decorated(f)
return decorator
# If @rpc_method() is used with parenthesis (with or without arguments)
return decorated(func)
def check_user_is_logged(user):
if user:
return not user.is_anonymous()
return False
def check_user_is_admin(user):
if user:
return user.is_admin()
return False
def check_user_has_perm(user, perm):
if user:
return user.has_perm(perm)
return False
def check_user_has_perms(user, perms):
if user:
return user.has_perms(perms)
return False
def login_required(func=None):
def decorated(function):
return user_pass_test(function, check_user_is_logged)
# If @rpc_method is used without any argument nor parenthesis
if func is None:
def decorator(f):
return decorated(f)
return decorator
# If @rpc_method() is used with parenthesis (with or without arguments)
return decorated(func)
| mit | Python | |
2a44794af558563d9cdfc1d0ea9bf072fad41ffa | test soma_workflow working directory | neurospin/pylearn-epac,neurospin/pylearn-epac | epac/tests/test_swf_wd.py | epac/tests/test_swf_wd.py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 17:07:47 2013
@author: jinpeng.li@cea.fr
"""
import tempfile
import os
from soma_workflow.client import Job, Workflow
from soma_workflow.client import Helper, FileTransfer
from soma_workflow.client import WorkflowController
import socket
import os.path
if __name__ == '__main__':
tmp_work_dir_path = tempfile.mkdtemp()
cur_work_dir = os.getcwd()
test_filepath = u"./onlytest.txt"
job = Job(command=[u"touch", test_filepath],
name="epac_job_test",
working_directory=tmp_work_dir_path)
soma_workflow = Workflow(jobs=[job])
resource_id = socket.gethostname()
controller = WorkflowController(resource_id, "", "")
## run soma-workflow
## =================
wf_id = controller.submit_workflow(workflow=soma_workflow,
name="epac workflow")
Helper.wait_workflow(wf_id, controller)
if not os.path.isfile(os.path.join(tmp_work_dir_path, test_filepath)):
raise ValueError("Soma-workflow cannot define working directory")
else:
print "OK"
| bsd-3-clause | Python | |
231029d867171ad5ee708c61d8a0aed60127aa9a | Add test for Link object. | CybOXProject/python-cybox | cybox/test/objects/link_test.py | cybox/test/objects/link_test.py | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.objects.link_object import Link
from cybox.objects.uri_object import URI
from cybox.test.objects import ObjectTestCase
class TestLink(ObjectTestCase, unittest.TestCase):
object_type = "LinkObjectType"
klass = Link
_full_dict = {
'value': u("http://www.example.com"),
'type': URI.TYPE_URL,
'url_label': u("Click Here!"),
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | Python | |
07b198463951753535217ff1612c2789045c4046 | add manage.py | praekelt/seed-stage-based-messaging,praekelt/seed-stage-based-messaging,praekelt/seed-staged-based-messaging | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"seed_staged_based_messaging.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| bsd-3-clause | Python | |
2cad12729048dd5dc52b5d612656fe60bb3bd256 | Use '/usr/bin/env python' instead of '/usr/bin/python' in manage.py to support running manage.py as an executable in virtualenvs. | sudheesh001/oh-mainline,campbe13/openhatch,waseem18/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,campbe13/openhatch,Changaco/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,moijes12/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,SnappleCap/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,nirmeshk/oh-mainline,mzdaniel/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,eeshangarg/oh-mainline,eeshangarg/oh-mainline,sudheesh001/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,willingc/oh-mainline,campbe13/openhatch,SnappleCap/oh-mainline,Changaco/oh-mainline,eeshangarg/oh-mainline,mzdaniel/oh-mainline,ehashman/oh-mainline,openhatch/oh-mainline,mzdaniel/oh-mainline,openhatch/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,moijes12/oh-mainline,ehashman/oh-mainline,heeraj123/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,heeraj123/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,waseem18/oh-mainline,nirmeshk/oh-mainline,willingc/oh-mainline,ojengwa/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,ojengwa/oh-mainline,eeshangarg/oh-mainline,nirmeshk/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,sudheesh001/oh-mainline,Changaco/oh-mainline,mzdaniel/oh-mainline,willingc/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,onceuponatimefor
ever/oh-mainline | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if not os.path.exists('mysite/manage.py'):
print "Eek, where is the real manage.py? Quitting."
sys.exit(1)
execfile('mysite/manage.py', globals(), locals())
| #!/usr/bin/python
import os
import sys
if not os.path.exists('mysite/manage.py'):
print "Eek, where is the real manage.py? Quitting."
sys.exit(1)
execfile('mysite/manage.py', globals(), locals())
| agpl-3.0 | Python |
be3c5b7f73025a88055fcc22cbfdafbd5829a1b7 | Add listCycle solution | lemming52/white_pawn,lemming52/white_pawn | hackerrank/listCycle/solution.py | hackerrank/listCycle/solution.py | """
A linked list is said to contain a cycle if any node is visited more than once while traversing the list.
Complete the function provided for you in your editor. It has one parameter: a pointer to a Node object named that points to the head of a linked list. Your function must return a boolean denoting whether or not there is a cycle in the list. If there is a cycle, return true; otherwise, return false.
Note: If the list is empty, will be null.
Input Format
Our hidden code checker passes the appropriate argument to your function. You are not responsible for reading any input from stdin.
Constraints
Output Format
If the list contains a cycle, your function must return true. If the list does not contain a cycle, it must return false. The binary integer corresponding to the boolean value returned by your function is printed to stdout by our hidden code checker.
Sample Input
The following linked lists are passed as arguments to your function:
Sample Inputs
Sample Output
0
1
Explanation
The first list has no cycle, so we return false and the hidden code checker prints to stdout.
The second list has a cycle, so we return true and the hidden code checker prints to stdout.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the has_cycle function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def has_cycle(head) -> bool:
if (head == None):
return False
else:
slow = head
fast = head.next
while (slow != fast) :
if (fast == None or fast.next == None):
return False
else:
slow = slow.next;
fast = fast.next.next;
return True
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
index = int(input())
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
extra = SinglyLinkedListNode(-1);
temp = llist.head;
for i in range(llist_count):
if i == index:
extra = temp
if i != llist_count-1:
temp = temp.next
temp.next = extra
result = has_cycle(llist.head)
fptr.write(str(int(result)) + '\n')
fptr.close() | mit | Python | |
979dbd0ff0fba03847ca96beaf4d68a0f4e5c9eb | Add beam information to 850um file | jpinedaf/B5_wide_multiple | data/b5_scuba2_850um_addbeam.py | data/b5_scuba2_850um_addbeam.py | import os
from astropy.io import fits
file_scuba2_raw='B5_850um_ext_v2_regrid.fits'
file_scuba2_out='B5_850um_ext_v2_regrid_beam.fits'
hdu = fits.open(file_scuba2_raw)
hdr =hdu[0].header
data=hdu[0].data
hdu.close()
hdr.append(('BMAJ', 14.6/3600.))
hdr.append(('BMIN', 14.6/3600.))
hdr.append(('BPA', 0.0))
os.system('rm -r '+file_scuba2_out)
fits.writeto(file_scuba2_out, data, hdr)
| mit | Python | |
d87311d349b3a7a25b23bd03804a27fd29e90b52 | add missing file | chfw/moban,chfw/moban | moban/data_loaders/manager.py | moban/data_loaders/manager.py | import os
from lml.plugin import PluginManager
from moban import constants
class AnyDataLoader(PluginManager):
def __init__(self):
super(AnyDataLoader, self).__init__(constants.DATA_LOADER_EXTENSION)
def get_data(self, file_name):
file_extension = os.path.splitext(file_name)[1]
file_type = file_extension
if file_extension.startswith("."):
file_type = file_type[1:]
try:
loader_function = self.load_me_now(file_type)
except Exception:
loader_function = self.load_me_now(constants.DEFAULT_DATA_TYPE)
return loader_function(file_name)
| mit | Python | |
5346b024ffc3e4eca25794214a4539cb8a20f08c | add monk file | atria-soft/eproperty,atria-soft/eproperty | monk_eproperty.py | monk_eproperty.py | #!/usr/bin/python
import monkModule
import monkTools as tools
import os
def get_desc():
return "E-property simple property interface"
def create():
    """Build and return the monk module descriptor for the eproperty library."""
    module = monkModule.Module(__file__, 'eproperty', 'LIBRARY')
    # Enable documentation: public website and the matching source repository.
    module.set_website("http://atria-soft.github.io/eproperty/")
    module.set_website_sources("http://github.com/atria-soft/eproperty/")
    # Where the sources and the general documentation live in this repository.
    module.set_path(os.path.join(tools.get_current_path(__file__), "eproperty"))
    module.set_path_general_doc(os.path.join(tools.get_current_path(__file__), "doc"))
    return module
| apache-2.0 | Python | |
911787ff1c8d0fb03c522e42dcb5f8bacd7fcde6 | Create netstat_parser.py | robert-abela/net-statistics-parser | netstat_parser.py | netstat_parser.py | #The MIT License (MIT)
#
#Copyright (c) 2015 Robert Abela
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#See: https://github.com/robert-abela/net-statistics-parser
import os
import re
import csv
NP = r'(\d*)' #number pattern
class Value:
'''Internal class used to facilitate the ValueList management.'''
def __init__(self, header, pattern, section, subsection = None):
'''Contructor to create a new value that still needs to be parsed.'''
self.header = header
self.pattern = pattern
self.section = section
self.subsection = subsection
def read_value(self):
'''Calls ``netstat -es`` and parses the output to find the desired
value.'''
self.value = self.__parse_value()
def __parse_value(self):
current_section = ''
current_subsection = ''
return_value = None
#file = open(r'netstat.txt')
#for line in file:
for line in os.popen('netstat -es'):
line = line.strip('\r\n')
#print(line)
if len(line) == 0: #skip empty line
continue
if line.endswith(':'): #section/subsection heading
if line[0].isspace():
current_subsection = line.strip()
else:
current_section = line.strip()
current_subsection = ''
continue
if (self.section == current_section):
if not self.subsection or (self.subsection == current_subsection):
match = re.findall(self.pattern, line)
if match:
return_value = match[0]
break
#file.close()
return return_value
class ValueList:
def __init__(self):
'''Contructor for the ValueList class.'''
self.values = []
def write_csv(self, path):
'''
``netstat -es`` command is called and the parsing takes place. At the
end this function writes the values extracted to a CSV file in the path
supplied. This file is always written over.
Return: None
'''
headers_row = []
values_row = []
for value in self.values:
value.read_value()
headers_row.append(value.header)
values_row.append(value.value)
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(headers_row)
writer.writerow(values_row)
def add_value_to_parse(self, header, pattern, section, subsection = None):
'''
Adds a new value that needs to be read from the netstat output.
Parameter description: header will be used as the CSV heading for this
value. pattern will be used to match the line and extract a part of it.
section is the heading in netstat output where the value appears and
(optional) subsection is to be used only when applicable.
Return: None
'''
self.values.append(Value(header, pattern, section, subsection))
###############################################################################
# Edit only below this comment
###############################################################################
my_list = ValueList()
my_list.add_value_to_parse('BSR', NP + ' bad segments received.','Tcp:')
my_list.add_value_to_parse('OO', 'OutOctets: ' + NP, 'IpExt:')
my_list.add_value_to_parse('DU', 'destination unreachable: ' + NP, 'Icmp:', 'ICMP input histogram:')
# Once ready adding values to be parsed, do the parsing and output to csv
# (it will be over written every time).
my_list.write_csv(r'output.csv')
| mit | Python | |
3aa165d9527266d978d943437cb03816c30b8608 | add a fit_nh3 test | keflavich/pyspeckit,pyspeckit/pyspeckit,low-sky/pyspeckit,jpinedaf/pyspeckit,e-koch/pyspeckit,keflavich/pyspeckit,e-koch/pyspeckit,jpinedaf/pyspeckit,pyspeckit/pyspeckit,low-sky/pyspeckit | examples/ammonia_fit_example_wrapper.py | examples/ammonia_fit_example_wrapper.py | from __future__ import print_function
import pyspeckit
import numpy as np
from astropy import units as u
from pyspeckit.spectrum.models import ammonia
# Common velocity axis for both synthetic spectra.
xarr = np.linspace(-40, 40, 300) * u.km/u.s
# Synthetic NH3 (1,1) and (2,2) model spectra, evaluated on the velocity
# axis converted to frequency around each line's rest frequency.
oneonemod = ammonia.ammonia(xarr.to(u.GHz, u.doppler_radio(ammonia.freq_dict['oneone']*u.Hz)),)
twotwomod = ammonia.ammonia(xarr.to(u.GHz, u.doppler_radio(ammonia.freq_dict['twotwo']*u.Hz)),)
# Wrap the models as Spectrum objects tagged with their rest frequencies.
sp11 = pyspeckit.Spectrum(xarr=xarr, data=oneonemod, unit=u.K,
                          xarrkwargs={'refX': ammonia.freq_dict['oneone']*u.Hz})
sp22 = pyspeckit.Spectrum(xarr=xarr, data=twotwomod, unit=u.K,
                          xarrkwargs={'refX': ammonia.freq_dict['twotwo']*u.Hz})
input_dict={'oneone':sp11, 'twotwo':sp22,}
# First fit: no baseline subtraction.
spf, specout = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict, dobaseline=False)
print(specout.specfit.modelpars)
print(specout.specfit.parinfo)
# Second fit: with baseline subtraction, excluding the line region.
spf2, specout2 = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict,
                                                      dobaseline=True,
                                                      baselinekwargs={'exclude':[-30,30]*u.km/u.s})
# NOTE(review): these re-print the FIRST fit's results (specout, not
# specout2) -- presumably specout2 was intended; verify.
print(specout.specfit.modelpars)
print(specout.specfit.parinfo)
| mit | Python | |
29d8e20e41ab599030cd1027069ba01f569c1627 | add terminal highlight utils | garyelephant/pyutils | highlight.py | highlight.py | class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def highlight( s, **term_color ):
    """Return *s* with every occurrence of each keyword wrapped in a color.

    Keyword arguments map a search term to an ANSI escape sequence; each
    occurrence of the term is prefixed with that sequence and followed by a
    reset. This will work on unixes including OS X, linux and windows
    (provided you enable ansi.sys).
    """
    # Map string index -> escape code to insert just before that index.
    inserts = {}
    for term, color in term_color.items():
        start = 0
        while start < len(s):
            hit = s.find(term, start)
            if hit == -1:
                break
            start = hit + 1
            inserts[hit] = color
            inserts[hit + len(term)] = bcolors.ENDC
    if not inserts:
        return s
    # Stitch the string back together, injecting codes at marked indices.
    pieces = []
    previous = 0
    for cut in sorted(inserts):
        pieces.append(s[previous:cut])
        pieces.append(inserts[cut])
        previous = cut
    pieces.append(s[previous:])
    return ''.join(pieces)
if __name__ == '__main__':
    # Demo: colorize a few phrases of a sample message and print it.
    s = "How to print string with color in terminal? \n" \
        "This somewhat depends on what platform you are on. \n" \
        "The most common way to do this is by printing ANSI escape sequences."
    terms_color = {
        'print string with color': bcolors.WARNING,
        'platform': bcolors.OKBLUE,
        'ANSI escape sequences': bcolors.FAIL
    }
    # Python 2 print statement (this module targets Python 2).
    print highlight( s, **terms_color )
| mit | Python | |
23607247006f36034ba29eba0fddbc35c9f407b4 | add script for automatic dependency updates | janxb/php-ical,janxb/php-ical,janxb/php-ical,janxb/php-ical | bin/update-web-dependencies.py | bin/update-web-dependencies.py | #!/usr/bin/env python
import sys,re,urllib.request,json,fileinput
from prettytable import PrettyTable
def replaceAll(file,searchExp,replaceExp):
    """Rewrite *file* in place, replacing every occurrence of *searchExp*
    with *replaceExp* on each line.
    """
    # The context manager guarantees the fileinput state (open handle and
    # its temporary backup file) is cleaned up even if an exception occurs
    # mid-rewrite; the original bare loop leaked it on errors.
    with fileinput.input(file, inplace=1) as stream:
        for line in stream:
            if searchExp in line:
                line = line.replace(searchExp,replaceExp)
            # In inplace mode stdout is redirected into the file itself.
            sys.stdout.write(line)
# Target file (e.g. an HTML page) whose CDN links should be refreshed.
filename = sys.argv[1];
updates = PrettyTable(['Dependency', 'Old Version', 'New Version'])
for line in open(filename).read().split("\n"):
    if "cdnjs" in line:
        # cdnjs URLs look like .../ajax/libs/<name>/<version>/...
        dependency = re.match("(?:.+)\/ajax\/libs\/([a-z\-\.]+)\/([0-9a-zA-Z\.\-]+)\/", line);
        if dependency:
            dep_name = dependency.groups()[0];
            dep_version = dependency.groups()[1];
            # Ask the cdnjs API for the latest published version.
            with urllib.request.urlopen("https://api.cdnjs.com/libraries/"+dep_name+"?fields=name,version") as url:
                data = json.loads(url.read().decode())
                dep_version_new = data["version"]
            if dep_version != dep_version_new:
                # Rewrite the file in place and record the bump for the report.
                updatedLine = line.replace(dep_version,dep_version_new)
                replaceAll(filename, line, updatedLine)
                updates.add_row([dep_name, dep_version, dep_version_new])
    if "use.fontawesome" in line:
        # Font Awesome is versioned via .../releases/<version>/ URLs.
        dependency = re.match("(?:.+)\/releases\/([0-9a-zA-Z\.\-]+)\/", line);
        if dependency:
            dep_name = "fontawesome";
            dep_version = dependency.groups()[0];
            # Latest release tag from the GitHub API.
            with urllib.request.urlopen("https://api.github.com/repos/FortAwesome/Font-Awesome/releases/latest") as url:
                data = json.loads(url.read().decode())
                dep_version_new = "v"+data["tag_name"]
            if dep_version != dep_version_new:
                updatedLine = line.replace(dep_version,dep_version_new)
                replaceAll(filename, line, updatedLine)
                updates.add_row([dep_name, dep_version, dep_version_new])
print(updates) | mit | Python | |
f2f2f8833628058052fae0c5c814e42411f681d2 | Add migrations | smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning | python/ecep/portal/migrations/0021_auto_20170625_1454.py | python/ecep/portal/migrations/0021_auto_20170625_1454.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters the email fields of the
    # contact and location models to EmailField(max_length=254).
    dependencies = [
        ('portal', '0020_auto_20170525_1921'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contact',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='location',
            name='email',
            field=models.EmailField(max_length=254, blank=True),
        ),
    ]
| mit | Python | |
4feb6d987f92981542eea2a9501e363277ce4109 | Create Tcheck2_clear.py | martymcwizard/CS512_link_predictor,martymcwizard/CS512_link_predictor | Tcheck2_clear.py | Tcheck2_clear.py | import sys
from itertools import combinations
# Pair-count dictionaries: F_dict counts co-occurring id pairs seen in the
# training file, M_dict counts pairs that only appear in the test file.
F_dict = dict()
M_dict = dict()
counter = 0  # NOTE(review): never used below.
# Command-line arguments: training input, test input, and the two outputs.
fileIn1 = sys.argv[1] #feb_less_april_clear
fileIn2 = sys.argv[2] #mar_clear.txt
fileOut1 = sys.argv[3] #t_pairs_clear.feb
fileOut2 = sys.argv[4] #t_uniq_pairs_clear.mar
# First pass just counts the lines of the training file.
fi = open(fileIn1, 'r')
num_lines = sum(1 for line in fi)
fi.close()
print 'Number of lines in training data', num_lines
foF = open(fileOut1, 'w+')
fi = open(fileIn1, 'r')
for line in range(0, num_lines): #num_lines):
    # Pipe-separated record; fields from index 2 onwards are the ids.
    inline = [i for i in fi.readline().rstrip('\n').split('|')]
    if len(inline) > 3:
        aidline = inline[2:]
        # De-duplicate and sort so each unordered pair gets a canonical key.
        aidline = sorted(set(aidline), reverse = False)
        while 'NoMATCH' in aidline:
            aidline.remove('NoMATCH')
        #print aidline
        for c in combinations(aidline,2):
            c = sorted(c, reverse = False)
            # Key is the two ids concatenated (assumes 6-char ids; see the
            # key[:6]/key[-6:] split below -- TODO confirm).
            cstr = c[0] + c[1]
            if cstr in F_dict:
                F_dict[cstr] += 1
            else:
                F_dict[cstr] = 1
print 'Number of lines packed into a dictionary from first file', len(F_dict)
# Write training pairs back as id1|id2|count.
for key, value in F_dict.iteritems():
    foF.write(key[:6] + '|' + key[-6:] + '|' + str(value) + '\n')
foF.close()
fi.close()
# Same procedure for the test file, keeping only pairs absent from F_dict.
fi = open(fileIn2, 'r')
num_lines = sum(1 for line in fi)
fi.close()
print 'Number of rows in test data', num_lines
foR = open(fileOut2, 'w+')
fi = open(fileIn2, 'r')
for line in range(0, num_lines): #num_lines):
    inline = [i for i in fi.readline().rstrip('\n').split('|')]
    if len(inline) > 3:
        aidline = inline[2:]
        aidline = sorted(set(aidline), reverse = False)
        while 'NoMATCH' in aidline:
            aidline.remove('NoMATCH')
        for c in combinations(aidline,2):
            #print c
            c = sorted(c, reverse = False)
            #print 'sorted', c
            cstr = c[0] + c[1]
            # Only count pairs that were never seen in the training data.
            if cstr not in F_dict:
                if cstr in M_dict:
                    M_dict[cstr] += 1
                else:
                    M_dict[cstr] = 1
print 'Number of lines packed into a dictionary from first and second file', len(F_dict)
for key, value in M_dict.iteritems():
    foR.write(key[:6] + '|' + key[-6:] + '|' + str(value) + '\n')
print 'Number of new, unique pairs in test data: ', len(M_dict)
foR.close()
fi.close()
| mit | Python | |
64c21a3e01d50cdc6a719f0e4e48f925d5dd9e03 | Add tests for very big AST trees | JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton | src/testers/unittests/test_ast_deep.py | src/testers/unittests/test_ast_deep.py | import unittest
from triton import *
DEPTH = 10000
class TestDeep(unittest.TestCase):
    """Stress test: build a very deep AST and exercise simplify/duplicate."""
    def setUp(self):
        """Define the arch and build a DEPTH-deep symbolic expression on rax."""
        self.triton = TritonContext()
        self.triton.setArchitecture(ARCH.X86_64)
        self.ctx = self.triton.getAstContext()
        # Symbolize rax so the expression built below stays fully symbolic.
        sym_var = self.ctx.variable(self.triton.convertRegisterToSymbolicVariable(self.triton.registers.rax))
        add_inst = Instruction()
        add_inst.setAddress(0x100)
        add_inst.setOpcode("\x48\x01\xc0") # add rax, rax
        sub_inst = Instruction()
        sub_inst.setOpcode("\x48\x29\xC0") # sub rax, rax
        # Alternate add/sub DEPTH times, chaining the instruction addresses,
        # so the symbolic expression for rax grows very deep.
        for _ in range(DEPTH):
            self.triton.processing(add_inst)
            sub_inst.setAddress(add_inst.getAddress() + add_inst.getSize())
            self.triton.processing(sub_inst)
            add_inst.setAddress(sub_inst.getAddress() + sub_inst.getSize())
        self.complex_ast_tree = self.triton.getSymbolicRegister(self.triton.registers.rax).getAst()
    def test_z3_conversion(self):
        """Simplifying the deep tree must yield a 64-bit zero.

        Each iteration ends with ``sub rax, rax``, so rax is always 0.
        """
        result = self.triton.simplify(self.complex_ast_tree, True)
        answer = self.ctx.bv(0, 64)
        self.assertEqual(str(result), str(answer))
    def test_duplication(self):
        """Duplicating the very deep tree should complete without error."""
        s = self.ctx.duplicate(self.complex_ast_tree)
| apache-2.0 | Python | |
c2f79200689171a49c5bd72e6354ba56ee09a6b6 | Upgrade libchromiumcontent to contain printing headers. | simonfork/electron,anko/electron,kazupon/electron,Faiz7412/electron,JesselJohn/electron,etiktin/electron,rajatsingla28/electron,oiledCode/electron,brave/electron,Zagorakiss/electron,voidbridge/electron,thomsonreuters/electron,pirafrank/electron,sky7sea/electron,mattdesl/electron,evgenyzinoviev/electron,yan-foto/electron,webmechanicx/electron,SufianHassan/electron,d-salas/electron,rreimann/electron,nicobot/electron,egoist/electron,nagyistoce/electron-atom-shell,neutrous/electron,Rokt33r/electron,carsonmcdonald/electron,astoilkov/electron,fireball-x/atom-shell,kenmozi/electron,JussMee15/electron,neutrous/electron,darwin/electron,kenmozi/electron,yalexx/electron,pandoraui/electron,roadev/electron,fabien-d/electron,tylergibson/electron,mjaniszew/electron,digideskio/electron,kikong/electron,tinydew4/electron,RobertJGabriel/electron,mattdesl/electron,etiktin/electron,twolfson/electron,roadev/electron,Faiz7412/electron,takashi/electron,farmisen/electron,egoist/electron,chriskdon/electron,adamjgray/electron,anko/electron,icattlecoder/electron,gbn972/electron,MaxGraey/electron,wolfflow/electron,davazp/electron,gabriel/electron,yalexx/electron,setzer777/electron,Zagorakiss/electron,medixdev/electron,iftekeriba/electron,DivyaKMenon/electron,kazupon/electron,ervinb/electron,Neron-X5/electron,nicholasess/electron,matiasinsaurralde/electron,dongjoon-hyun/electron,fritx/electron,michaelchiche/electron,egoist/electron,jcblw/electron,oiledCode/electron,BionicClick/electron,bruce/electron,Jonekee/electron,John-Lin/electron,voidbridge/electron,JussMee15/electron,zhakui/electron,mjaniszew/electron,felixrieseberg/electron,jiaz/electron,howmuchcomputer/electron,vipulroxx/electron,Ivshti/electron,synaptek/electron,thomsonreuters/electron,Ivshti/electron,smczk/electron,MaxGraey/electron,setzer777/electron,voidbridge/electron,noikiy/electron,greyhwndz/electron,zhakui/electron,GoooIce/electro
n,pirafrank/electron,destan/electron,farmisen/electron,jcblw/electron,astoilkov/electron,jsutcodes/electron,sshiting/electron,preco21/electron,kikong/electron,the-ress/electron,adcentury/electron,Gerhut/electron,posix4e/electron,electron/electron,medixdev/electron,cqqccqc/electron,matiasinsaurralde/electron,kostia/electron,greyhwndz/electron,etiktin/electron,thingsinjars/electron,jlord/electron,wan-qy/electron,Floato/electron,jonatasfreitasv/electron,renaesop/electron,micalan/electron,nekuz0r/electron,wolfflow/electron,jaanus/electron,micalan/electron,meowlab/electron,jonatasfreitasv/electron,adamjgray/electron,biblerule/UMCTelnetHub,mattotodd/electron,joneit/electron,fomojola/electron,RIAEvangelist/electron,simonfork/electron,nicobot/electron,BionicClick/electron,aaron-goshine/electron,shockone/electron,eric-seekas/electron,webmechanicx/electron,thompsonemerson/electron,aecca/electron,dongjoon-hyun/electron,jtburke/electron,ervinb/electron,greyhwndz/electron,vipulroxx/electron,the-ress/electron,dkfiresky/electron,tomashanacek/electron,bbondy/electron,cqqccqc/electron,gerhardberger/electron,SufianHassan/electron,aaron-goshine/electron,natgolov/electron,Neron-X5/electron,MaxGraey/electron,renaesop/electron,Evercoder/electron,miniak/electron,roadev/electron,mirrh/electron,kokdemo/electron,gstack/infinium-shell,vHanda/electron,hokein/atom-shell,pombredanne/electron,chriskdon/electron,jonatasfreitasv/electron,gbn972/electron,noikiy/electron,IonicaBizauKitchen/electron,edulan/electron,astoilkov/electron,timruffles/electron,wolfflow/electron,thompsonemerson/electron,pombredanne/electron,ianscrivener/electron,howmuchcomputer/electron,joneit/electron,jacksondc/electron,carsonmcdonald/electron,micalan/electron,Gerhut/electron,timruffles/electron,the-ress/electron,bruce/electron,leolujuyi/electron,vaginessa/electron,lzpfmh/electron,trankmichael/electron,sky7sea/electron,fireball-x/atom-shell,John-Lin/electron,kokdemo/electron,jiaz/electron,fomojola/electron,SufianHassan/elect
ron,digideskio/electron,leolujuyi/electron,darwin/electron,takashi/electron,dahal/electron,yalexx/electron,preco21/electron,Evercoder/electron,mattotodd/electron,mhkeller/electron,thingsinjars/electron,mhkeller/electron,RobertJGabriel/electron,adcentury/electron,twolfson/electron,destan/electron,GoooIce/electron,brave/electron,medixdev/electron,shockone/electron,BionicClick/electron,soulteary/electron,timruffles/electron,seanchas116/electron,micalan/electron,jannishuebl/electron,mattotodd/electron,rajatsingla28/electron,LadyNaggaga/electron,kcrt/electron,lrlna/electron,DivyaKMenon/electron,jiaz/electron,aaron-goshine/electron,brenca/electron,carsonmcdonald/electron,jacksondc/electron,leethomas/electron,aaron-goshine/electron,d-salas/electron,MaxWhere/electron,christian-bromann/electron,jaanus/electron,fomojola/electron,dkfiresky/electron,fritx/electron,seanchas116/electron,stevemao/electron,leolujuyi/electron,RIAEvangelist/electron,benweissmann/electron,oiledCode/electron,fomojola/electron,Floato/electron,simongregory/electron,meowlab/electron,aichingm/electron,twolfson/electron,wan-qy/electron,webmechanicx/electron,rreimann/electron,fritx/electron,kikong/electron,webmechanicx/electron,shiftkey/electron,RobertJGabriel/electron,John-Lin/electron,christian-bromann/electron,digideskio/electron,kcrt/electron,arturts/electron,subblue/electron,bright-sparks/electron,coderhaoxin/electron,brave/electron,thingsinjars/electron,beni55/electron,cqqccqc/electron,mirrh/electron,destan/electron,pombredanne/electron,kazupon/electron,stevekinney/electron,leolujuyi/electron,twolfson/electron,dahal/electron,evgenyzinoviev/electron,Floato/electron,darwin/electron,Gerhut/electron,bpasero/electron,Jonekee/electron,jhen0409/electron,mubassirhayat/electron,tinydew4/electron,shaundunne/electron,MaxWhere/electron,gerhardberger/electron,coderhaoxin/electron,Jacobichou/electron,thompsonemerson/electron,LadyNaggaga/electron,JesselJohn/electron,iftekeriba/electron,adamjgray/electron,bbondy/elect
ron,arturts/electron,neutrous/electron,RIAEvangelist/electron,hokein/atom-shell,bpasero/electron,thomsonreuters/electron,vHanda/electron,mattotodd/electron,posix4e/electron,kazupon/electron,anko/electron,xfstudio/electron,arusakov/electron,jhen0409/electron,tinydew4/electron,sircharleswatson/electron,gamedevsam/electron,electron/electron,jjz/electron,noikiy/electron,rsvip/electron,aichingm/electron,davazp/electron,preco21/electron,stevekinney/electron,Evercoder/electron,wolfflow/electron,joaomoreno/atom-shell,jlhbaseball15/electron,stevemao/electron,rajatsingla28/electron,Jonekee/electron,mattdesl/electron,jiaz/electron,christian-bromann/electron,arturts/electron,edulan/electron,meowlab/electron,Floato/electron,deed02392/electron,tylergibson/electron,rprichard/electron,brenca/electron,shockone/electron,ankitaggarwal011/electron,mjaniszew/electron,MaxGraey/electron,faizalpribadi/electron,simongregory/electron,jhen0409/electron,tincan24/electron,shennushi/electron,jhen0409/electron,gabrielPeart/electron,roadev/electron,biblerule/UMCTelnetHub,sircharleswatson/electron,JesselJohn/electron,Jacobichou/electron,shockone/electron,twolfson/electron,Zagorakiss/electron,biblerule/UMCTelnetHub,shockone/electron,d-salas/electron,thomsonreuters/electron,yalexx/electron,ervinb/electron,christian-bromann/electron,nicobot/electron,zhakui/electron,maxogden/atom-shell,kostia/electron,takashi/electron,gbn972/electron,subblue/electron,tinydew4/electron,adcentury/electron,vaginessa/electron,maxogden/atom-shell,fritx/electron,kostia/electron,oiledCode/electron,greyhwndz/electron,jannishuebl/electron,gamedevsam/electron,aichingm/electron,natgolov/electron,abhishekgahlot/electron,Jacobichou/electron,darwin/electron,Gerhut/electron,miniak/electron,kokdemo/electron,posix4e/electron,bright-sparks/electron,adamjgray/electron,preco21/electron,lrlna/electron,Faiz7412/electron,icattlecoder/electron,miniak/electron,jacksondc/electron,rsvip/electron,matiasinsaurralde/electron,gerhardberger/electron,
RobertJGabriel/electron,deepak1556/atom-shell,kcrt/electron,IonicaBizauKitchen/electron,brave/electron,subblue/electron,noikiy/electron,icattlecoder/electron,greyhwndz/electron,RobertJGabriel/electron,micalan/electron,trigrass2/electron,iftekeriba/electron,systembugtj/electron,shennushi/electron,bright-sparks/electron,wan-qy/electron,jaanus/electron,d-salas/electron,lzpfmh/electron,mubassirhayat/electron,JussMee15/electron,trigrass2/electron,jannishuebl/electron,chrisswk/electron,MaxGraey/electron,mhkeller/electron,sircharleswatson/electron,tonyganch/electron,meowlab/electron,soulteary/electron,ankitaggarwal011/electron,michaelchiche/electron,aecca/electron,fabien-d/electron,robinvandernoord/electron,fabien-d/electron,leftstick/electron,deed02392/electron,Ivshti/electron,jsutcodes/electron,fabien-d/electron,chriskdon/electron,robinvandernoord/electron,smczk/electron,Andrey-Pavlov/electron,gstack/infinium-shell,dkfiresky/electron,mjaniszew/electron,rprichard/electron,shaundunne/electron,shaundunne/electron,thompsonemerson/electron,astoilkov/electron,smczk/electron,vHanda/electron,GoooIce/electron,leolujuyi/electron,kostia/electron,systembugtj/electron,adamjgray/electron,fomojola/electron,rreimann/electron,rajatsingla28/electron,tonyganch/electron,d-salas/electron,gabrielPeart/electron,etiktin/electron,destan/electron,synaptek/electron,mjaniszew/electron,simongregory/electron,pandoraui/electron,farmisen/electron,faizalpribadi/electron,simonfork/electron,xfstudio/electron,smczk/electron,joneit/electron,dkfiresky/electron,arusakov/electron,synaptek/electron,gstack/infinium-shell,benweissmann/electron,digideskio/electron,deed02392/electron,brenca/electron,Gerhut/electron,timruffles/electron,howmuchcomputer/electron,thingsinjars/electron,mattotodd/electron,soulteary/electron,vHanda/electron,aliib/electron,coderhaoxin/electron,sshiting/electron,maxogden/atom-shell,tinydew4/electron,bpasero/electron,preco21/electron,leethomas/electron,jtburke/electron,kikong/electron,bibler
ule/UMCTelnetHub,gamedevsam/electron,pirafrank/electron,destan/electron,BionicClick/electron,gerhardberger/electron,the-ress/electron,jaanus/electron,dongjoon-hyun/electron,gerhardberger/electron,yalexx/electron,subblue/electron,mattdesl/electron,thomsonreuters/electron,howmuchcomputer/electron,John-Lin/electron,vaginessa/electron,brave/muon,bobwol/electron,systembugtj/electron,natgolov/electron,seanchas116/electron,setzer777/electron,eriser/electron,posix4e/electron,iftekeriba/electron,evgenyzinoviev/electron,electron/electron,BionicClick/electron,benweissmann/electron,dongjoon-hyun/electron,GoooIce/electron,jcblw/electron,jsutcodes/electron,aichingm/electron,xiruibing/electron,eric-seekas/electron,tylergibson/electron,deepak1556/atom-shell,stevekinney/electron,beni55/electron,JussMee15/electron,Jacobichou/electron,kenmozi/electron,neutrous/electron,brave/electron,xiruibing/electron,abhishekgahlot/electron,JesselJohn/electron,sircharleswatson/electron,Ivshti/electron,michaelchiche/electron,jacksondc/electron,jlhbaseball15/electron,voidbridge/electron,jcblw/electron,xfstudio/electron,aichingm/electron,pandoraui/electron,davazp/electron,saronwei/electron,miniak/electron,benweissmann/electron,wan-qy/electron,ervinb/electron,MaxWhere/electron,trankmichael/electron,bbondy/electron,soulteary/electron,hokein/atom-shell,edulan/electron,Jacobichou/electron,pirafrank/electron,Faiz7412/electron,nekuz0r/electron,felixrieseberg/electron,jannishuebl/electron,jtburke/electron,electron/electron,baiwyc119/electron,rsvip/electron,leftstick/electron,sshiting/electron,LadyNaggaga/electron,stevemao/electron,adamjgray/electron,aaron-goshine/electron,robinvandernoord/electron,Zagorakiss/electron,gbn972/electron,IonicaBizauKitchen/electron,Evercoder/electron,xfstudio/electron,shaundunne/electron,takashi/electron,tomashanacek/electron,evgenyzinoviev/electron,fireball-x/atom-shell,benweissmann/electron,fffej/electron,digideskio/electron,lrlna/electron,joneit/electron,chriskdon/electron,evge
nyzinoviev/electron,eriser/electron,edulan/electron,gabriel/electron,electron/electron,mattotodd/electron,smczk/electron,shiftkey/electron,tomashanacek/electron,mirrh/electron,michaelchiche/electron,jjz/electron,the-ress/electron,kostia/electron,farmisen/electron,minggo/electron,mhkeller/electron,brenca/electron,minggo/electron,shiftkey/electron,sshiting/electron,chrisswk/electron,MaxWhere/electron,joneit/electron,nicholasess/electron,fireball-x/atom-shell,rhencke/electron,the-ress/electron,nagyistoce/electron-atom-shell,IonicaBizauKitchen/electron,brave/muon,bpasero/electron,medixdev/electron,anko/electron,voidbridge/electron,natgolov/electron,mrwizard82d1/electron,chrisswk/electron,mirrh/electron,kcrt/electron,GoooIce/electron,xiruibing/electron,arusakov/electron,gabriel/electron,setzer777/electron,eric-seekas/electron,tonyganch/electron,twolfson/electron,stevemao/electron,eric-seekas/electron,jlord/electron,shiftkey/electron,brave/muon,matiasinsaurralde/electron,beni55/electron,MaxWhere/electron,DivyaKMenon/electron,webmechanicx/electron,vHanda/electron,eriser/electron,icattlecoder/electron,simonfork/electron,carsonmcdonald/electron,bitemyapp/electron,brave/muon,yalexx/electron,ervinb/electron,xiruibing/electron,nekuz0r/electron,jlhbaseball15/electron,fomojola/electron,synaptek/electron,nicholasess/electron,carsonmcdonald/electron,robinvandernoord/electron,mrwizard82d1/electron,arturts/electron,beni55/electron,Andrey-Pavlov/electron,aliib/electron,gstack/infinium-shell,egoist/electron,mhkeller/electron,xfstudio/electron,micalan/electron,jonatasfreitasv/electron,roadev/electron,dkfiresky/electron,Ivshti/electron,kikong/electron,oiledCode/electron,lrlna/electron,fireball-x/atom-shell,baiwyc119/electron,noikiy/electron,zhakui/electron,matiasinsaurralde/electron,LadyNaggaga/electron,jhen0409/electron,systembugtj/electron,hokein/atom-shell,jlhbaseball15/electron,biblerule/UMCTelnetHub,LadyNaggaga/electron,darwin/electron,jcblw/electron,synaptek/electron,gamedevsam/ele
ctron,rajatsingla28/electron,trankmichael/electron,medixdev/electron,bobwol/electron,jlord/electron,ankitaggarwal011/electron,brave/muon,jiaz/electron,shennushi/electron,fffej/electron,smczk/electron,robinvandernoord/electron,kenmozi/electron,pirafrank/electron,Andrey-Pavlov/electron,ervinb/electron,aecca/electron,bbondy/electron,astoilkov/electron,Evercoder/electron,nekuz0r/electron,renaesop/electron,JesselJohn/electron,bitemyapp/electron,jannishuebl/electron,gabriel/electron,Rokt33r/electron,brave/electron,wolfflow/electron,rhencke/electron,tomashanacek/electron,felixrieseberg/electron,adcentury/electron,chriskdon/electron,oiledCode/electron,sky7sea/electron,SufianHassan/electron,aliib/electron,IonicaBizauKitchen/electron,setzer777/electron,sircharleswatson/electron,bobwol/electron,noikiy/electron,lzpfmh/electron,lzpfmh/electron,seanchas116/electron,tylergibson/electron,tomashanacek/electron,vipulroxx/electron,arusakov/electron,rsvip/electron,howmuchcomputer/electron,simonfork/electron,ianscrivener/electron,renaesop/electron,d-salas/electron,shennushi/electron,jhen0409/electron,dongjoon-hyun/electron,saronwei/electron,farmisen/electron,brenca/electron,Floato/electron,zhakui/electron,mirrh/electron,bruce/electron,eriser/electron,dkfiresky/electron,bobwol/electron,stevekinney/electron,tomashanacek/electron,faizalpribadi/electron,shaundunne/electron,arusakov/electron,natgolov/electron,RIAEvangelist/electron,DivyaKMenon/electron,gabrielPeart/electron,tincan24/electron,Jacobichou/electron,Faiz7412/electron,deepak1556/atom-shell,MaxWhere/electron,lzpfmh/electron,mhkeller/electron,simongregory/electron,shennushi/electron,digideskio/electron,simongregory/electron,sshiting/electron,JesselJohn/electron,jtburke/electron,ianscrivener/electron,jonatasfreitasv/electron,jjz/electron,dongjoon-hyun/electron,shaundunne/electron,simongregory/electron,biblerule/UMCTelnetHub,davazp/electron,thingsinjars/electron,yan-foto/electron,stevemao/electron,bwiggs/electron,soulteary/electron,bo
bwol/electron,Jonekee/electron,greyhwndz/electron,beni55/electron,gerhardberger/electron,bpasero/electron,tonyganch/electron,Gerhut/electron,jtburke/electron,felixrieseberg/electron,minggo/electron,Zagorakiss/electron,jsutcodes/electron,bitemyapp/electron,aliib/electron,brenca/electron,Rokt33r/electron,bruce/electron,rprichard/electron,renaesop/electron,tincan24/electron,BionicClick/electron,ankitaggarwal011/electron,nicobot/electron,electron/electron,nagyistoce/electron-atom-shell,baiwyc119/electron,howmuchcomputer/electron,coderhaoxin/electron,jonatasfreitasv/electron,sky7sea/electron,jcblw/electron,leftstick/electron,arturts/electron,pandoraui/electron,fffej/electron,trankmichael/electron,nekuz0r/electron,tinydew4/electron,maxogden/atom-shell,medixdev/electron,pombredanne/electron,yan-foto/electron,yan-foto/electron,kazupon/electron,joaomoreno/atom-shell,nicobot/electron,kenmozi/electron,kazupon/electron,eriser/electron,pandoraui/electron,Evercoder/electron,IonicaBizauKitchen/electron,lrlna/electron,tincan24/electron,shiftkey/electron,wan-qy/electron,trigrass2/electron,aliib/electron,ankitaggarwal011/electron,cqqccqc/electron,Neron-X5/electron,leftstick/electron,natgolov/electron,bitemyapp/electron,thingsinjars/electron,kcrt/electron,jannishuebl/electron,Andrey-Pavlov/electron,jjz/electron,iftekeriba/electron,cqqccqc/electron,aliib/electron,trigrass2/electron,RIAEvangelist/electron,takashi/electron,deepak1556/atom-shell,thompsonemerson/electron,synaptek/electron,DivyaKMenon/electron,bwiggs/electron,tincan24/electron,hokein/atom-shell,abhishekgahlot/electron,davazp/electron,bpasero/electron,leethomas/electron,vipulroxx/electron,cos2004/electron,jjz/electron,arusakov/electron,deed02392/electron,tylergibson/electron,dahal/electron,felixrieseberg/electron,bright-sparks/electron,voidbridge/electron,beni55/electron,saronwei/electron,robinvandernoord/electron,stevemao/electron,cos2004/electron,RIAEvangelist/electron,simonfork/electron,mirrh/electron,gstack/infinium-shel
l,jaanus/electron,davazp/electron,neutrous/electron,mrwizard82d1/electron,kokdemo/electron,baiwyc119/electron,egoist/electron,michaelchiche/electron,christian-bromann/electron,John-Lin/electron,destan/electron,rprichard/electron,rsvip/electron,Zagorakiss/electron,saronwei/electron,mrwizard82d1/electron,roadev/electron,systembugtj/electron,pandoraui/electron,xfstudio/electron,trigrass2/electron,mubassirhayat/electron,sky7sea/electron,Rokt33r/electron,jacksondc/electron,vaginessa/electron,leftstick/electron,lrlna/electron,Rokt33r/electron,seanchas116/electron,felixrieseberg/electron,edulan/electron,coderhaoxin/electron,icattlecoder/electron,joneit/electron,kokdemo/electron,shockone/electron,preco21/electron,Jonekee/electron,bruce/electron,Rokt33r/electron,bwiggs/electron,wan-qy/electron,kenmozi/electron,Andrey-Pavlov/electron,gabrielPeart/electron,mubassirhayat/electron,jsutcodes/electron,Neron-X5/electron,gbn972/electron,jiaz/electron,minggo/electron,leolujuyi/electron,mrwizard82d1/electron,tincan24/electron,sshiting/electron,kokdemo/electron,jlhbaseball15/electron,jlord/electron,anko/electron,aecca/electron,rreimann/electron,bbondy/electron,gbn972/electron,cos2004/electron,nicholasess/electron,leethomas/electron,maxogden/atom-shell,nagyistoce/electron-atom-shell,fritx/electron,soulteary/electron,vipulroxx/electron,michaelchiche/electron,lzpfmh/electron,farmisen/electron,mattdesl/electron,mjaniszew/electron,fffej/electron,rreimann/electron,ianscrivener/electron,minggo/electron,nicobot/electron,joaomoreno/atom-shell,stevekinney/electron,saronwei/electron,nekuz0r/electron,nicholasess/electron,pirafrank/electron,xiruibing/electron,sky7sea/electron,ankitaggarwal011/electron,deepak1556/atom-shell,gerhardberger/electron,rhencke/electron,eric-seekas/electron,xiruibing/electron,nagyistoce/electron-atom-shell,nicholasess/electron,trankmichael/electron,rhencke/electron,vaginessa/electron,RobertJGabriel/electron,joaomoreno/atom-shell,cos2004/electron,Neron-X5/electron,dahal/ele
ctron,jlhbaseball15/electron,pombredanne/electron,fritx/electron,JussMee15/electron,gabrielPeart/electron,jjz/electron,cos2004/electron,pombredanne/electron,vipulroxx/electron,SufianHassan/electron,webmechanicx/electron,bwiggs/electron,yan-foto/electron,iftekeriba/electron,meowlab/electron,leethomas/electron,abhishekgahlot/electron,rajatsingla28/electron,astoilkov/electron,tonyganch/electron,faizalpribadi/electron,aaron-goshine/electron,brave/muon,bitemyapp/electron,adcentury/electron,ianscrivener/electron,egoist/electron,shiftkey/electron,mattdesl/electron,joaomoreno/atom-shell,rreimann/electron,timruffles/electron,yan-foto/electron,etiktin/electron,deed02392/electron,John-Lin/electron,dahal/electron,leftstick/electron,etiktin/electron,setzer777/electron,thompsonemerson/electron,adcentury/electron,Neron-X5/electron,SufianHassan/electron,bobwol/electron,eric-seekas/electron,miniak/electron,meowlab/electron,bwiggs/electron,carsonmcdonald/electron,abhishekgahlot/electron,trigrass2/electron,subblue/electron,systembugtj/electron,gabrielPeart/electron,icattlecoder/electron,saronwei/electron,bright-sparks/electron,faizalpribadi/electron,faizalpribadi/electron,vaginessa/electron,baiwyc119/electron,anko/electron,bwiggs/electron,edulan/electron,evgenyzinoviev/electron,DivyaKMenon/electron,zhakui/electron,electron/electron,fffej/electron,cos2004/electron,benweissmann/electron,fffej/electron,GoooIce/electron,jtburke/electron,baiwyc119/electron,renaesop/electron,wolfflow/electron,the-ress/electron,jaanus/electron,tylergibson/electron,LadyNaggaga/electron,minggo/electron,takashi/electron,miniak/electron,mubassirhayat/electron,dahal/electron,aichingm/electron,thomsonreuters/electron,Andrey-Pavlov/electron,ianscrivener/electron,abhishekgahlot/electron,seanchas116/electron,chrisswk/electron,bright-sparks/electron,eriser/electron,aecca/electron,gamedevsam/electron,Jonekee/electron,mrwizard82d1/electron,kostia/electron,bruce/electron,bbondy/electron,JussMee15/electron,shennushi/elect
ron,matiasinsaurralde/electron,trankmichael/electron,coderhaoxin/electron,leethomas/electron,bitemyapp/electron,subblue/electron,neutrous/electron,gamedevsam/electron,gabriel/electron,jsutcodes/electron,rhencke/electron,sircharleswatson/electron,joaomoreno/atom-shell,jlord/electron,rhencke/electron,chrisswk/electron,stevekinney/electron,deed02392/electron,chriskdon/electron,arturts/electron,vHanda/electron,fabien-d/electron,gabriel/electron,posix4e/electron,tonyganch/electron,bpasero/electron,kcrt/electron,Floato/electron,cqqccqc/electron,posix4e/electron,aecca/electron,jacksondc/electron,christian-bromann/electron | script/lib/config.py | script/lib/config.py | #!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '432720d4613e3aac939f127fe55b9d44fea349e5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
| #!/usr/bin/env python
import platform
import sys
NODE_VERSION = 'v0.11.13'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'bb664e4665851fe923ce904e620ba43d8d010ba5'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
| mit | Python |
ddf0a5a4438531e4bfa29d8709c1c76d8ca17f59 | Add helper module for keyword-only arguments. | schandrika/volttron,schandrika/volttron,schandrika/volttron,schandrika/volttron | volttron/platform/kwonlyargs.py | volttron/platform/kwonlyargs.py | '''Support functions for implementing keyword-only arguments.
This module is designed to make it easy to support keyword-only
arguments in Python 2.7 while providing the same kind of exceptions one
would see with Python 3.x.
Basic usage:
def foo(arg1, *args, **kwargs):
# Use required context manager to convert KeyError exceptions
# to TypeError with an appropriate message.
with required:
arg2 = kwargs.pop('arg2')
arg3 = kwargs.pop('arg3')
# Provide a default to pop for optional arguments
arg4 = kwargs.pop('arg4', 'default value')
# Include the next line to disallow additional keyword args
assertempty(kwargs)
'''
__all__ = ['required', 'assertempty']
class Required(object):
    '''Context manager to raise TypeError for missing required kwargs.'''

    __slots__ = ()

    @classmethod
    def __enter__(cls):
        pass

    @classmethod
    def __exit__(cls, exc_type, exc_value, exc_tb):
        # pylint: disable=bad-context-manager
        # Let anything other than KeyError propagate unchanged; a KeyError
        # from kwargs.pop() becomes the TypeError Python 3 would raise for
        # a missing keyword-only argument.
        if exc_type is not KeyError:
            return None
        raise TypeError(
            'missing a required keyword-only argument %r' % exc_value.args)
required = Required() # pylint: disable=invalid-name
def assertempty(kwargs):
    '''Raise TypeError if kwargs is not empty.'''
    if kwargs:
        # next(iter(...)) works on both Python 2 and 3 dicts, unlike
        # dict.iterkeys(), which Python 3 removed.
        name = next(iter(kwargs))
        raise TypeError('got an unexpected keyword argument %r' % (name,))
| bsd-2-clause | Python | |
99d95d6ed14e912701b1f6ae26779612694590f5 | add gdw2 django task tutorial | pyjs/pyjs,gpitel/pyjs,minghuascode/pyj,spaceone/pyjs,lancezlin/pyjs,pyjs/pyjs,minghuascode/pyj,andreyvit/pyjamas,gpitel/pyjs,spaceone/pyjs,pombredanne/pyjs,pombredanne/pyjs,pyjs/pyjs,spaceone/pyjs,minghuascode/pyj,pyjs/pyjs,lovelysystems/pyjamas,andreyvit/pyjamas,certik/pyjamas,gpitel/pyjs,lancezlin/pyjs,andreyvit/pyjamas,Hasimir/pyjs,lovelysystems/pyjamas,spaceone/pyjs,pombredanne/pyjs,Hasimir/pyjs,certik/pyjamas,Hasimir/pyjs,lancezlin/pyjs,minghuascode/pyj,certik/pyjamas,certik/pyjamas,anandology/pyjamas,lovelysystems/pyjamas,anandology/pyjamas,lovelysystems/pyjamas,gpitel/pyjs,Hasimir/pyjs,pombredanne/pyjs,anandology/pyjamas,anandology/pyjamas,andreyvit/pyjamas,lancezlin/pyjs | examples/djangotasks/tasks.py | examples/djangotasks/tasks.py | from pyjamas.ui.Label import Label
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.TextBox import TextBox
from pyjamas.ui.ListBox import ListBox
from pyjamas.ui import KeyboardListener
from pyjamas.JSONService import JSONProxy
class TodoApp:
    """Pyjamas todo-list UI backed by the JSON-RPC DataService.

    The application object doubles as the keyboard/click listener for its
    widgets and as the async callback handler for the remote service.
    """

    def onModuleLoad(self):
        """Build the widget tree and attach it to the page root."""
        self.remote = DataService()
        panel = VerticalPanel()

        # Text box for entering new todos; Enter submits (see onKeyPress).
        self.todoTextBox = TextBox()
        self.todoTextBox.addKeyboardListener(self)

        # Clicking a list entry deletes it on the server (see onClick).
        self.todoList = ListBox()
        self.todoList.setVisibleItemCount(7)
        self.todoList.setWidth("200px")
        self.todoList.addClickListener(self)

        panel.add(Label("Add New Todo:"))
        panel.add(self.todoTextBox)
        panel.add(Label("Click to Remove:"))
        panel.add(self.todoList)

        self.status = Label()
        panel.add(self.status)

        RootPanel().add(panel)

    def onKeyUp(self, sender, keyCode, modifiers):
        pass

    def onKeyDown(self, sender, keyCode, modifiers):
        pass

    def onKeyPress(self, sender, keyCode, modifiers):
        """
        This function handles the onKeyPress event, and will add the item in the text box to the list when the user presses the enter key. In the future, this method will also handle the auto complete feature.
        """
        if keyCode == KeyboardListener.KEY_ENTER and sender == self.todoTextBox:
            task_id = self.remote.addTask(sender.getText(), self)
            sender.setText("")
            # A negative id means the async call could not even be issued.
            if task_id < 0:
                self.status.setText("Server Error or Invalid Response")

    def onClick(self, sender):
        """Delete the clicked todo item on the server."""
        task_id = self.remote.deleteTask(
            sender.getValue(sender.getSelectedIndex()), self)
        if task_id < 0:
            self.status.setText("Server Error or Invalid Response")

    def onRemoteResponse(self, response, request_info):
        """Refresh the visible list from the server's task-list response."""
        self.status.setText("response received")
        if request_info.method in ('getTasks', 'addTask', 'deleteTask'):
            self.status.setText(self.status.getText() + "HERE!")
            self.todoList.clear()
            # Each task is a (text, id) pair; show the text, stash the id
            # on the list entry so onClick can send it back for deletion.
            for task in response:
                self.todoList.addItem(task[0])
                self.todoList.setValue(self.todoList.getItemCount() - 1, task[1])
        else:
            self.status.setText(self.status.getText() + "none!")

    def onRemoteError(self, code, message, request_info):
        # Bug fix: `code` may be an int, so the old string concatenation
        # ("ERROR " + code) raised TypeError; use %-formatting instead.
        self.status.setText(
            "Server Error or Invalid Response: ERROR %s - %s" % (code, message))
class DataService(JSONProxy):
    # JSON-RPC proxy for the server-side task methods exposed at /services/.
    def __init__(self):
        JSONProxy.__init__(self, "/services/", ["getTasks", "addTask","deleteTask"])
if __name__ == "__main__":
    # Entry point when run directly (e.g. under pyjamas-desktop).
    app = TodoApp()
    app.onModuleLoad()
| apache-2.0 | Python | |
cbfb38e904c7bc75c0635d36e896feef6c44fde2 | add modbus_thread example | sourceperl/pyModbusTCP,W84TheSun/pyModbusTCP | examples/modbus_thread.py | examples/modbus_thread.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# modbus_thread
# start a thread for polling a set of registers, display result on console
import time
from threading import Thread, Lock
from pyModbusTCP.client import ModbusClient
SERVER_HOST = "localhost"
SERVER_PORT = 502
# set global
regs = []
# init a thread lock
regs_lock = Lock()
# modbus polling thread
def polling_thread():
    """Daemon loop: read holding registers 0-9 once per second into `regs`.

    The shared list is only replaced under `regs_lock`, and only when the
    read succeeded, so the display loop never sees a failed poll.
    """
    global regs
    c = ModbusClient(host=SERVER_HOST, port=SERVER_PORT)
    while True:
        # keep TCP open
        if not c.is_open():
            c.open()
        reg_list = c.read_holding_registers(0,10)
        if reg_list:
            # publish the fresh values atomically for the display loop
            with regs_lock:
                regs = reg_list
        time.sleep(1)
# start polling thread
tp = Thread(target=polling_thread)
tp.daemon = True
tp.start()
# display loop
while True:
with regs_lock:
print(regs)
time.sleep(1)
| mit | Python | |
3b50369b089a94eb961dcc170bee7b97c0345805 | Add script to mass-comment Jenkins triggers on PR | apache/beam,lukecwik/incubator-beam,apache/beam,robertwb/incubator-beam,chamikaramj/beam,lukecwik/incubator-beam,lukecwik/incubator-beam,apache/beam,lukecwik/incubator-beam,chamikaramj/beam,chamikaramj/beam,robertwb/incubator-beam,robertwb/incubator-beam,apache/beam,apache/beam,apache/beam,robertwb/incubator-beam,robertwb/incubator-beam,chamikaramj/beam,chamikaramj/beam,chamikaramj/beam,lukecwik/incubator-beam,iemejia/incubator-beam,robertwb/incubator-beam,lukecwik/incubator-beam,apache/beam,lukecwik/incubator-beam,apache/beam,robertwb/incubator-beam,apache/beam,chamikaramj/beam,lukecwik/incubator-beam,apache/beam,robertwb/incubator-beam,apache/beam,robertwb/incubator-beam,chamikaramj/beam,robertwb/incubator-beam,iemejia/incubator-beam,lukecwik/incubator-beam,chamikaramj/beam,chamikaramj/beam,lukecwik/incubator-beam | release/src/main/scripts/mass_comment.py | release/src/main/scripts/mass_comment.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script for mass-commenting Jenkins test triggers on a Beam PR."""
import itertools
import os
import socket
import sys
import time
import traceback
import re
import requests
from datetime import datetime
COMMENTS_TO_ADD=[
"Run Go PostCommit",
"Run Java PostCommit",
"Run Java PortabilityApi PostCommit",
"Run Java Flink PortableValidatesRunner Batch",
"Run Java Flink PortableValidatesRunner Streaming",
"Run Apex ValidatesRunner",
"Run Dataflow ValidatesRunner",
"Run Flink ValidatesRunner",
"Run Gearpump ValidatesRunner",
"Run Dataflow PortabilityApi ValidatesRunner",
"Run Samza ValidatesRunner",
"Run Spark ValidatesRunner",
"Run Python Dataflow ValidatesContainer",
"Run Python Dataflow ValidatesRunner",
"Run Python 3.5 Flink ValidatesRunner",
"Run Python 2 PostCommit",
"Run Python 3.5 PostCommit",
"Run SQL PostCommit",
"Run Go PreCommit",
"Run Java PreCommit",
"Run Java_Examples_Dataflow PreCommit",
"Run JavaPortabilityApi PreCommit",
"Run Portable_Python PreCommit",
"Run PythonLint PreCommit",
"Run Python PreCommit",
"Run Python DockerBuild PreCommit"
]
def executeGHGraphqlQuery(accessToken, query):
    '''Runs graphql query on GitHub.'''
    endpoint = 'https://api.github.com/graphql'
    auth_headers = {'Authorization': 'Bearer %s' % accessToken}
    reply = requests.post(url=endpoint, json={'query': query}, headers=auth_headers)
    return reply.json()
def getSubjectId(accessToken, prNumber):
    '''Returns the GraphQL node id of apache/beam pull request *prNumber*.'''
    query = '''
query FindPullRequestID {
  repository(owner:"apache", name:"beam") {
    pullRequest(number:%s) {
      id
    }
  }
}
''' % prNumber
    response = executeGHGraphqlQuery(accessToken, query)
    return response['data']['repository']['pullRequest']['id']
def fetchGHData(accessToken, subjectId, commentBody):
    '''Fetches GitHub data required for reporting Beam metrics'''
    # addComment mutation: posts *commentBody* on the PR node *subjectId*
    # and echoes the created comment back.
    query = '''
mutation AddPullRequestComment {
  addComment(input:{subjectId:"%s",body: "%s"}) {
    commentEdge {
        node {
        createdAt
        body
      }
    }
    subject {
      id
    }
  }
}
''' % (subjectId, commentBody)
    return executeGHGraphqlQuery(accessToken, query)
def postComments(accessToken, subjectId):
    '''
    Posts every Jenkins trigger phrase as its own comment on the PR,
    echoing each GraphQL response to stdout.
    '''
    for commentBody in COMMENTS_TO_ADD:
        print(fetchGHData(accessToken, subjectId, commentBody))
def probeGitHubIsUp():
    '''
    Returns True if GitHub responds to simple queries. Else returns False.
    '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex(('github.com', 443)) == 0
    finally:
        # Bug fix: the socket was previously never closed (leaked fd).
        sock.close()
################################################################################
if __name__ == '__main__':
    '''
    This script is supposed to be invoked directly.
    However for testing purposes and to allow importing,
    wrap work code in module check.
    '''
    print("Started.")
    # Bail out early rather than fail on every request when offline.
    if not probeGitHubIsUp():
        print("GitHub is unavailable, skipping fetching data.")
        exit();
    print("GitHub is available start fetching data.")
    # Interactive credentials: a GitHub token with repo scope and a PR number.
    accessToken = input("Enter your Github access token: ")
    pr = input("Enter the Beam PR number to test (e.g. 11403): ")
    subjectId = getSubjectId(accessToken, pr)
    postComments(accessToken, subjectId)
    print("Fetched data.")
    print('Done.')
| apache-2.0 | Python | |
081297b75fdcc9415be20e84b8db19a8eae483c9 | Create match_smiley_to_cvr.py | fnielsen/cvrminer,fnielsen/cvrminer,fnielsen/cvrminer | examples/match_smiley_to_cvr.py | examples/match_smiley_to_cvr.py | """Print match and missing match between smiley and CVR."""
from __future__ import print_function
from cvrminer.cvrmongo import CvrMongo
from cvrminer.smiley import Smiley
cvr_mongo = CvrMongo()
smiley = Smiley()
cvrs = smiley.all_cvrs()
n_missing = 0
n_ok = 0
for cvr in sorted(cvrs):
company = cvr_mongo.get_company(cvr)
if company:
n_ok += 1
print('cvr {} ok'.format(cvr))
else:
n_missing += 1
print('cvr {} missing'.format(cvr))
print("Missing: {}; Ok: {}.".format(n_missing, n_ok))
| apache-2.0 | Python | |
29080fb1764f4eb75ebe1dd79460cce00527bc91 | Add example of using tractography from another pipeline. | yeatmanlab/pyAFQ,arokem/pyAFQ,arokem/pyAFQ,yeatmanlab/pyAFQ | examples/plot_other_tracking.py | examples/plot_other_tracking.py | """
=============================================
Segmenting tractography from another pipeline
=============================================
The AFQ API provides facilities to segment tractography results obtained using
other software. For example, we often use
`qsiprep <https://qsiprep.readthedocs.io/en/latest/>`_ to preprocess our data
and reconstruct tractographies with software such as
`MRTRIX <https://www.mrtrix.org/>`_. Here, we will demonstrate how to use
these reconstructions in the pyAFQ segmentation and tractometry pipeline.
"""
import os
import os.path as op
import matplotlib.pyplot as plt
import nibabel as nib
import plotly
from AFQ import api
import AFQ.data as afd
##########################################################################
# Example data
# ---------------------
# The example data we will use here is generated from the Stanford HARDI
# dataset (https://purl.stanford.edu/ng782rw8378). The calls below organize the # preprocessed data according fetches
# the results of tractography with this dataset and organizes it within
# the `~/AFQ_data` folder.
afd.organize_stanford_data()
afd.fetch_stanford_hardi_tractography()
##########################################################################
# Reorganize data
# ---------------------
# We organize the data so that it conforms with the BIDS standard for
# derivatives:
# BIDS root of the (previously fetched) Stanford HARDI data set.
bids_path = op.join(op.expanduser('~'), 'AFQ_data', 'stanford_hardi')
# The external tractography gets its own derivatives pipeline folder.
tractography_path = op.join(bids_path, 'derivatives', 'my_tractography')
sub_path = op.join(tractography_path, 'sub-01', 'ses-01', 'dwi')
os.makedirs(sub_path, exist_ok=True)
# Move the downloaded tractogram into its BIDS-derivatives location.
os.rename(
    op.join(
        op.expanduser('~'),
        'AFQ_data',
        'stanford_hardi_tractography',
        'tractography_subsampled.trk'),
    op.join(
        sub_path,
        'sub-01_ses-01-dwi_tractography.trk'))
# Write the dataset_description.json that marks this folder as a pipeline.
afd.to_bids_description(
    tractography_path,
    **{"Name": "my_tractography",
       "PipelineDescription": {"Name": "my_tractography"}})
##########################################################################
# Once this is done, you should have a folder in your home directory that
# looks like this:
#
# | stanford_hardi
# | ├── dataset_description.json
# | └── derivatives
# | ├── freesurfer
# | │ ├── dataset_description.json
# | │ └── sub-01
# | │ └── ses-01
# | │ └── anat
# | │ ├── sub-01_ses-01_T1w.nii.gz
# | │ └── sub-01_ses-01_seg.nii.gz
# | ├── my_tractography
# | | ├── dataset_description.json
# | │ └── sub-01
# | │ └── ses-01
# | │ └── dwi
# | │ └── sub-01_ses-01-dwi_tractography.trk
# | └── vistasoft
# | ├── dataset_description.json
# | └── sub-01
# | └── ses-01
# | └── dwi
# | ├── sub-01_ses-01_dwi.bvals
# | ├── sub-01_ses-01_dwi.bvecs
# | └── sub-01_ses-01_dwi.nii.gz
##########################################################################
# Now, we can run AFQ, pointing to the derivatives of the
# "my_tractography" pipeline as inputs:
bundle_names = ["SLF", "ARC", "CST", "FP"]
# Point AFQ at the preprocessed data, and at the externally produced
# tractography via custom_tractography_bids_filters.
my_afq = api.AFQ(
    bids_path,
    dmriprep='vistasoft',
    bundle_info=bundle_names,
    custom_tractography_bids_filters={
        "suffix": "tractography",
        "scope": "my_tractography"
    })
# Accessing the property triggers segmentation and tract profiling.
my_afq.tract_profiles
| bsd-2-clause | Python | |
1f2917dd4146c2ddb6c0f5532e4aaa63f19f1a44 | Create 1.py | lavosprime/euler | python/problems/1/1.py | python/problems/1/1.py | sum = 0
for number in range(1000):
if not (number % 3 or number % 5):
sum = sum + number
print(sum)
| mit | Python | |
9641234ac5897ec3f1a5f6cf0b5a822e7b103ae8 | Update consecutive-numbers-sum.py | tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode | Python/consecutive-numbers-sum.py | Python/consecutive-numbers-sum.py | # Time: O(sqrt(n))
# Space: O(1)
# Given a positive integer N,
# how many ways can we write it as a sum of
# consecutive positive integers?
#
# Example 1:
#
# Input: 5
# Output: 2
# Explanation: 5 = 5 = 2 + 3
# Example 2:
#
# Input: 9
# Output: 3
# Explanation: 9 = 9 = 4 + 5 = 2 + 3 + 4
# Example 3:
#
# Input: 15
# Output: 4
# Explanation: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5
# Note: 1 <= N <= 10 ^ 9.
class Solution(object):
    def consecutiveNumbersSum(self, N):
        """
        :type N: int
        :rtype: int
        """
        # x + x+1 + x+2 + ... + x+l-1 = N = 2^k * M
        # => l*x + (l-1)*l/2 = N
        # => x = (N -(l-1)*l/2)/l = 2^k * M/l - (l-1)/2 is integer
        # => l could be 2 or any odd factor of M (excluding M)
        # => the answer is the number of all odd factors of M
        # if prime factorization of N is 2^k * p1^a * p2^b * ..
        # => answer is the number of all odd factors = (a+1) * (b+1) * ...
        result = 1
        # Use floor division (//=) so N stays an int under Python 3;
        # true division (/=) would silently turn N into a float.
        while N % 2 == 0:
            N //= 2
        i = 3
        while i*i <= N:
            count = 0
            while N % i == 0:
                N //= i
                count += 1
            result *= count+1
            i += 2
        if N > 1:
            result *= 2
        return result
| # Time: O(sqrt(n))
# Space: O(1)
# Given a positive integer N,
# how many ways can we write it as a sum of
# consecutive positive integers?
#
# Example 1:
#
# Input: 5
# Output: 2
# Explanation: 5 = 5 = 2 + 3
# Example 2:
#
# Input: 9
# Output: 3
# Explanation: 9 = 9 = 4 + 5 = 2 + 3 + 4
# Example 3:
#
# Input: 15
# Output: 4
# Explanation: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5
# Note: 1 <= N <= 10 ^ 9.
class Solution(object):
    def consecutiveNumbersSum(self, N):
        """
        :type N: int
        :rtype: int
        """
        # With N = 2^k * p1^a * p2^b * ..., the answer equals the count of
        # odd divisors of N, i.e. (a+1) * (b+1) * ...
        while N % 2 == 0:
            N /= 2
        answer = 1
        divisor = 3
        while divisor * divisor <= N:
            exponent = 0
            while N % divisor == 0:
                N /= divisor
                exponent += 1
            answer *= exponent + 1
            divisor += 2
        return answer * 2 if N > 1 else answer
| mit | Python |
5c084bf10cb8feda62ac46939b3508a8c0e6a080 | load csv files generic | Algentile/CryptoForecaster | parsers/data_parser.py | parsers/data_parser.py | import numpy as np
from tflearn.data_utils import load_csv
def parse_csv(csv_file):
features, labels = load_csv(csv_file, target_column=4, columns_to_ignore=None, has_header=True)
feature_tensor = np.array(features).reshape(len(features[0]), len(features))
label_tensor = np.array(labels).reshape(len(labels), 1)
return feature_tensor, label_tensor | mit | Python | |
8be9ab8de9558efa6ded7d184a3cdc8dad43e4ff | Add an ajax_aware_render utility. | lincolnloop/django-jsonit | jsonit/utils.py | jsonit/utils.py | import os
from django.http import HttpResponse
from django.template import RequestContext, loader
def ajax_aware_render(request, template_list, extra_context=None, **kwargs):
    """Render templates to an HttpResponse, preferring AJAX variants.

    For AJAX requests each candidate ``name.ext`` is preceded in the search
    order by ``name.ajax.ext`` so an AJAX-specific template wins if present.
    """
    if isinstance(template_list, basestring):
        template_list = [template_list]
    if request.is_ajax():
        new_template_list = []
        for name in template_list:
            base, ext = os.path.splitext(name)
            # Bug fix: os.path.splitext() keeps the leading dot in `ext`,
            # so the old '%s.ajax.%s' pattern produced 'page.ajax..html';
            # '%s.ajax%s' yields the intended 'page.ajax.html'.
            new_template_list.append('%s.ajax%s' % (base, ext))
            new_template_list.append(name)
        template_list = new_template_list
    c = RequestContext(request, extra_context)
    t = loader.select_template(template_list)
    return HttpResponse(t.render(c), **kwargs)
| bsd-3-clause | Python | |
a21add52424d81a36f5a34d067f70cfb2066636f | Add process module | gregthedoe/androtoolbox | androtoolbox/process.py | androtoolbox/process.py | import attr
import re
from .adb import adb
@attr.s
class Process(object):
name = attr.ib()
user = attr.ib()
pid = attr.ib(convert=int)
parent_pid = attr.ib(convert=int)
vsize = attr.ib(convert=int)
rss = attr.ib(convert=int)
wchan = attr.ib()
pc = attr.ib()
state = attr.ib()
@classmethod
def parse_from_line(cls, line):
user, pid, ppid, vsize, rss, wchan, pc, state, name = line.split()
return cls(name, user, pid, ppid, vsize, rss, wchan, pc, state)
def get_running_processes(filter=None):
    """
    Get all running processes, (optionally) matching a filter.

    :param filter: An optional regex to filter process names
    :type filter: str | None
    :rtype: list(Process)
    """
    # NOTE: `filter` shadows the builtin, but renaming it would break
    # keyword callers.
    raw_data = adb.shell('ps').splitlines()[1:]  # The first line is the table headers
    processes = [Process.parse_from_line(raw_data_line) for raw_data_line in raw_data]
    if filter:
        # re.search matches anywhere in the name, not just at the start.
        processes = [p for p in processes if re.search(filter, p.name)]
    return processes
def pid_of(process):
    """
    Get the PID of a running process

    :param process: The process name
    :return: The numeric pid, or None when zero or several processes match.
    """
    processes = get_running_processes(process)
    if len(processes) != 1:
        return None
    # Bug fix: return the pid itself rather than the whole Process record,
    # as the function name and docstring promise (and as kill() expects).
    return processes[0].pid
def kill(process):
    """
    Kill a running process.

    note: Uses su
    :param process: The process' name or pid
    """
    try:
        pid = int(process)
    except ValueError:
        match = pid_of(process)
        # Tolerate pid_of() returning either a bare pid or a Process record.
        pid = getattr(match, 'pid', match)
    if pid:
        # Bug fix: the pid was never interpolated into the command, so the
        # literal string 'kill -9 %s' was sent to the shell.
        adb.shell('kill -9 %s' % pid, use_su=True)
| mit | Python | |
add720894d1d29eb80ee99986c7e8473ef4f3067 | upgrade script for translations works for published items (#1431) | superdesk/superdesk-core,petrjasek/superdesk-core,ioanpocol/superdesk-core,ioanpocol/superdesk-core,superdesk/superdesk-core,mdhaman/superdesk-core,petrjasek/superdesk-core,petrjasek/superdesk-core,superdesk/superdesk-core,mdhaman/superdesk-core,superdesk/superdesk-core,petrjasek/superdesk-core,mdhaman/superdesk-core,ioanpocol/superdesk-core | superdesk/data_updates/00015_20181127-105425_archive.py | superdesk/data_updates/00015_20181127-105425_archive.py | # -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Author : tomas
# Creation: 2018-11-27 10:54
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
# This upgrade script does the same as the previous one 00014_20181114-153727_archive.py
# except this works across multiple collections
def get_root_nodes(tree_items):
    """Return the nodes without a parent — one root per translation tree."""
    return [node for node in tree_items.values() if node.parent is None]
def get_ids_recursive(list_of_nodes, resource):
    """Walk the trees depth-first, collecting ids of nodes stored in *resource*.

    Descendant ids come before their ancestor's id, matching the original
    traversal order.
    """
    collected = []
    for node in list_of_nodes:
        # Recursing on an empty children list simply yields [].
        collected.extend(get_ids_recursive(node.children, resource))
        if node.resource == resource:
            collected.append(node.id)
    return collected
class TreeNode:
    """Node in the translation forest, keyed by an item id."""

    def __init__(self, id):
        self.id = id
        # Links and metadata are filled in while the forest is being built.
        self.children = []
        self.resource = None
        self.parent = None
class DataUpdate(DataUpdate):
    # Shadowing the imported base of the same name is the superdesk
    # data-update convention; the migration runner discovers this class.
    resource = 'archive' # will use multiple resources, keeping this here so validation passes

    def forwards(self, mongodb_collection, mongodb_database):
        # Maps item id -> TreeNode; node ids may come from archive['_id']
        # or published['item_id'].
        tree_items = {}
        # `translated_from` can refer to archive['_id'] or published['item_id']
        for resource in ['archive', 'published']:
            collection = mongodb_database[resource]
            # building multiple trees
            for item in collection.find({'translated_from': {'$exists': True}}):
                node_id = item['_id']
                if node_id not in tree_items:
                    tree_items[node_id] = TreeNode(node_id)
                node = tree_items[node_id]
                node.resource = resource
                parent_id = item['translated_from']
                # Parents may be created lazily here before they are visited
                # themselves (their resource stays None if never visited).
                if parent_id not in tree_items:
                    tree_items[parent_id] = TreeNode(parent_id)
                node.parent = tree_items[parent_id]
                node.parent.children.append(node)
        # processing trees
        for root_node in get_root_nodes(tree_items):
            # Every item in a tree shares the root's id as translation_id.
            updates = {'translation_id': root_node.id}
            for resource in ['archive', 'published']:
                service = get_resource_service(resource)
                ids = get_ids_recursive([root_node], resource)
                for item_id in ids:
                    item = service.find_one(req=None, _id=item_id)
                    if item is not None:
                        print(service.system_update(item_id, updates, item))

    def backwards(self, mongodb_collection, mongodb_database):
        raise NotImplementedError()
| agpl-3.0 | Python | |
56c955b5700eb9e133024c9f51e39af9b065dfb1 | Add neopixel rainbow demo. | mathisgerdes/microbit-macau | demos/rainbow.py | demos/rainbow.py | # Add your Python code here. E.g.
from microbit import *
import neopixel
np = neopixel.NeoPixel(pin13, 12)
rainbow_raw = [
(255, 0, 0),
(255, 127, 0),
(255, 255, 0),
(127, 255, 0),
(0, 255, 0),
(0, 255, 127),
(0, 255, 255),
(0, 127, 255),
(0, 0, 255),
(127, 0, 255),
(255, 0, 255),
(255, 0, 127),
]
# Scale a colour to ~5% brightness so the NeoPixels are comfortable to view.
dim = lambda r, g, b: (r//20, g//20, b//20)
rainbow = [dim(*c) for c in rainbow_raw]
shift = 0
while True:
    # Paint both 6-pixel halves of the 12-pixel strip with the same
    # (shifted) rainbow.
    for i in range(6):
        np[i] = rainbow[(i + shift) % len(rainbow)]
        np[i+6] = rainbow[(i + shift) % len(rainbow)]
    np.show()
    # Tilting the board along Y rotates the rainbow; the sign of the tilt
    # sets the direction.
    if abs(accelerometer.get_y()) >= 90:
        shift += 1 if accelerometer.get_y() > 0 else -1
        shift %= len(rainbow)
    sleep(100)
255d9b002b820d1c475d2434858fd5ab3c6847cf | add SeriesStim | tyarkoni/featureX,tyarkoni/pliers | pliers/stimuli/misc.py | pliers/stimuli/misc.py | """Miscellaneous Stim classes."""
import numpy as np
import pandas as pd
from .base import Stim
class SeriesStim(Stim):

    '''Represents a pandas Series as a pliers Stim.

    Args:
        data (dict, pd.Series, array-like): A dictionary, pandas Series, or any
            other iterable (e.g., list or 1-D numpy array) that can be coerced
            to a pandas Series.
        filename (str, optional): Path or URL to data file. Must be readable
            using pd.read_csv().
        onset (float): Optional onset of the SeriesStim (in seconds) with
            respect to some more general context or timeline the user wishes
            to keep track of.
        duration (float): Optional duration of the SeriesStim, in seconds.
        order (int): Optional order of stim within some broader context.
        url (str): Optional URL to read data from. Must be readable using
            pd.read_csv().
        column (str): If filename or url is passed, defines the name of the
            column in the data source to read in as data.
        name (str): Optional name to give the SeriesStim instance. If None
            is provided, the name will be derived from the filename if one is
            defined. If no filename is defined, name will be an empty string.
        pd_args: Optional keyword arguments passed onto pd.read_csv() (e.g.,
            to control separator, header, etc.).
    '''

    def __init__(self, data=None, filename=None, onset=None, duration=None,
                 order=None, url=None, column=None, name=None, **pd_args):

        if data is None:
            if filename is None and url is None:
                raise ValueError("No data provided! One of the data, filename,"
                                 "or url arguments must be passed.")
            # Bug fix: this previously read `data or url`, but `data` is
            # always None on this branch, so passing only a filename handed
            # None to pd.read_csv().
            source = filename or url
            data = pd.read_csv(source, squeeze=True, **pd_args)
            if isinstance(data, pd.DataFrame):
                # More than one column survived parsing; the caller must say
                # which one holds the series.
                if column is None:
                    raise ValueError("Data source contains more than one "
                                     "column; please specify which column to "
                                     "use by passing the 'column' argument.")
                data = data.loc[:, column]

        data = pd.Series(data)
        self.data = data
        super().__init__(filename, onset, duration, order, name)

    def save(self, path):
        '''Write the underlying Series to a CSV file at *path*.'''
        self.data.to_csv(path)
| bsd-3-clause | Python | |
d270330375060d0bd8694bc8a2ea8bdbb3762586 | add show_single_event for debugging | deepjets/deepjets,deepjets/deepjets,deepjets/deepjets | show_single_event.py | show_single_event.py | from deepjets.generate import get_generator_input, generate
gen_input = get_generator_input('pythia', 'w.config', random_state=1)
for event in generate(gen_input, 1):
print event.jets
print event.subjets
print event.subjets.shape
print event.trimmed_constit
print event.trimmed_constit.shape
| bsd-3-clause | Python | |
974ebd337c00a8b4a07991983eea0b9b60e1af08 | Add example binary sink | librato/statsite,twitter-forks/statsite,tsunli/statsite,jmptrader/statsite,librato/statsite,remind101/statsite,sleepybishop/statsite,drawks/statsite,Instagram/statsite,sleepybishop/statsite,sleepybishop/statsite,librato/statsite,johnkeates/statsite,nspragg/statsite,bossjones/statsite,lazybios/statsite,drawks/statsite,remind101/statsite,nspragg/statsite,johnkeates/statsite,ualtinok/statsite,armon/statsite,u-s-p/statsite,johnkeates/statsite,deseretdigital/statsite-proxy,bossjones/statsite,tsunli/statsite,lazybios/statsite,kuba--/statsite,remind101/statsite,nwangtw/statsite,tsunli/statsite,twitter-forks/statsite,twitter-forks/statsite,Instagram/statsite,nwangtw/statsite,bossjones/statsite,ualtinok/statsite,jmptrader/statsite,zeedunk/statsite,zeedunk/statsite,zeedunk/statsite,tsunli/statsite,sleepybishop/statsite,kuba--/statsite,theatrus/statsite,nwangtw/statsite,jmptrader/statsite,drawks/statsite,sleepybishop/statsite,nspragg/statsite,nwangtw/statsite,ualtinok/statsite,twitter-forks/statsite,u-s-p/statsite,lazybios/statsite,statsite/statsite,johnkeates/statsite,statsite/statsite,librato/statsite,johnkeates/statsite,armon/statsite,kuba--/statsite,armon/statsite,remind101/statsite,nspragg/statsite,ualtinok/statsite,nspragg/statsite,deseretdigital/statsite-proxy,nspragg/statsite,armon/statsite,drawks/statsite,jmptrader/statsite,remind101/statsite,statsite/statsite,Instagram/statsite,drawks/statsite,twitter-forks/statsite,theatrus/statsite,tsunli/statsite,nwangtw/statsite,ualtinok/statsite,lazybios/statsite,zeedunk/statsite,theatrus/statsite,bossjones/statsite,twitter-forks/statsite,armon/statsite,zeedunk/statsite,nwangtw/statsite,statsite/statsite,librato/statsite,drawks/statsite,u-s-p/statsite,kuba--/statsite,tsunli/statsite,kuba--/statsite,jmptrader/statsite,kuba--/statsite,theatrus/statsite,ualtinok/statsite,zeedunk/statsite,statsite/statsite,u-s-p/statsite,deseretdigital/statsite-proxy,lazybios/stats
ite,librato/statsite,theatrus/statsite,u-s-p/statsite,u-s-p/statsite,remind101/statsite,jmptrader/statsite | sinks/binary_sink.py | sinks/binary_sink.py | import struct
import sys
# Line format. We have:
# 8 byte unsigned timestamp
# 1 byte metric type
# 1 byte value type
# 2 byte key length
# 8 byte value
# ("<" selects little-endian with standard sizes and no padding.)
LINE = struct.Struct("<QBBHd")
# Size in bytes of the fixed prefix described above (8 + 1 + 1 + 2 + 8).
PREFIX_SIZE = 20
# Metric-type byte -> human-readable metric kind.
TYPE_MAP = {
    1: "kv",
    2: "counter",
    3: "timer"
}
# Value-type byte -> human-readable aggregate name.  128 is a flag bit
# marking percentile entries (see the loop below).
VAL_TYPE_MAP = {
    0: "kv",
    1: "sum",
    2: "sum sq",
    3: "mean",
    4: "count",
    5: "stddev",
    6: "min",
    7: "max",
    128: "percentile"
}
# Pre-compute all the possible percentiles
# (percentile flag bit OR'd with the percentile number 1..99 -> "P01".."P99").
for x in xrange(1, 100):
    VAL_TYPE_MAP[128 | x] = "P%02d" % x
def main():
while True:
# Read the prefix
prefix = sys.stdin.read(20)
if not prefix or len(prefix) != 20:
return
# Unpack the line
(ts, type, val_type, key_len, val) = LINE.unpack(prefix)
type = TYPE_MAP[type]
val_type = VAL_TYPE_MAP[val_type]
# Read the key
key = sys.stdin.read(key_len)
# Print
print ts, type, val_type, key, val
# Allow use directly as a statsite sink: stream records from stdin.
if __name__ == "__main__":
    main()
| bsd-3-clause | Python | |
420ebb50cfb5a366b35d058ad6018857b899a19e | Add function approximator to deal with off-switch | rmoehn/cartpole | hiora_cartpole/offswitch_hfa.py | hiora_cartpole/offswitch_hfa.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# HO … higher-order
class SliceHOFeatureVec(object):
    """Wrap a feature vector so it operates on one fixed slice of a larger
    weight vector.

    ``slice_i`` selects which block of ``entries_per_slice`` consecutive
    weights the wrapped ``feature_vec`` sees.
    """

    def __init__(self, slice_i, entries_per_slice, feature_vec):
        self.feature_vec = feature_vec
        block_start = slice_i * entries_per_slice
        self.slice = slice(block_start, block_start + entries_per_slice)

    def dot(self, vec):
        """Dot product of the wrapped features with the selected block."""
        return self.feature_vec.dot(vec[self.slice])

    def add_to(self, vec):
        """Add the wrapped features into the selected block of ``vec``.

        Warning: Modifies vec.
        """
        self.feature_vec.add_to(vec[self.slice])

    def alphabounds_diffdot(self, prev, elig):
        """Difference of dot products against the eligibility trace.

        Credits: http://people.cs.umass.edu/~wdabney/papers/alphaBounds.pdf
        """
        return self.dot(elig) - prev.dot(elig)
def make_feature_vec(feature_vec, n_weights):
    """Lift ``feature_vec`` into one indexed by a two-part state.

    ``state[0]`` selects which of the two weight blocks is used
    (presumably the off-switch state -- confirm with callers) and
    ``state[1]`` is passed through to the wrapped ``feature_vec``.

    Returns ``(total_weights, wrapped)`` where the weight count doubles
    because there is one block per value of ``state[0]``.
    """
    def sliced_feature_vec(state, action):
        block_index, inner_state = state[0], state[1]
        return SliceHOFeatureVec(block_index, n_weights,
                                 feature_vec(inner_state, action))

    return n_weights * 2, sliced_feature_vec
| mit | Python | |
2db2727dcccf81c3dca2e86efabd8e40afb223d1 | Automate Transfers: Add another pre-transfer script | artefactual/automation-tools,artefactual/automation-tools,finoradin/automation-tools | transfers/pre-transfer/add_metadata.py | transfers/pre-transfer/add_metadata.py | #!/usr/bin/env python2
import json
import os
import sys
def main(transfer_path):
    """Write a dc.identifier metadata.json into the transfer directory.

    The identifier is taken from the middle segment of a directory name of
    the form ``<prefix>---<identifier>---<suffix>``.  Returns 0 on success,
    1 when the name does not have exactly three such segments.
    """
    transfer_name = os.path.basename(transfer_path)
    try:
        _, identifier, _ = transfer_name.split('---')
    except ValueError:
        # Wrong number of '---' separators: nothing we can extract.
        return 1

    entries = [
        {
            'parts': 'objects',
            'dc.identifier': identifier,
        }
    ]

    metadata_dir = os.path.join(transfer_path, 'metadata')
    if not os.path.exists(metadata_dir):
        os.makedirs(metadata_dir)

    with open(os.path.join(metadata_dir, 'metadata.json'), 'w') as handle:
        json.dump(entries, handle)
    return 0
# CLI entry point: first argument is the transfer directory path; the
# return value of main() becomes the process exit status.
if __name__ == '__main__':
    transfer_path = sys.argv[1]
    sys.exit(main(transfer_path))
| agpl-3.0 | Python | |
649860778dd61bdcf8d2863222a98c97cef425c6 | Add tool for parsing desired capabilities from a file | seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase,seleniumbase/SeleniumBase | seleniumbase/core/capabilities_parser.py | seleniumbase/core/capabilities_parser.py | import re
def get_desired_capabilities(cap_file):
    """Parse a Python "desired capabilities" file into a dict.

    Two line styles are supported, with any mix of single/double quotes:

      * dict-literal entries:  ``'key': 'value',``  (possibly on the same
        line as the ``desired_cap = {`` opener)
      * item assignments:      ``caps['key'] = 'value'``

    :param cap_file: path to the capabilities file; must end in ``.py``.
    :returns: dict mapping capability names to their (string) values.
    :raises Exception: if ``cap_file`` is not a ``.py`` file, or if no
        capabilities could be parsed from it.
    """
    if not cap_file.endswith('.py'):
        raise Exception("\n\n`%s` is not a Python file!\n\n" % cap_file)

    with open(cap_file, 'r') as f:
        all_code = f.read()

    # Build one compiled pattern per (key-quote, value-quote) combination
    # instead of spelling out eight near-identical regexes inline, as the
    # original did.
    patterns = []
    for key_quote in ("'", '"'):
        for value_quote in ("'", '"'):
            # 'key' : 'value'  (optionally followed by "," or "}")
            patterns.append(re.compile(
                r"^\s*%s([\S\s]+)%s\s*:\s*%s([\S\s]+)%s\s*[,}]?\s*$"
                % (key_quote, key_quote, value_quote, value_quote)))
            # caps['key'] = 'value'
            patterns.append(re.compile(
                r"^\s*caps\[%s([\S\s]+)%s\]\s*=\s*%s([\S\s]+)%s\s*$"
                % (key_quote, key_quote, value_quote, value_quote)))

    desired_capabilities = {}
    num_capabilities = 0
    for line in all_code.split('\n'):
        # Allow the first entry to share a line with the dict opener.
        if "desired_cap = {" in line:
            line = line.split("desired_cap = {")[1]
        for pattern in patterns:
            match = pattern.match(line)
            if match:
                desired_capabilities[match.group(1)] = match.group(2)
                num_capabilities += 1
                break

    if num_capabilities == 0:
        raise Exception("Unable to parse desired capabilities file!")
    return desired_capabilities
| mit | Python | |
29555289b28e63655e5bb6fa89d163b5e3022827 | add supervised loss as separate term | 255BITS/HyperGAN,255BITS/HyperGAN | hypergan/losses/supervised.py | hypergan/losses/supervised.py | import tensorflow as tf
from hypergan.util.ops import *
from hypergan.util.hc_tf import *
import hyperchamber as hc
def config():
    """Return a random hyperchamber configuration for this loss."""
    selector = hc.Selector()
    options = (
        ("reduce", [tf.reduce_mean]),  # reduce_sum, reduce_logexp work too
        ('create', create),
        ('batch_norm', layer_norm_1),
    )
    for key, value in options:
        selector.set(key, value)
    return selector.random_config()
def create(config, gan):
    """Attach a supervised classification loss to the discriminator.

    Projects ``gan.graph.d_real`` to ``y_dims`` logits, applies the
    configured batch norm, and stores the mean softmax cross-entropy on
    ``gan.graph.d_class_loss``.  Returns ``[d_loss_term, g_loss_term]``
    where the generator term is None.
    """
    normalize = config.batch_norm
    logits = gan.graph.d_real
    logits = linear(logits, gan.config.y_dims, scope="d_fc_end", stddev=0.003)
    logits = normalize(gan.config.batch_size, name='d_bn_end')(logits)
    d_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits, gan.graph.y)
    gan.graph.d_class_loss = tf.reduce_mean(d_class_loss)
    return [d_class_loss, None]
| mit | Python | |
e1478f694d6ad422a87e03f71a79a8c1b5e77c5c | build for the entire framework | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/AM/KPIFramework/setup.py | Sketches/AM/KPIFramework/setup.py | #!/usr/bin/env python
#
# (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
setup(name = "KPI",
      version = "0.1.0",
      description = "KPI Framework for building secure streaming server",
      author = "Anagha Mudigonda & Kamaelia Contributors",
      author_email = "anagha_m@users.sourceforge.net",
      url = "http://kamaelia.sourceforge.net/",
      # Packages shipped with the distribution.
      # NOTE(review): the trailing "" entry makes the project root itself a
      # package -- confirm this is intentional.
      packages = ["KPI",
                  "KPI.Client",
                  "KPI.Server",
                  "KPI.Crypto",
                  "KPI.DB",
                  ""],
      # NOTE(review): long_description is currently empty.
      long_description = """
"""
      )
| apache-2.0 | Python | |
53306793268cb31944d42caf95c275afcbe97e6d | Add migration for creating the Professional Certificate program type | edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery | course_discovery/apps/edx_catalog_extensions/migrations/0002_create_professional_certificate_program_type.py | course_discovery/apps/edx_catalog_extensions/migrations/0002_create_professional_certificate_program_type.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-19 19:51
from __future__ import unicode_literals
from django.db import migrations
# Seat-type slugs whose purchases apply toward this program type.
PAID_SEAT_TYPES = ('credit', 'professional', 'verified',)
# Display name of the ProgramType this data migration manages.
PROGRAM_TYPE = 'Professional Certificate'
def add_program_type(apps, schema_editor):
    """Create (or refresh) the 'Professional Certificate' program type and
    point its applicable seat types at the paid seat types."""
    SeatType = apps.get_model('course_metadata', 'SeatType')
    ProgramType = apps.get_model('course_metadata', 'ProgramType')

    program_type, __ = ProgramType.objects.update_or_create(name=PROGRAM_TYPE)
    paid_seat_types = SeatType.objects.filter(slug__in=PAID_SEAT_TYPES)
    # Reset the M2M so re-running the migration is idempotent.
    program_type.applicable_seat_types.clear()
    program_type.applicable_seat_types.add(*paid_seat_types)
    program_type.save()
def drop_program_type(apps, schema_editor):
    """Reverse operation: delete the program type created above."""
    program_type_model = apps.get_model('course_metadata', 'ProgramType')
    program_type_model.objects.filter(name=PROGRAM_TYPE).delete()
class Migration(migrations.Migration):
    # Must run after the app's squashed initial migration.
    dependencies = [
        ('edx_catalog_extensions', '0001_squashed_0003_create_publish_to_marketing_site_flag'),
    ]
    operations = [
        # Reversible data migration: forward creates the program type,
        # backward removes it.
        migrations.RunPython(add_program_type, drop_program_type)
    ]
| agpl-3.0 | Python | |
8b894a02cf1d271b7df65e1c3efcac499100ed51 | Add submission form tests | Nikola-K/django_reddit,Nikola-K/django_reddit,Nikola-K/django_reddit | reddit/tests/test_submission.py | reddit/tests/test_submission.py | from django.test import TestCase, Client
from reddit.forms import SubmissionForm
class TestSubmissionForm(TestCase):
    """Validation behaviour of ``SubmissionForm``."""

    def test_full_valid_submission(self):
        """Title, url and text together make a valid form."""
        form = SubmissionForm(data={
            'title': 'submission_title',
            'url': 'http://example.com',
            'text': 'submission text',
        })
        self.assertTrue(form.is_valid())

    def test_minimum_data_required(self):
        """A title by itself is sufficient."""
        form = SubmissionForm(data={'title': 'submission title'})
        self.assertTrue(form.is_valid())

    def test_invalid_data(self):
        """Overlong fields and a malformed url each yield a field error."""
        form = SubmissionForm(data={
            'title': '.' * 300,
            'url': 'notaurl',
            'text': '.' * 5001,
        })
        self.assertEqual(form.errors['title'], [u"Ensure this value has at most 250 characters (it has 300)."])
        self.assertEqual(form.errors['url'], [u"Enter a valid URL."])
        self.assertEqual(form.errors['text'], [u"Ensure this value has at most 5000 characters (it has 5001)."])
        self.assertFalse(form.is_valid())
| apache-2.0 | Python | |
d2802eebe9311243aabc5954f26719fa5544b378 | Create matchingBrackets.py | prashantas/MyDataScience | GeneralPython/PyDataStructure/matchingBrackets.py | GeneralPython/PyDataStructure/matchingBrackets.py | # https://www.geeksforgeeks.org/check-for-balanced-parentheses-in-an-expression/
def areParanthesisBalanced(expr):
    """Return True if every bracket in ``expr`` is properly matched/nested.

    Handles ``()``, ``{}`` and ``[]``; any other character is ignored, so
    full expressions like ``"a(b)c"`` work.  (The original wrongly returned
    False for any non-bracket character seen while the stack was empty, and
    shadowed the ``chr`` builtin.)
    """
    # Closing bracket -> the opener it must match.
    pairs = {')': '(', '}': '{', ']': '['}
    openers = set(pairs.values())

    stack = []
    for ch in expr:
        if ch in openers:
            stack.append(ch)
        elif ch in pairs:
            # A closer with an empty stack, or matching the wrong opener,
            # means the expression is unbalanced.
            if not stack or stack.pop() != pairs[ch]:
                return False
    # Balanced only if no opener is left waiting for its closer.
    return not stack
if __name__ == '__main__':
    # Tiny manual smoke check.
    print("#########")
    sample_expr = "{()}[]"
    print(areParanthesisBalanced(sample_expr))
| bsd-2-clause | Python | |
6e9a789aa3113403d6d60ca662605506ce70c4d1 | Add empty Resources module. | andela-akiura/bucketlist | app/api_v1/resources.py | app/api_v1/resources.py | """This module contains the resources to be served on the endpoints."""
| mit | Python | |
faa1c167e6551da738f2039ef9e9373bde50ab41 | Add unittest utils. | eronde/py_word_suggest,eronde/vim_suggest,eronde/py_word_suggest,eronde/vim_suggest | app/tests/test_utils.py | app/tests/test_utils.py | import unittest
import re
from app.util.utils import *
class utilsTest(unittest.TestCase):
    """Tests for the helpers in ``app.util.utils``."""
    def setUp(self):
        """utils tests: no fixtures needed yet."""
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def test_is_empty(self):
        """utils, is_empty: an empty string is reported as empty."""
        self.assertTrue(is_empty(''))
# Run this module's tests directly with ``python test_utils.py``.
if __name__ == '__main__':
    unittest.main()
| mit | Python | |
d9486bc6180a2dfe38a953eb84184e0410e1cb66 | Add a Quartz backend for the null toolkit | tommy-u/enable,tommy-u/enable,tommy-u/enable,tommy-u/enable | enthought/enable/null/quartz.py | enthought/enable/null/quartz.py | #------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
import numpy as np
from enthought.kiva.fonttools import Font
from enthought.kiva.quartz import ABCGI
class NativeScrollBar(object):
    # Stub: the null toolkit backend provides no native scrollbar widget.
    pass
class Window(object):
    # Stub: the null toolkit backend provides no real window implementation.
    pass
# Compiled paths come straight from Quartz's mutable CGPath wrapper.
CompiledPath = ABCGI.CGMutablePath
class GraphicsContext(ABCGI.CGLayerContext):
    """CGLayerContext initialised from either a (width, height) pair or an
    image array, in which case the layer is seeded with that image."""

    def __init__(self, size_or_array, window_gc=None, *args, **kwds):
        base_gc = window_gc
        if not base_gc:
            # No window gc available: spawn the layer from a tiny 1x1
            # bitmap context instead.  A real window gc is preferable
            # since matching data formats make drawing the layer faster.
            base_gc = ABCGI.CGBitmapContext((1, 1))

        if isinstance(size_or_array, np.ndarray):
            # Seed the layer with the provided image.
            seed_image = ABCGI.CGImage(size_or_array)
            width = seed_image.width
            height = seed_image.height
        else:
            # Plain size; no initial contents.
            seed_image = None
            width, height = size_or_array

        ABCGI.CGLayerContext.__init__(self, base_gc, (width, height))
        if seed_image is not None:
            self.draw_image(seed_image)

    @classmethod
    def create_from_gc(cls, gc, size_or_array, *args, **kwds):
        """Alternate constructor that spawns the layer from ``gc``."""
        return cls(size_or_array, gc, *args, **kwds)
def font_metrics_provider():
    """Return a minimal 1x1 GraphicsContext with a default Font set,
    suitable for querying font metrics."""
    metrics_gc = GraphicsContext((1, 1))
    metrics_gc.set_font(Font())
    return metrics_gc
| bsd-3-clause | Python | |
5f9a2fe783891dd5a1f926060fcfa2561150d840 | add cleese pipeline runner | adamginsburg/APEX_CMZ_H2CO,keflavich/APEX_CMZ_H2CO,keflavich/APEX_CMZ_H2CO,adamginsburg/APEX_CMZ_H2CO | reduction/run_pipeline_cleese.py | reduction/run_pipeline_cleese.py | import make_apex_cubes
from os.path import join
# Local filesystem layout on this machine (cleese scratch space).
root = '/scratch/aginsbur/apex/'
rawpath = join(root,'raw/')
reducedpath = join(root,'reduced/')
# Redirect the pipeline module's path globals to the local layout before
# running anything.
make_apex_cubes.june2013datapath = rawpath
make_apex_cubes.june2013path = join(reducedpath,'june2013/')
make_apex_cubes.h2copath = join(reducedpath, 'h2co_cubes/')
make_apex_cubes.mergepath = join(reducedpath, 'merged_datasets/')
make_apex_cubes.aorawpath = rawpath
make_apex_cubes.aopath = join(reducedpath, '2010_reduced/')
make_apex_cubes.diagplotdir = join(root,'diagnostic_plots/')
# Run the full reduction with the paths configured above.
make_apex_cubes.do_everything()
| bsd-3-clause | Python | |
a659f0f8f4672933fc36cecfe62c65366c496f07 | Add a package for VarDictJava@1.5.1 (#5626) | LLNL/spack,matthiasdiener/spack,mfherbst/spack,matthiasdiener/spack,lgarren/spack,krafczyk/spack,LLNL/spack,lgarren/spack,iulian787/spack,krafczyk/spack,EmreAtes/spack,EmreAtes/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,matthiasdiener/spack,EmreAtes/spack,LLNL/spack,skosukhin/spack,krafczyk/spack,skosukhin/spack,skosukhin/spack,lgarren/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,skosukhin/spack,skosukhin/spack,mfherbst/spack,lgarren/spack,tmerrick1/spack,lgarren/spack,tmerrick1/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/vardictjava/package.py | var/spack/repos/builtin/packages/vardictjava/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
class Vardictjava(Package):
    """VarDictJava is a variant discovery program written in Java.
    It is a partial Java port of VarDict variant caller."""

    homepage = "https://github.com/AstraZeneca-NGS/VarDictJava"
    url = "https://github.com/AstraZeneca-NGS/VarDictJava/releases/download/v1.5.1/VarDict-1.5.1.tar"

    version('1.5.1', '8c0387bcc1f7dc696b04e926c48b27e6')

    # The release ships pre-built jars; Java is only needed at run time.
    depends_on('java@8:', type='run')

    def install(self, spec, prefix):
        """Copy the launcher script and the bundled jars into the prefix."""
        mkdirp(prefix.bin)
        install('bin/VarDict', prefix.bin)

        mkdirp(prefix.lib)
        # Iterate the glob directly -- the original built a pointless
        # ``[x for x in glob.glob(...)]`` intermediate copy.
        for jar in glob.glob("lib/*jar"):
            install(jar, prefix.lib)
| lgpl-2.1 | Python | |
e7b40a6f10cd70c73134829403a87068df4665ff | Add 101-symmetric-tree.py, solve it both recursively and iteratively | mvj3/leetcode | 101-symmetric-tree.py | 101-symmetric-tree.py | """
Question:
Symmetric Tree
Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
For example, this binary tree is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following is not:
1
/ \
2 2
\ \
3 3
Note:
Bonus points if you could solve it both recursively and iteratively.
confused what "{1,#,2,3}" means? > read more on how binary tree is serialized on OJ.
Performance:
1. Total Accepted: 74418 Total Submissions: 232487 Difficulty: Easy
2. SolutionRecursive # => Your runtime beats 81.30% of python submissions.
3. SolutionIterate # => Your runtime beats 46.96% of python submissions.
"""
# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child TreeNode, or None
        self.right = None  # right child TreeNode, or None
    def __repr__(self):
        # Debug-friendly representation, e.g. "<TreeNode#1>".
        return "<TreeNode#{}>".format(self.val)
class SolutionRecursive(object):
    """Recursive mirror check: walk the left subtree and the right subtree
    in opposite directions, comparing values position by position."""

    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        if root is None:
            return True
        return self._mirrored(root.left, root.right)

    def _mirrored(self, a, b):
        # Both positions empty: symmetric here.
        if a is None and b is None:
            return True
        # Exactly one empty: the shapes differ.
        if a is None or b is None:
            return False
        # Values must agree and the children must mirror crosswise.
        return (a.val == b.val
                and self._mirrored(a.left, b.right)
                and self._mirrored(a.right, b.left))
class SolutionIterate(object):
    """Iterative level-order mirror check.

    A level (with None placeholders for missing children) must read the
    same forwards and backwards for the tree to be symmetric.
    """

    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        if root is None:
            return True

        level = [root.left, root.right]
        while level:
            # Each parent contributes exactly two entries, so a valid
            # level always has even length.
            if len(level) % 2 != 0:
                return False

            # Extract values directly; the original constructed throwaway
            # TreeNode(None) objects just to read back None (and used ``/``
            # for the index, which breaks under true division).
            values = [node.val if node is not None else None
                      for node in level]
            half = len(values) // 2
            if values[:half] != values[half:][::-1]:
                return False

            # Next level: expand only the real (non-None) nodes, keeping
            # their None children as shape placeholders.
            next_level = []
            for node in level:
                if node is not None:
                    next_level.extend((node.left, node.right))
            level = next_level

        return True
# The first assignment in the original (``Solution = SolutionRecursive``)
# was dead code, immediately overwritten.  Keep a single explicit choice
# for the exported alias.
Solution = SolutionIterate


def _node(val, left=None, right=None):
    # Small helper so the sample trees below stay readable.
    node = TreeNode(val)
    node.left = left
    node.right = right
    return node


# Good case: a mirrored tree.
symmetric_root = _node(1,
                       _node(2, _node(3), _node(4)),
                       _node(2, _node(4), _node(3)))

# Bad case (just not mirror: both 3s hang on the same side).
asymmetric_root = _node(1,
                        _node(2, _node(3), _node(3)),
                        _node(2))

# Exercise both implementations, not only the aliased one.
for _impl in (SolutionRecursive, SolutionIterate):
    assert _impl().isSymmetric(symmetric_root) is True
    assert _impl().isSymmetric(asymmetric_root) is False
| mit | Python | |
cc582dd4b435ba06dc140b1ca96b688871e36abb | Add mock python package. | mfherbst/spack,skosukhin/spack,krafczyk/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,TheTimmy/spack,LLNL/spack,tmerrick1/spack,skosukhin/spack,skosukhin/spack,lgarren/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,mfherbst/spack,TheTimmy/spack,TheTimmy/spack,mfherbst/spack,iulian787/spack,TheTimmy/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,lgarren/spack,tmerrick1/spack,lgarren/spack,skosukhin/spack,matthiasdiener/spack,skosukhin/spack,EmreAtes/spack,iulian787/spack,LLNL/spack,lgarren/spack,lgarren/spack,krafczyk/spack,LLNL/spack,EmreAtes/spack,matthiasdiener/spack,TheTimmy/spack,tmerrick1/spack,krafczyk/spack,matthiasdiener/spack,tmerrick1/spack,matthiasdiener/spack,EmreAtes/spack,LLNL/spack | var/spack/repos/builtin.mock/packages/python/package.py | var/spack/repos/builtin.mock/packages/python/package.py | ##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Python(Package):
    """Dummy Python package to demonstrate preferred versions."""
    homepage = "http://www.python.org"
    url = "http://www.python.org/ftp/python/2.7.8/Python-2.7.8.tgz"

    # Mock extension packages may extend this one, like real Python modules.
    extendable = True

    version('3.5.1', 'be78e48cdfc1a7ad90efff146dce6cfe')
    version('3.5.0', 'a56c0c0b45d75a0ec9c6dee933c41c36')
    # Marked preferred, so concretization picks 2.7.11 over the newer 3.x.
    version('2.7.11', '6b6076ec9e93f05dd63e47eb9c15728b', preferred=True)
    version('2.7.10', 'd7547558fd673bd9d38e2108c6b42521')
    version('2.7.9', '5eebcaa0030dc4061156d3429657fb83')
    version('2.7.8', 'd4bca0159acb0b44a781292b5231936f')

    def install(self, spec, prefix):
        # Mock package: nothing to actually build or install.
        pass
| lgpl-2.1 | Python | |
f718f852bcc9be7d7ff57b8a0188499d5b1c9f99 | Create pyglatin.py | eringrace/hello-world | pyglatin.py | pyglatin.py | print "Welcome to the Pig Latin Translator! \n"
pyg = "ay"
original = raw_input("Enter a word: ")
if len(original) > 0 and original.isalpha():
word = original.lower()
first = word[0]
if word[0] != "a" or "e" or "i" or "o" or "u":
new_word = word + first + pyg
new_word = new_word[1:]
else:
new_word = word + pyg
new_word = new_word[0:]
print "\n"
print new_word
elif len(original) == 0:
print "Um, I'm waiting! \n"
elif original.isalpha() == False:
print "No, not l33t. A WORD, please. \n"
else:
print "Wow, you broke the program. How the hell did you do that?! \n"
| mit | Python | |
a5edbf04345653b18bdb63ed9bd63625689b0f4c | add some simple unit tests for ADMM | odlgroup/odl,aringh/odl,aringh/odl,kohr-h/odl,kohr-h/odl,odlgroup/odl | odl/test/solvers/nonsmooth/admm_test.py | odl/test/solvers/nonsmooth/admm_test.py | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Unit tests for ADMM."""
from __future__ import division
import odl
from odl.solvers import admm_linearized, Callback
from odl.util.testutils import all_almost_equal, noise_element
def test_admm_lin_input_handling():
    """Test to see that input is handled correctly."""
    space = odl.uniform_discr(0, 1, 10)
    L = odl.ZeroOperator(space)
    f = g = odl.solvers.ZeroFunctional(space)
    # Check that the algorithm runs. With the above operators and functionals,
    # the algorithm should not modify the initial value.
    x0 = noise_element(space)
    x = x0.copy()
    niter = 3
    admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter)
    assert x == x0
    # Check that a provided callback is actually called
    class CallbackTest(Callback):
        # Minimal callback that records whether the solver invoked it.
        def __init__(self):
            self.was_called = False
        def __call__(self, *args, **kwargs):
            self.was_called = True
    callback = CallbackTest()
    admm_linearized(x, f, g, L, tau=1.0, sigma=1.0, niter=niter,
                    callback=callback)
    assert callback.was_called
def test_admm_lin_l1():
    """The minimizer of ``||x - d1||_1 + 0.5 ||x - d2||_1`` is ``d1``.

    The first term carries the larger weight, so it dominates and the
    solver should (approximately) recover ``d1``.
    """
    space = odl.rn(5)
    identity = odl.IdentityOperator(space)

    # Note: keep the two noise_element calls in this order so the random
    # draws match the original test.
    d1 = odl.util.testutils.noise_element(space)
    d2 = odl.util.testutils.noise_element(space)

    f = odl.solvers.L1Norm(space).translated(d1)
    g = 0.5 * odl.solvers.L1Norm(space).translated(d2)

    x = space.zero()
    admm_linearized(x, f, g, identity, tau=1.0, sigma=2.0, niter=10)

    assert all_almost_equal(x, d1, places=2)
# Run just this file's tests when executed directly.
if __name__ == '__main__':
    odl.util.test_file(__file__)
| mpl-2.0 | Python | |
a0bd114b8caf75d28bc52a3aba10494660e6735a | Add lc0138_copy_list_with_random_pointer.py | bowen0701/algorithms_data_structures | lc0138_copy_list_with_random_pointer.py | lc0138_copy_list_with_random_pointer.py | """Leetcode 138. Copy List with Random Pointer
Medium
URL: https://leetcode.com/problems/copy-list-with-random-pointer/
A linked list is given such that each node contains an additional
random pointer which could point to any node in the list or null.
Return a deep copy of the list.
Example 1:
Input:
{"$id":"1","next":{"$id":"2","next":null,"random":{"$ref":"2"},"val":2},
"random":{"$ref":"2"},"val":1}
Explanation:
Node 1's value is 1, both of its next and random pointer points to Node 2.
Node 2's value is 2, its next pointer points to null and
its random pointer points to itself.
Note:
You must return the copy of the given head as a reference to the cloned list.
"""
# Definition for a Node.
class Node(object):
    def __init__(self, val, next, random):
        self.val = val        # node payload
        self.next = next      # next node in the list, or None
        self.random = random  # arbitrary node in the same list, or None
class Solution(object):
    def copyRandomList(self, head):
        """Return a deep copy of the list headed by ``head``.

        Two passes using an original->clone mapping:
          1. clone every node (next/random left unset),
          2. wire each clone's next/random pointers via the mapping.
        O(n) time, O(n) extra space.  (The original left this as a stub.)

        :type head: Node
        :rtype: Node
        """
        if head is None:
            return None

        # Instantiate clones via ``type(head)`` so any node class exposing
        # the (val, next, random) constructor works.
        node_cls = type(head)

        clones = {}
        node = head
        while node is not None:
            clones[node] = node_cls(node.val, None, None)
            node = node.next

        node = head
        while node is not None:
            clone = clones[node]
            # ``dict.get`` maps a missing (None) neighbour to None.
            clone.next = clones.get(node.next)
            clone.random = clones.get(node.random)
            node = node.next

        return clones[head]
def main():
    # Placeholder entry point; no demo wired up yet.
    pass
# Script entry point (currently a no-op).
if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
f8bde0dd523a46d81a64f9b3bd8633be0bf6676d | Create an exception to throw when a user tries to tag a bad commit | siggame/webserver,siggame/webserver,siggame/webserver | webserver/codemanagement/exceptions.py | webserver/codemanagement/exceptions.py | class CodeManagementException(Exception):
pass
| bsd-3-clause | Python | |
1120f16569e1bc9c9675a9cefd782b09266cd82c | Add initial k-means algo | murphyke/avocado,murphyke/avocado,murphyke/avocado,murphyke/avocado | avocado/stats/kmeans.py | avocado/stats/kmeans.py | import math
from random import random
from collections import namedtuple
# A weighted point: coords (length n), n = dimensionality, ct = weight
# (calculate_center multiplies coords by ct and divides by the ct total).
Point = namedtuple('Point', ('coords', 'n', 'ct'))
# A cluster: member points, the current center Point, and dimensionality n.
Cluster = namedtuple('Cluster', ('points', 'center', 'n'))
def euclidean(p1, p2):
    """Euclidean distance between two points of dimensionality ``p1.n``."""
    squared = sum((p1.coords[axis] - p2.coords[axis]) ** 2
                  for axis in range(p1.n))
    return math.sqrt(squared)
def calculate_center(points, n):
    """Return the ct-weighted centroid of ``points`` as a Point with ct=1."""
    total_weight = 0
    sums = [0.0] * n
    for point in points:
        total_weight += point.ct
        for axis in range(n):
            sums[axis] += point.coords[axis] * point.ct
    return Point([axis_sum / total_weight for axis_sum in sums], n, 1)
def kmeans(points, k, min_diff):
    """Lloyd's k-means: iterate assignment/update steps until the largest
    center movement drops below ``min_diff``; returns the final Clusters.
    """
    # BUG FIX: the module does ``from random import random`` (the function),
    # so ``random.sample`` at module scope raised AttributeError.  Import
    # the module locally to get a working ``sample``.
    import random as _random

    clusters = [Cluster([p], p, p.n) for p in _random.sample(points, k)]

    while True:
        plists = [[] for _ in range(k)]

        # Assignment step: each point joins its nearest cluster.
        for p in points:
            smallest_distance = float('Inf')
            idx = 0  # initialized so idx is always bound
            for i in range(k):
                distance = euclidean(p, clusters[i].center)
                if distance < smallest_distance:
                    smallest_distance = distance
                    idx = i
            plists[idx].append(p)

        # Update step: recompute centers, tracking the largest movement.
        diff = 0
        for i in range(k):
            old = clusters[i]
            center = calculate_center(plists[i], old.n)
            clusters[i] = Cluster(plists[i], center, old.n)
            diff = max(diff, euclidean(old.center, center))

        if diff < min_diff:
            break

    return clusters
| bsd-2-clause | Python | |
1de5cfe8714c140a79358e4287645f21095abad7 | Create sarafu.py | Hojalab/sarafu,pesaply/sarafu,Hojalab/sarafu,pesaply/sarafu | sarafu.py | sarafu.py | def (sarafu)
print ("sarafu")
| mit | Python | |
d8878fdfae5e4ca61de8b980bbc753f9e86ac655 | Add botaddnitf plugin | jhonnyam123/hangoutsbot | hangupsbot/plugins/botaddnotif.py | hangupsbot/plugins/botaddnotif.py | """
Plugin for monitoring if bot is added to a HO and report it to the bot admins.
Add a "botaddnotif_enable": true parameter in the config.json file.
Author: @cd334
"""
import asyncio
import logging
import hangups
import plugins
logger = logging.getLogger(__name__)
def _initialise(bot):
    """Plugin entry point: subscribe to membership-change events."""
    plugins.register_handler(_handle_join_notify, type="membership")
@asyncio.coroutine
def _handle_join_notify(bot, event, command):
if not event.conv_event.type_ == hangups.MembershipChangeType.JOIN:
return
bot_id = bot._user_list._self_user.id_
if not bot_id in event.conv_event.participant_ids:
return
enable = bot.get_config_option("botaddnotif_enable")
if not enable == True :
return
name = hangups.ui.utils.get_conv_name(event.conv, truncate=False)
message = u'<b>%s</b> has added me to Hangout: <b>%s</b>' % (event.user.full_name, name)
admin_list=bot.get_config_option('admins')
for admin_id in admin_list:
yield from bot.coro_send_to_user(admin_id, message) | agpl-3.0 | Python | |
c2207ed347ab4d804cb7ea966eace6ea93a41326 | Create sensor.py | PythonProgramming/Robotics-with-Raspberry-Pi | sensor.py | sensor.py | import RPi.GPIO as gpio
import time
def distance(measure='cm'):
    """Measure distance with an ultrasonic sensor on GPIO pins 12/16.

    Times how long the echo input (pin 16, board numbering) stays high
    and converts the interval to the requested unit.

    :param measure: 'cm' or 'in'; anything else prints a complaint and
        yields None.
    :returns: distance as a float, None for an unknown unit, or the
        legacy fallback value 100 if reading the GPIO fails.
    """
    try:
        gpio.setmode(gpio.BOARD)
        gpio.setup(12, gpio.OUT)  # trigger pin
        gpio.setup(16, gpio.IN)   # echo pin
        gpio.output(12, False)

        # Record the last time the echo line was low, then the last time
        # it was high; the difference is the pulse length in seconds.
        while gpio.input(16) == 0:
            nosig = time.time()
        while gpio.input(16) == 1:
            sig = time.time()
        tl = sig - nosig

        if measure == 'cm':
            result = tl / 0.000058
        elif measure == 'in':
            result = tl / 0.000148
        else:
            print('improper choice of measurement: in or cm')
            result = None

        gpio.cleanup()
        return result
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # can still break out of the busy-wait loops above; the original
        # best-effort fallback value is preserved.
        gpio.cleanup()
        return 100
# Quick manual check when run directly on the Pi.
if __name__ == "__main__":
    print(distance("cm"))
| mit | Python | |
5ac2e849fbf4857f7b449cdd39608ba053bddf6e | add main func | rfyiamcool/ExtractLevelDomain | ExtractLevelDomain.py | ExtractLevelDomain.py | #coding=utf-8
import re
from urlparse import urlparse
class ExtractLevelDomain():
    """Extract the top-level portion of a URL's host name, optionally
    keeping a fixed number of additional sub-domain labels ("levels").

    If the host does not match the configured pattern, the full host is
    returned unchanged.
    """
    def __init__(self):
        # Hand-maintained list of recognized public suffixes, each entry
        # including its leading dot.  Not exhaustive (e.g. no '.gov' or
        # '.edu'); hosts with unlisted suffixes pass through unchanged.
        self.topHostPostfix = (
            '.com','.la','.io',
            '.co', '.cn','.info',
            '.net', '.org','.me',
            '.mobi', '.us', '.biz',
            '.xxx', '.ca', '.co.jp',
            '.com.cn', '.net.cn', '.org.cn',
            '.mx','.tv', '.ws',
            '.ag', '.com.ag', '.net.ag',
            '.org.ag','.am','.asia',
            '.at', '.be', '.com.br',
            '.net.br',
            '.bz',
            '.com.bz',
            '.net.bz',
            '.cc',
            '.com.co',
            '.net.co',
            '.nom.co',
            '.de',
            '.es',
            '.com.es',
            '.nom.es',
            '.org.es',
            '.eu',
            '.fm',
            '.fr',
            '.gs',
            '.in',
            '.co.in',
            '.firm.in',
            '.gen.in',
            '.ind.in',
            '.net.in',
            '.org.in',
            '.it',
            '.jobs',
            '.jp',
            '.ms',
            '.com.mx',
            '.nl','.nu','.co.nz','.net.nz',
            '.org.nz',
            '.se',
            '.tc',
            '.tk',
            '.tw',
            '.com.tw',
            '.idv.tw',
            '.org.tw',
            '.hk',
            '.co.uk',
            '.me.uk',
            '.org.uk',
            '.vg')
        # Default pattern: a dot followed by one of the suffixes above at
        # the end of the host.
        # NOTE(review): the suffix entries already carry a leading escaped
        # dot, so together with the extra "[\.]" this pattern seems to
        # require TWO consecutive dots (e.g. "..cc") and may never match
        # ordinary hosts -- verify against parse_url's expected output.
        self.extractPattern = r'[\.]('+'|'.join([h.replace('.',r'\.') for h in self.topHostPostfix])+')$'
        self.pattern = re.compile(self.extractPattern,re.IGNORECASE)
        # Current level setting; "*" means "any number of labels".
        self.level = "*"
    def parse_url(self,url):
        """Return the matched domain suffix of ``url``'s host using the
        currently configured pattern, or the whole host on no match."""
        parts = urlparse(url)
        host = parts.netloc
        m = self.pattern.search(host)
        return m.group() if m else host
    def parse_url_level(self,url,level="*"):
        """Like ``parse_url`` but keep ``level`` sub-domain labels.

        Compiles a one-off pattern for ``level``; records ``level`` on the
        instance but leaves the stored compiled pattern untouched.
        """
        extractRule = self._parse_regex(level)
        parts = urlparse(url)
        host = parts.netloc
        pattern = re.compile(extractRule,re.IGNORECASE)
        m = pattern.search(host)
        self.level = level
        return m.group() if m else host
    def set_level(self,level):
        """Rebuild and store the instance's compiled pattern for ``level``."""
        extractRule = self._parse_regex(level)
        self.extractPattern = extractRule
        self.pattern = re.compile(self.extractPattern,re.IGNORECASE)
        self.level = level
    def _parse_regex(self,level):
        """Build the extraction regex for ``level``.

        One "optional label plus optional dot" group is repeated ``{level}``
        times (or "*" for any number) before the suffix alternation.
        """
        extractRule = r'(\w*\.?)%s('+'|'.join([h.replace('.',r'\.') for h in self.topHostPostfix])+')$'
        level = level if level == "*" else "{%s}"%level
        extractRule = extractRule%(level)
        return extractRule
if __name__ == "__main__":
    # Python 2 demo of the extractor.
    # NOTE(review): ``filter`` shadows the builtin of the same name.
    filter = ExtractLevelDomain()
    print filter.level
    print filter.parse_url('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc')
    print filter.parse_url_level('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc',level=2)
    filter.set_level(1)
    print filter.parse_url_level('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc',level=1)
    print filter.level
| mit | Python | |
9ad83570d31469758aeec3c80f45ba8b012cef82 | Create genDataFixedSize.py | kyanyoga/perfmon,kyanyoga/perfmon | bin/genDataFixedSize.py | bin/genDataFixedSize.py | #!/usr/bin python
import time
import random
import base64
import os
import sys
start = time.time()
# pwd = os.path.dirname(__file__)
# outputpath = os.path.normpath(pwd + '/../sample_data/' + sys.argv[1])
outputpath = os.path.normpath(sys.argv[1])
# print outputpath
#run for five m:inutes
# while time.time() < start + 300:
maxfilesize = 3.0 * 1024 * 1024 * 1024
#run forever
while (True):
t = time.strftime('%Y-%m-%dT%H:%M:%S')
timezone = time.strftime('%z')
millis = "%.3d" % (time.time() % 1 * 1000)
#open file for append
outputfile = open(outputpath, 'a+')
#create random values
level = random.sample(['DEBUG', 'INFO', 'WARN', 'ERROR'], 1)[0]
message = random.sample(['Don\'t worry, be happy.',
'error, ERROR, Error!',
'Nothing happened. This is worthless. \
Don\'t log this.',
'Hello world.'], 1)[0]
logger = random.sample(['FooClass',
'BarClass',
'AuthClass',
'LogoutClass',
'BarClass',
'BarClass',
'BarClass',
'BarClass'], 1)[0]
user = random.sample(['jeff',
'mo',
'aaron',
'rajesh',
'sunil',
'zach',
'gus'], 1)[0]
ip = random.sample(['1.2.3.4',
'4.31.2.1',
'1.2.3.',
'1.22.3.3',
'3.2.4.5',
'113.2.4.5'], 1)[0]
req_time = str(int(abs(random.normalvariate(0, 1)) * 1000))
session_length = str(random.randrange(1, 12240))
session_id = base64.b64encode(str(random.randrange(1000000, 1000000000)))
extra = random.sample(['network=qa',
'network=prod',
'session_length=' + session_length,
'session_id="' + session_id + '"',
'user=extrauser'], 1)[0]
fields = []
fields.append('logger=' + logger)
fields.append('user=' + user)
fields.append('ip=' + ip)
fields.append('req_time=' + req_time)
fields.append(extra)
fields.pop(random.randrange(0, len(fields)))
# print to screen
# print "%s.%s%s %s %s [%s]" % (t,
# millis,
# timezone,
# level,
# message,
# ", ".join(fields))
# check for size
fileInfo = os.stat(outputpath)
fileSize = fileInfo.st_size
# print fileSize
# print outputfile.tell()
if fileSize >= maxfilesize:
outputfile.close()
break
# print to file
outputfile.write( "%s.%s%s %s %s [%s]\n" % (t,
millis,
timezone,
level,
message,
", ".join(fields)))
outputfile.close()
#print newline
# sleep to slow down generation
# time.sleep( .7750 / 1000.0 )
| mit | Python | |
fd0e09e11d41d1d7b64f7bf592b788fda8b86e3e | Create archive_identifier.py | Bindernews/TheHound | identifiers/archive_identifier.py | identifiers/archive_identifier.py | from identifier import Result
CAB_PATTERNS = [
'4D 53 43 46'
]
class CabResolver:
	"""Resolver labelling any stream matched by CAB_PATTERNS as a CAB archive."""
	def identify(self, stream):
		# The magic-byte match already happened upstream; just report the format.
		return Result('CAB')
def load(hound):
	# Register the CAB magic-number patterns with a shared resolver instance.
	hound.add_matches(CAB_PATTERNS, CabResolver())
| mit | Python | |
bb619aa09132dbd970d2b78e23fecbd78ee774de | Create new package. (#6191) | matthiasdiener/spack,krafczyk/spack,EmreAtes/spack,LLNL/spack,matthiasdiener/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,matthiasdiener/spack,EmreAtes/spack,skosukhin/spack,krafczyk/spack,mfherbst/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,EmreAtes/spack,EmreAtes/spack,skosukhin/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,skosukhin/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,skosukhin/spack,LLNL/spack,mfherbst/spack,skosukhin/spack,iulian787/spack,tmerrick1/spack | var/spack/repos/builtin/packages/r-aneufinder/package.py | var/spack/repos/builtin/packages/r-aneufinder/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAneufinder(RPackage):
    """This package implements functions for CNV calling, plotting,
    export and analysis from whole-genome single cell sequencing data."""
    homepage = "https://www.bioconductor.org/packages/AneuFinder/"
    # Bioconductor packages are fetched from git; the release is pinned below.
    url      = "https://git.bioconductor.org/packages/AneuFinder"
    version('1.4.0', git='https://git.bioconductor.org/packages/AneuFinder', commit='e5bdf4d5e4f84ee5680986826ffed636ed853b8e')
    # R interpreter constraint matching this Bioconductor release.
    depends_on('r@3.4.0:3.4.9', when='@1.4.0')
    # CRAN/Bioconductor dependencies, needed at both build and run time.
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-cowplot', type=('build', 'run'))
    depends_on('r-aneufinderdata', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-doparallel', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-rsamtools', type=('build', 'run'))
    depends_on('r-bamsignals', type=('build', 'run'))
    depends_on('r-dnacopy', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
    depends_on('r-genomicalignments', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-reshape2', type=('build', 'run'))
    depends_on('r-ggdendro', type=('build', 'run'))
    depends_on('r-reordercluster', type=('build', 'run'))
    depends_on('r-mclust', type=('build', 'run'))
    depends_on('r-ggrepel', type=('build', 'run'))
| lgpl-2.1 | Python | |
4091f699d0d75a9a506190ec04e2bfc9fc10f9f5 | add Invoke-WCMDump credentials current user | Hackplayers/Empire-mod-Hpys-tests,Hackplayers/Empire-mod-Hpys-tests,Hackplayers/Empire-mod-Hpys-tests,Hackplayers/Empire-mod-Hpys-tests,Hackplayers/Empire-mod-Hpys-tests | lib/modules/powershell/credentials/Invoke-WCMDump.py | lib/modules/powershell/credentials/Invoke-WCMDump.py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Invoke-WCMDump',
# list of one or more authors for the module
'Author': ['Barrett Adams (@peewpw)'],
# more verbose multi-line description of the module
'Description': ('Invoke-WCMDump enumerates Windows credentials in the Credential Manager '
'and then extracts available information about each one. Passwords '
'are retrieved for "Generic" type credentials , but can not be '
'retrived by the same method for "Domain" type credentials.'
'Credentials are only returned for the current user.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'comment',
'https://github.com/peewpw/Invoke-WCMDump'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to grab a screenshot from.',
'Required' : True,
'Value' : ''
},
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
"""
# if you're reading in a large, external script that might be updates,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-WCMDump.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-WCMDump"
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
return script
| bsd-3-clause | Python | |
d604429f1d27f8753b8d9665a55111a3b90f0699 | Add homebrew version of fluentd logger | erinspace/scrapi,jeffreyliu3230/scrapi,icereval/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,CenterForOpenScience/scrapi,felliott/scrapi,alexgarciac/scrapi,ostwald/scrapi,mehanig/scrapi,felliott/scrapi,fabianvf/scrapi,mehanig/scrapi,fabianvf/scrapi | scrapi/util/logging.py | scrapi/util/logging.py | import logging
from datetime import datetime
from fluent import sender
class FluentHandler(logging.Handler):
    '''
    Logging Handler that forwards log records to a fluentd collector.
    '''
    def __init__(self,
                 tag,
                 host='localhost',
                 port=24224,
                 timeout=3.0,
                 verbose=False):
        """Create a handler emitting under *tag* to *host*:*port*."""
        self.tag = tag
        self.sender = sender.FluentSender(tag,
                                          host=host, port=port,
                                          timeout=timeout, verbose=verbose)
        logging.Handler.__init__(self)

    def emit(self, record):
        """Serialize *record* into a flat dict and send it to fluent."""
        data = {
            'level': record.levelname,
            # getMessage() interpolates %-style arguments into the message;
            # record.msg alone would silently drop them.
            'message': record.getMessage(),
            'source': record.name,
            'date': datetime.fromtimestamp(record.created).isoformat(),
            'fullPath': record.pathname,
            'uptime': record.relativeCreated
        }
        self.sender.emit(None, data)

    def close(self):
        """Close the underlying sender, then the base logging.Handler."""
        self.acquire()
        try:
            self.sender._close()
            logging.Handler.close(self)
        finally:
            self.release()
| apache-2.0 | Python | |
cfeb4fbfa44b597772f1eb63d828e605f9e39396 | Add server | idiotandrobot/heathergraph | server.py | server.py | import argparse
import os
import signal
import subprocess
def parse_args():
    """Parse the command line: one mandatory start/stop/restart operation."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'operation',
        choices=['start', 'stop', 'restart'],
        help='start/stop/restart the server')
    return arg_parser.parse_args()
def find_server_process(components):
    """Return the PID of a running process whose command line equals
    *components* joined by single spaces, or None if there is none.
    """
    proc = subprocess.Popen(['ps', '-e', '-o', 'pid,command'],
                            stdout=subprocess.PIPE)
    (out, _err) = proc.communicate()
    # communicate() yields bytes on Python 3; str(out) would produce
    # "b'...'" and corrupt line splitting, so decode explicitly.
    text = out.decode('utf-8', errors='replace')
    processes = [line.strip() for line in text.split('\n')[1:] if line]
    for line in processes:
        fields = line.split(None, 1)
        # Guard against malformed ps lines with no command column.
        if len(fields) == 2 and fields[1] == ' '.join(components):
            return int(fields[0])
    return None
def get_absolute_filename(relative_name):
    """Resolve *relative_name* against this script's own directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, relative_name))
def start_service(launch_command=None):
    """Spawn the heathergraph service as a child process.

    *launch_command* is the argv list to execute; when omitted it is
    derived from this script's directory.  (The original body referenced
    a ``launch_command`` global that was never defined at module level,
    so calling it raised NameError.)
    """
    if launch_command is None:
        launch_command = ['python', get_absolute_filename('heathergraph.py')]
    proc = subprocess.Popen(launch_command)
    print('Heathergraph service started - PID={}'.format(proc.pid))


def stop_service(pid):
    """Send SIGTERM to *pid*, or report that no service is running."""
    if pid:
        print('Stopping Heathergraph service - PID={}'.format(pid))
        os.kill(pid, signal.SIGTERM)
    else:
        print('No Heathergraph service was found to be running')


def main():
    """Dispatch the requested start/stop/restart operation."""
    args = parse_args()
    launch_command = ['python',
                      get_absolute_filename('heathergraph.py')]
    pid = find_server_process(launch_command)
    if args.operation == 'start':
        if not pid:
            start_service(launch_command)
        else:
            print('Heathergraph service is already running')
    elif args.operation == 'stop':
        stop_service(pid)
    elif args.operation == 'restart':
        stop_service(pid)
        start_service(launch_command)


if __name__ == '__main__':
    main()
| mit | Python | |
8fda2e1330277e98b62d3286e5c208d320fc07db | Add simple api to redis with flask | keoghpe/daftpunk,nicr9/daftpunk,keoghpe/daftpunk,nicr9/daftpunk,keoghpe/daftpunk,nicr9/daftpunk | server.py | server.py | from flask import Flask
from flask import json
from flask import Response
import redis
app = Flask(__name__)
r = redis.StrictRedis(host='localhost', port=6379, db=0)
@app.route("/")
def hello():
return "Hello ld!"
@app.route('/properties/')
def show_properties():
    """List all known properties as a JSON array of {id, address} objects."""
    props = r.smembers('daftpunk:properties')
    data = []
    for n in props:
        # Each property id keys its address under daftpunk:<id>:address.
        data.append({"id":n, "address": r.get('daftpunk:%s:address' % n)})
    resp = Response(json.dumps(data), status=200, mimetype='application/json')
    return resp
@app.route('/property/<id>')
def show_property(id):
    """Return a single property's id and address as JSON.

    NOTE(review): ``id`` shadows the builtin, and an unknown id yields a
    null address rather than a 404 -- confirm both are intended.
    """
    data = {"id":id, "address": r.get('daftpunk:%s:address' % id)}
    resp = Response(json.dumps(data), status=200, mimetype='application/json')
    return resp
if __name__ == "__main__":
app.run(debug=True) | mit | Python | |
ef108d756ae91925ca0afec280151974eae4a696 | add import script to load existing data in influxdb, optional | horkko/biomaj,horkko/biomaj,genouest/biomaj,genouest/biomaj | scripts/influxdb_import.py | scripts/influxdb_import.py | '''
Import biomaj banks statistics in Influxdb if never done before.....
'''
from influxdb import InfluxDBClient
from biomaj.bank import Bank
from biomaj_core.config import BiomajConfig
import sys
# The script requires exactly one argument (the path to global.properties),
# i.e. two entries in sys.argv including the program name.  The previous
# test (``!= 1``) rejected correct invocations and crashed with an
# IndexError when no argument was supplied.
if len(sys.argv) != 2:
    print('Usage: influxdb_import.py path_to_global.properties')
    sys.exit(1)

BiomajConfig.load_config(config_file=sys.argv[1])

influxdb = None
try:
    influxdb = InfluxDBClient(host='biomaj-influxdb', database='biomaj')
except Exception as e:
    print('Failed to connect to influxdb, check configuration in global.properties: ' + str(e))
    sys.exit(1)

# Existing points are only reported; the import still proceeds.
# NOTE(review): the module docstring says "if never done before" --
# confirm whether the script should abort here instead.
res = influxdb.query('select last("value") from "biomaj.banks.quantity"')
if res:
    print('Found data in influxdb, update info....')

banks = Bank.list()
nb_banks = 0
metrics = []
for bank in banks:
    productions = bank['production']
    total_size = 0
    latest_size = 0
    if not productions:
        # Banks that were never built contribute nothing.
        continue
    nb_banks += 1
    # Size of the most recent production release.
    latest_size = productions[len(productions) - 1]['size']
    for production in productions:
        if 'size' in production:
            total_size += production['size']
    # ``production`` still references the last release here; its session
    # timestamp stamps the three per-bank points below.
    influx_metric = {
        "measurement": 'biomaj.production.size.total',
        "fields": {
            "value": float(total_size)
        },
        "tags": {
            "bank": bank['name']
        },
        "time": int(production['session'])
    }
    metrics.append(influx_metric)
    influx_metric = {
        "measurement": 'biomaj.production.size.latest',
        "fields": {
            "value": float(latest_size)
        },
        "tags": {
            "bank": bank['name']
        },
        "time": int(production['session'])
    }
    metrics.append(influx_metric)
    influx_metric = {
        "measurement": 'biomaj.bank.update.new',
        "fields": {
            "value": 1
        },
        "tags": {
            "bank": bank['name']
        },
        "time": int(production['session'])
    }
    metrics.append(influx_metric)

# Global gauge: number of banks with at least one production release.
influx_metric = {
    "measurement": 'biomaj.banks.quantity',
    "fields": {
        "value": nb_banks
    }
}
metrics.append(influx_metric)

influxdb.write_points(metrics, time_precision="s")
| agpl-3.0 | Python | |
540f913d6b9402512bc2b507504f77f709c17eca | add exec example | anpere/goaway | examples/exec.py | examples/exec.py | """
Example uses of exec.
exec is a special form which takes 1, 2, or 3 arguments.
exec(expr, globals, locals)
locals and globals are optional.
expr is a string to be executed as code.
globals is a dictionary from symbol names to values.
locals is a dictionary from symbol names to values.
"""
import inspect
import numpy as np
def exec_verbose(expr, globalsd=None, localsd=None):
    """Wraps exec() and prints some stuff.

    Behaves just like exec with the following exceptions:
    - Prints the expr to be exec'd.
    - Catches and reports exceptions but does not throw.
    """
    # This line prints expr and whether or not global and locals exist.
    print "exec" + (" (g)" if globalsd != None else "") + (" (l)" if localsd != None else "") + ": " + expr
    try:
        # exec's scoping depends on how many namespace arguments it gets,
        # so forward exactly the combination the caller supplied.
        if (globalsd == None) and (localsd == None):
            exec(expr)
        elif (globalsd != None) and (localsd == None):
            exec(expr, globalsd)
        elif (globalsd != None) and (localsd != None):
            exec(expr, globalsd, localsd)
        else:
            # locals without globals cannot be expressed with exec.
            raise RuntimeError("bad exec_verbose args")
    except Exception as ex:
        print "exec failed:", type(ex).__name__, ex
a = 1
b = 2
print "Exec with implicit globals and locals."
exec_verbose("print a, b")
# 1 2
print
print "With empty globals."
exec_verbose("print a, b", {})
# exec failed: NameError name 'a' is not defined
print
print "With custom globals."
exec_verbose("print a, b", {"a": 3, "b": 4})
# 3 4
print
print "With shadowing of globals by locals."
exec_verbose("print a, b", {"a": 3, "b": 4}, {"b": 5})
# 3 5
print
print "Refer to imports within this file."
exec_verbose("print np.sin(0)")
# 0.0
print
print "Supplying globals kills imports."
exec_verbose("print np.sin(0)", {})
# exec failed: NameError name 'np' is not defined
print
print "You can simulate them."
exec_verbose("print np.sin(0)", {"np": np})
# 0.0
print
print "And do dirty tricks."
class FakeNumpy(object):
def sin(self, x):
return -1
print inspect.getsource(FakeNumpy).strip()
exec_verbose("print np.sin(0)", {"np": FakeNumpy()})
# -1
print
| mit | Python | |
c48b6ea55969adff7e0662c551a529161a4d0b94 | add kattis/stockprices | mjenrungrot/algorithm,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming | Kattis/stockprices.py | Kattis/stockprices.py | """
Problem: stockprices
Link: https://open.kattis.com/problems/stockprices
Source: NWERC 2010
"""
import queue
import sys
def runTest():
    """Process one order-book test case read from stdin.

    Bids live in a min-heap of (-price, qty) tuples (i.e. a max-heap on
    price); asks in a min-heap of (price, qty).  After each order the
    book is crossed while best bid >= best ask, tracking the last trade
    price.  Finally prints "ask bid last-price" with '-' placeholders.

    NOTE: peeking via ``heap.queue[0]`` relies on PriorityQueue's
    internal heap list, whose root is always the minimum element.
    """
    N = int(input())
    buyHeap = queue.PriorityQueue() # min-heap of (-price, qty) => max-heap on price
    sellHeap = queue.PriorityQueue() # min-heap of (price, qty)
    stockPrice = None
    for i in range(N):
        # Line format: "buy|sell <n> shares at <price>".
        command, n, _, _, price = input().split()
        n = int(n)
        price = int(price)
        if command == "buy":
            buyHeap.put((int(-price), n))
        else:
            sellHeap.put((int(price), n))
        bestBid = buyHeap.queue[0] if len(buyHeap.queue) > 0 else None
        bestAsk = sellHeap.queue[0] if len(sellHeap.queue) > 0 else None
        # Cross the book while the highest bid meets the lowest ask.
        while (bestBid is not None) and (bestAsk is not None) and (-bestBid[0] >= bestAsk[0]):
            stockPrice = bestAsk[0]
            nBid = bestBid[1]
            nAsk = bestAsk[1]
            print("Best bid = {:} ({:})".format(-bestBid[0], nBid), file=sys.stderr)
            print("Best ask = {:} ({:})".format(bestAsk[0], nAsk), file=sys.stderr)
            # Pop both sides; re-insert the remainder of the larger order.
            if nBid > nAsk:
                buyHeap.get()
                sellHeap.get()
                buyHeap.put((bestBid[0], nBid-nAsk))
            elif nBid < nAsk:
                buyHeap.get()
                sellHeap.get()
                sellHeap.put((bestAsk[0], nAsk-nBid))
            else:
                buyHeap.get()
                sellHeap.get()
            bestBid = buyHeap.queue[0] if len(buyHeap.queue) > 0 else None
            bestAsk = sellHeap.queue[0] if len(sellHeap.queue) > 0 else None
    bestBid = buyHeap.queue[0] if len(buyHeap.queue) > 0 else None
    bestAsk = sellHeap.queue[0] if len(sellHeap.queue) > 0 else None
    # Report: lowest ask, highest bid, last trade price ('-' when absent).
    if bestAsk is not None: print("{:}".format(int(bestAsk[0])), end=" ")
    else: print("-", end=" ")
    if bestBid is not None: print("{:}".format(int(-bestBid[0])), end=" ")
    else: print("-", end=" ")
    if stockPrice is not None: print("{:}".format(int(stockPrice)), end="\n")
    else: print("-", end="\n")
# First input line gives the number of test cases; run each in turn.
T = int(input())
for i in range(T):
    runTest()
| mit | Python | |
b01445701c2974c0f69c9a43208111f0b80a167f | Create helloWorld.py | ErDhananjay/Scripting,ErDhananjay/Scripting | helloWorld.py | helloWorld.py | print "Hello World!"
| unlicense | Python | |
9ad2a898298667aa6adfbf0c4e786e431c9a96b1 | test test test | gspindles/mj-score-eval,gspindles/mj-score-eval | python-ver/test.py | python-ver/test.py | print 'blah blah blah'
| mit | Python | |
27575c3fd6bdc55748b808a98c0b19e3edfb17af | Create ShakeBoussole.py | jancelin/geo-poppy,jancelin/geo-poppy | sense-hat/ShakeBoussole.py | sense-hat/ShakeBoussole.py | from sense_hat import SenseHat
import time
import sys
sense = SenseHat()

# Perimeter of the 8x8 LED matrix, clockwise, as flat pixel offsets.
led_loop = [4, 5, 6, 7, 15, 23, 31, 39, 47, 55, 63, 62, 61, 60, 59, 58, 57, 56, 48, 40, 32, 24, 16, 8, 0, 1, 2, 3]

sense.set_rotation(0)
sense.clear()

prev_x = 0
prev_y = 0

# Number of perimeter LEDs per degree of heading.
led_degree_ratio = len(led_loop) / 360.0

while True:
    # get_accelerometer_raw() returns a dict; read the axes by key instead
    # of unpacking .values(), whose ordering is not guaranteed to be x, y, z.
    accel = sense.get_accelerometer_raw()
    x = abs(accel['x'])
    y = abs(accel['y'])
    z = abs(accel['z'])

    if x > 1 or y > 1 or z > 1:
        # Shake detected: act as a compass, lighting the perimeter LED
        # that faces North.
        while True:
            dir = sense.get_compass()
            dir_inverted = 180 - dir  # So LED appears to follow North
            led_index = int(led_degree_ratio * dir_inverted)
            # A negative index simply wraps around the perimeter list.
            offset = led_loop[led_index]

            y = offset // 8  # row
            x = offset % 8   # column

            if x != prev_x or y != prev_y:
                sense.set_pixel(prev_x, prev_y, 0, 0, 0)

            sense.set_pixel(x, y, 0, 0, 255)

            prev_x = x
            prev_y = y
    else:
        sense.clear()
        time.sleep(2)
        # Register the joystick middle-press callback.  The original called
        # sense.clear() immediately and assigned its None return value;
        # assign the bound method itself so it fires on press.
        sense.stick.direction_middle = sense.clear
| agpl-3.0 | Python | |
fb96906301515b268d56bb7a494360f794883223 | include migration for uniq | SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci | metaci/release/migrations/0002_auto_20180815_2248.py | metaci/release/migrations/0002_auto_20180815_2248.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-15 22:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce uniqueness of (repo, git_tag) pairs on the Release model."""
    dependencies = [
        ('repository', '0005_repository_release_tag_regex'),
        ('release', '0001_initial'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='release',
            unique_together=set([('repo', 'git_tag')]),
        ),
    ]
| bsd-3-clause | Python | |
c32a9c28c6f868e479d7107a87ba44478d2bc6b2 | add sequana_mapping standalone | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | sequana/scripts/mapping.py | sequana/scripts/mapping.py | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from snakemake import shell as shellcmd
import shutil
import glob
import sys
from optparse import OptionParser
import argparse
class Options(argparse.ArgumentParser):
def __init__(self, prog="sequana_mapping"):
usage = """Welcome to SEQUANA - mapping standalone
sequana_mapping --file1 R1.fastq --file2 R2.fastq --reference reference
This is a simple mapper for quick test. The commands ran are as follows::
# Indexing
bwa index REFERENCE
samtools faidx REFERENCE
# mapping
bwa_mem -t 4 -R @RG\\tID:1\\tSM:1\\tPL:illumina -T 30 REFERENCE FASTQ_FILES | samtools
view -Sbh -> REFERENCE.bam
samtools sort -o REFERENCE.sorted.bam REFERENCE.bam
AUTHORS: Sequana Consortium
Documentation: http://sequana.readthedocs.io
Issues: http://github.com/sequana/sequana
"""
description = """DESCRIPTION:
"""
super(Options, self).__init__(usage=usage, prog=prog,
description=description)
# options to fill the config file
self.add_argument("--file1", dest="file1", type=str,
default=None, required=True,
help="""R1 fastq file (zipped) """)
self.add_argument("--file2", dest="file2", type=str,
default=None,
help="""R2 fastq file (zipped) """)
self.add_argument("--reference", dest="reference", type=str,
help="""reference """)
def main(args=None):
    """Entry point: index the reference, map the reads, and sort the BAM.

    Parameters
    ----------
    args : list, optional
        Full argv-style argument list; defaults to ``sys.argv``.
    """
    if args is None:
        args = sys.argv[:]

    user_options = Options(prog="sequana")

    # If --help or no options provided, show the help
    if len(args) == 1:
        user_options.parse_args(["prog", "--help"])
    else:
        options = user_options.parse_args(args[1:])

    reference = options.reference

    # Build the FASTQ argument string for single- or paired-end input.
    if options.file1 and options.file2:
        fastq = "%s %s" % (options.file1, options.file2)
    elif options.file1 and not options.file2:
        fastq = "%s" % (options.file1)
    elif options.file1 is None:
        raise ValueError("--file1 must be used")

    # Theoretical depth of coverage: total sequenced bases divided by the
    # length of the first reference sequence.
    from sequana import FastQ
    from sequana import FastA
    S = 0
    for this in FastQ(options.file1):
        S += len(this['sequence'])
    if options.file2:
        for this in FastQ(options.file2):
            S += len(this['sequence'])
    ref = FastA(options.reference)
    coverage = float(S) / len(ref.sequences[0])
    print('Theoretical Depth of Coverage : %s' % coverage)

    params = {"reference": reference, "fastq": fastq}

    # Index the reference for bwa and samtools.  The original built the
    # faidx command string but never executed it; run both explicitly.
    shellcmd("bwa index %(reference)s " % params)
    shellcmd("samtools faidx %(reference)s " % params)

    # Map with bwa mem and convert to BAM on the fly.
    cmd = r"bwa mem -t 4 -R @RG\\tID:1\\tSM:1\\tPL:illumina -T 30 %(reference)s %(fastq)s "
    cmd += "| samtools view -Sbh -> %(reference)s.bam "
    shellcmd(cmd % params)

    # Sort the resulting BAM file.
    shellcmd("samtools sort -o %(reference)s.sorted.bam %(reference)s.bam" % params)
| bsd-3-clause | Python | |
c503471f3d7318a2519b486b8be74ec1d1f2e235 | Add an admin interface for xmlrpc tokens | Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server | linaro_django_xmlrpc/admin.py | linaro_django_xmlrpc/admin.py | from django.contrib import admin
from linaro_django_xmlrpc.models import AuthToken
class AuthTokenAdmin(admin.ModelAdmin):
    """Admin list configuration for XML-RPC auth tokens: owner and usage."""
    list_display = ('user', 'description', 'created_on', 'last_used_on')
# Make tokens manageable from the Django admin site.
admin.site.register(AuthToken, AuthTokenAdmin)
| agpl-3.0 | Python | |
acdc8dd7006955f89507018d1b7e39092f1d2e07 | set r2dbe header without restarting everything | sao-eht/lmtscripts,sao-eht/lmtscripts,sao-eht/lmtscripts,sao-eht/lmtscripts | r2dbe_setheader.py | r2dbe_setheader.py | import adc5g, corr
from time import sleep
from datetime import datetime, time, timedelta
roach2 = corr.katcp_wrapper.FpgaClient('r2dbe-1')
roach2.wait_connected()
sblookup = {0:'LSB', 1:'USB'}
pollookup = {0:'X/L', 1:'Y/R'}
sbmap = {'LSB':0, 'USB':1}
polmap = {'X':0, 'Y':1, 'L':0, 'R':1, 'LCP':0, 'RCP':1}
# IF0:station IF0:Sky_SB IF0:BDC_SB IF0:POL IF1:station IF1:Sky_SB IF1:BDC_SB IF1:POL | comment
#0 1 2 3 4 5 6 7
configs = """
Lm LSB LSB LCP Ln LSB LSB RCP | 4-6 interim config
Lm LSB USB LCP Ln LSB USB RCP | 6-8 interim config
Lm USB LSB LCP Ln LSB LSB RCP | 4-6 ALMA config
Lm USB USB LCP Ln LSB USB RCP | 6-8 ALMA config
"""
choices = configs.strip().split('\n')
print "Please choose a configuration [1-%d]:" % (len(choices))
for (i, line) in enumerate(choices):
print "%3d) %s" % (i+1, line)
i = raw_input("-> ")
choice = choices[int(i)-1]
par = choice.split('|')[0].strip().split()
# set variables
station_id_0 = par[0]
station_id_1 = par[4]
skysb_block_0 = sbmap[par[1]]
skysb_block_1 = sbmap[par[5]]
bdcsb_block_0 = sbmap[par[2]]
bdcsb_block_1 = sbmap[par[6]]
pol_block_0 = polmap[par[3]]
pol_block_1 = polmap[par[7]]
print "setting IF parameters:"
print " IF0: %s (%s) - Sky:%s, BDC:%s" % (par[0], par[3], par[1], par[2])
print " IF1: %s (%s) - Sky:%s, BDC:%s" % (par[4], par[7], par[5], par[6])
ref_ep_num = 32 #2014 part 2 = 29
ref_ep_date = datetime(2016,1,1,0,0,0) # date of start of epoch July 1 2014
utcnow = datetime.utcnow()
wait = (1500000 - utcnow.microsecond) % 1e6
sleep(wait / 1e6)
utcnow = datetime.utcnow()
delta = utcnow-ref_ep_date
sec_ref_ep = delta.seconds + 24*3600*delta.days
nday = sec_ref_ep/24/3600 # LLB: I believe nday here is off by 1 (0 indexed)
roach2.write_int('r2dbe_vdif_0_hdr_w0_reset',1)
roach2.write_int('r2dbe_vdif_0_hdr_w0_reset',0)
roach2.write_int('r2dbe_vdif_1_hdr_w0_reset',1)
roach2.write_int('r2dbe_vdif_1_hdr_w0_reset',0)
roach2.write_int('r2dbe_vdif_0_hdr_w0_sec_ref_ep',sec_ref_ep)
roach2.write_int('r2dbe_vdif_1_hdr_w0_sec_ref_ep',sec_ref_ep)
roach2.write_int('r2dbe_vdif_0_hdr_w1_ref_ep',ref_ep_num)
roach2.write_int('r2dbe_vdif_1_hdr_w1_ref_ep',ref_ep_num)
############
# W3
############
# roach2.write_int('r2dbe_vdif_0_hdr_w3_thread_id', thread_id_0)
# roach2.write_int('r2dbe_vdif_1_hdr_w3_thread_id', thread_id_1)
# convert chars to 16 bit int
st0 = ord(station_id_0[0])*2**8 + ord(station_id_0[1])
st1 = ord(station_id_1[0])*2**8 + ord(station_id_1[1])
roach2.write_int('r2dbe_vdif_0_hdr_w3_station_id', st0)
roach2.write_int('r2dbe_vdif_1_hdr_w3_station_id', st1)
############
# W4
############
eud_vers = 0x02
w4_0 = eud_vers*2**24 + skysb_block_0*4 + bdcsb_block_0*2 + pol_block_0
w4_1 = eud_vers*2**24 + skysb_block_1*4 + bdcsb_block_1*2 + pol_block_1
roach2.write_int('r2dbe_vdif_0_hdr_w4',w4_0)
roach2.write_int('r2dbe_vdif_1_hdr_w4',w4_1)
| mit | Python | |
a0702b8ac74c4976cf747880bdfeb86088a16715 | CREATE new Syft Message structure | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | packages/syft/src/syft/core/node/common/node_service/generic_payload/syft_message.py | packages/syft/src/syft/core/node/common/node_service/generic_payload/syft_message.py | # stdlib
from typing import Any
from typing import Dict
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from .....common.message import ImmediateSyftMessage, SignedMessage
from .....common.uid import UID
from .....io.address import Address
from ....abstract.node_service_interface import NodeServiceInterface
class SyftMessage(ImmediateSyftMessage):
    """Base message carrying an address and a free-form kwargs payload.

    Subclasses implement :meth:`run` to perform the node-side work.
    """

    __attr_allowlist__ = ["id", "payload", "address", "reply_to", "msg_id", "kwargs"]
    signed_type = SignedMessage

    def __init__(
        self,
        address: Address,
        kwargs: Optional[Dict[str, Any]] = None,
        msg_id: Optional[UID] = None,
        reply_to: Optional[Address] = None) -> None:
        """Store routing information and the request keyword arguments.

        ``kwargs`` previously defaulted to a mutable ``{}`` shared by all
        instances; default to ``None`` and create a fresh dict per message.
        """
        super().__init__(address=address, msg_id=msg_id)
        self.reply_to = reply_to
        self.kwargs = {} if kwargs is None else kwargs

    def run(self, node: NodeServiceInterface, verify_key: Optional[VerifyKey] = None) -> ImmediateSyftMessage:
        """Execute this message's action on *node*; subclasses must override."""
        raise NotImplementedError
| apache-2.0 | Python | |
af61a720abe964c2295d8f0aa555fed9bb67372a | add 4 | ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler | 004.py | 004.py | def pal(value):
return str(value) == str(value)[::-1]
# Project Euler 4: find the largest palindrome that is a product of two
# 3-digit numbers, counting both factors down from 999.
p = 1
for i in xrange(999, 99, -1):
    for j in xrange(999, 99, -1):
        n = i * j
        # Only test palindromicity when the product can beat the best so far.
        if n > p and pal(n):
            p = n
print p | bsd-3-clause | Python | |
c15c10567a933b605e598129e02ac89bf03d3f02 | add move operation module | yinyin/FileWatcher | lib/filewatcher/operator/mover.py | lib/filewatcher/operator/mover.py | # -*- coding: utf-8 -*-
""" 檔案操作作業模組 """
import os
import shutil
from filewatcher import componentprop
# Single shared OperatorProp instance; get_module_prop always returns it.
_cached_module_prop_instance = componentprop.OperatorProp('mover', 'move_to', schedule_priority=2, run_priority=2)
def get_module_prop():
    """Return the operator's properties.

    Returns:
        The componentprop.OperatorProp instance describing this operator.
    """
    return _cached_module_prop_instance
# ### def get_module_prop
def operator_configure(config, metastorage):
    """Configure the operator (currently a no-op).

    Args:
        config: dict carrying the configuration parameters.
        metastorage: the metadata storage object.

    Returns:
        None
    """
    pass
# ### def operator_configure
def read_operation_argv(argv):
    """Parse the operation argument from the configuration file.

    Args:
        argv: the value given in the configuration (a target directory).

    Returns:
        The absolute path of the directory when it exists and is writable,
        otherwise None.
    """
    is_writable_dir = os.path.isdir(argv) and os.access(argv, os.W_OK)
    if not is_writable_dir:
        return None
    return os.path.abspath(argv)
# ### read_operation_argv
def perform_operation(current_filepath, orig_filename, argv, oprexec_ref, logqueue=None):
    """Move the target file into the configured directory.

    Args:
        current_filepath: absolute path of the target file (may already be
            renamed when this is the first operation in the chain).
        orig_filename: original file name (without path).
        argv: operation argument from the configuration (target directory).
        oprexec_ref: operation reference object (name/path match results,
            content digest, etc.); unused by this operator.
        logqueue: optional list-like object collecting log messages.

    Returns:
        The absolute path of the file after the move, or None on failure.
    """
    target_path = os.path.join(argv, orig_filename)
    try:
        # If a read-only file already occupies the target path, remove it so
        # the move can succeed.
        if os.access(target_path, os.F_OK) and not os.access(target_path, os.W_OK):
            os.unlink(target_path)
        shutil.move(current_filepath, target_path)
        # BUG FIX: logqueue defaults to None but was dereferenced
        # unconditionally, crashing every call that omitted it.
        if logqueue is not None:
            logqueue.append("move %r to %r success" % (current_filepath, target_path,))
        return target_path
    except (shutil.Error, IOError) as e:
        if logqueue is not None:
            logqueue.append("move %r to %r failed: %r" % (current_filepath, target_path, e,))
        return None
# ### def perform_operation
def operator_stop():
    """Stop operations in preparation for shutdown (currently a no-op).

    Returns:
        None
    """
    pass
# ### def operator_stop
# vim: ts=4 sw=4 ai nowrap
| mit | Python | |
ee1e049a4cbe47ce106824612a69d738562eceb3 | add simple test for testing that view change messages are checked on the receiver side | evernym/zeno,evernym/plenum | plenum/test/view_change/test_instance_change_msg_checking.py | plenum/test/view_change/test_instance_change_msg_checking.py | import pytest
import types
from plenum.common.types import InstanceChange
def test_instance_change_msg_checking(nodeSet, looper, up):
    """Send an InstanceChange with a non-integer viewNo so the receiving
    node's message-type validation has to reject it.

    NOTE(review): relies on the `nodeSet`, `looper` and `up` pytest fixtures
    from the plenum test plumbing.
    """
    nodeA = nodeSet.Alpha
    nodeB = nodeSet.Beta
    ridBetta = nodeA.nodestack.getRemote(nodeB.name).uid
    badViewNo = "BAD"  # viewNo should be an int; a string must fail checking
    nodeA.send(InstanceChange(badViewNo), ridBetta)
looper.runFor(.2) | apache-2.0 | Python | |
a85dc832edf2793fd22489f7801fcdd7e74ec79c | add figure composite | billryan/fig_composite | fig_composite.py | fig_composite.py | #!/usr/bin/env python2
#-*-coding:utf-8 -*-
import os
import sys
import re
import PIL
from PIL import Image
# Input directory: the directory containing this script, kept relative.
#fig_in_path = os.path.dirname(os.path.realpath(sys.argv[0]))
fig_in_path = os.path.relpath(os.path.dirname(os.path.realpath(sys.argv[0])))
# Output directories for the transparency-converted and resized figures.
fig_out_path = fig_in_path + "/fig_trans/"
fig_resize_out_path = fig_in_path + "/fig_resize/"
def conv_black2transparency(in_file, out_path):
    """Convert pure-black pixels of an image to fully transparent pixels.

    Args:
        in_file: path of the input image.
        out_path: directory prefix the converted PNG is written under
            (the input path is appended to it).

    Returns:
        0 on completion.
    """
    img = Image.open(in_file)
    img = img.convert("RGBA")
    datas = img.getdata()
    newData = []
    for item in datas:
        # RGB (0, 0, 0) is treated as the background colour to knock out.
        if item[0] == 0 and item[1] == 0 and item[2] == 0:
            newData.append((255, 255, 255, 0))
        else:
            newData.append(item)
    img.putdata(newData)
    out_file = out_path + in_file
    ensure_dir(out_file)
    img.save(out_file, "PNG")
    return 0
#get the file recursively
# Module-level state shared with get_recursive_file_list(): file_list
# accumulates matching image paths; trans_flag marks that the transparency
# pass already ran so the fig_trans output directory may be descended into.
file_list = []
trans_flag = False
def get_recursive_file_list(path):
    """Recursively collect png/bmp/jpg/jpeg files under `path` into the
    module-level `file_list` and return it."""
    current_files = os.listdir(path)
    for file_name in current_files:
        full_file_name = os.path.join(path, file_name)
        print full_file_name
        if os.path.isdir(full_file_name):
            if trans_flag == False:
                # Before the transparency pass, skip the fig_trans output
                # directory -- its contents are already converted.
                if os.path.realpath(full_file_name).find("fig_trans") != -1:
                    print "Transparence fig already done"
                    continue
            get_recursive_file_list(full_file_name)
        elif (None != re.search('(png|bmp|jpg|jpeg)$', file_name, re.IGNORECASE)):
            file_list.append(full_file_name)
    return file_list
def ensure_dir(f):
    """Create the parent directory of path `f` when it does not yet exist.

    Returns 0 in all cases.
    """
    parent = os.path.dirname(f)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return 0
# Pass 1: resize every collected image in place and convert its black
# background to transparency.
fig_in_list = get_recursive_file_list(fig_in_path)
img_size = 500,400
for fn in fig_in_list:
    img = Image.open(fn)
    img = img.resize(img_size,Image.ANTIALIAS)
    fname,extension = os.path.splitext(fn)
    newfile = fname+extension
    if extension != ".png" :
        newfile = fname + ".png"
    #fig_resize_out_file = fig_resize_out_path + newfile
    #ensure_dir(fig_resize_out_file)
    # NOTE(review): the resized image is saved back over the original path
    # `fn`, not to `newfile`/fig_resize_out_path -- confirm this is intended.
    img.save(fn,"PNG")
    print "Resizing file : %s" % (fn)
    conv_black2transparency(fn, fig_out_path)
    print("File %s is converted to transparency.") %fn
print("Congratulations! All of the raw fig have been converted to transparency.")
# Pass 2: stack every transparent figure on top of the previous composite.
trans_flag = True
file_list = []
trans_fig_list = get_recursive_file_list(fig_out_path)
print "fig_out_path=%s" %fig_out_path
print trans_fig_list
fig_temp = "fig_final.png"
img0 = Image.open(trans_fig_list[0])
img1 = Image.open(trans_fig_list[1])
Image.alpha_composite(img0, img1).save(fig_temp, "PNG")
print "First composition"
for i in xrange(2,len(trans_fig_list)):
    img0 = Image.open(fig_temp)
    img1 = Image.open(trans_fig_list[i])
    Image.alpha_composite(img0, img1).save(fig_temp, "PNG")
| mit | Python | |
21b7b4f2be33eb30545292a1c46f4072d3795e97 | Add link.py | dustalov/watlink,dustalov/watlink | misc/link.py | misc/link.py | #!/usr/bin/env python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import argparse
import csv
import sys
import itertools
from collections import defaultdict, Counter
from math import log
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count
# Command-line interface: synset file, is-a relation file, and the number
# of hypernym senses to keep per synset (k).
parser = argparse.ArgumentParser()
parser.add_argument('--synsets', required=True)
parser.add_argument('--isas', required=True)
parser.add_argument('-k', nargs='?', type=int, default=6)
args = vars(parser.parse_args())
# synsets: id -> list of lowercase words; index: word -> ids of synsets
# containing it; lexicon: the set of every known word.
synsets, index, lexicon = {}, defaultdict(lambda: set()), set()
with open(args['synsets']) as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        synsets[int(row[0])] = [word.lower() for word in row[2].split(', ') if word]
        for word in synsets[int(row[0])]:
            index[word].add(int(row[0]))
        lexicon.update(synsets[int(row[0])])
# isas: hyponym word -> set of hypernym words, restricted to the lexicon.
isas = defaultdict(lambda: set())
with open(args['isas']) as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for hyponym, hypernym in reader:
        if hyponym in lexicon and hypernym in lexicon:
            isas[hyponym].add(hypernym)
# Inverse document frequency of each hypernym, where a "document" is a
# synset that has at least one hypernym.
idf, D = defaultdict(lambda: 0), .0
for words in synsets.values():
    hypernyms = [isas[word] for word in words if word in isas]
    if not hypernyms:
        continue
    for hypernym in set.union(*hypernyms):
        idf[hypernym] += 1
    D += 1
idf = {hypernym: log(D / df) for hypernym, df in idf.items()}
def tf(w, words):
    """Term frequency: how many times `w` occurs in `words`, as a float."""
    occurrences = Counter(words)
    return float(occurrences[w])
def tfidf(w, words):
    """TF-IDF weight of `w` within `words`; hypernyms absent from the
    module-level `idf` table default to an idf of 1."""
    return tf(w, words) * idf.get(w, 1.)
# hctx: synset id -> TF-IDF-weighted bag of its hypernym words (the
# synset's "hypernym context"); synsets without hypernyms are omitted.
hctx = {}
for id, words in synsets.items():
    hypernyms = list(itertools.chain(*(isas[word] for word in words if word in isas)))
    if not hypernyms:
        continue
    hctx[id] = {word: tfidf(word, hypernyms) for word in hypernyms}
# Vectorizer fitted over every hypernym context, for cosine comparisons.
v = DictVectorizer().fit(hctx.values())
def emit(id):
    """Disambiguate the hypernyms of synset `id`.

    For each hypernym word, choose the candidate hypernym synset whose
    TF-IDF vector is most cosine-similar to this synset's hypernym
    context, then keep the args['k'] best-scoring pairs.

    Returns:
        (id, {hypernym_word: hypernym_synset_id}) for the kept pairs.
    """
    hypernyms, vector, hsenses = hctx[id], v.transform(hctx[id]), {}
    for hypernym in hypernyms:
        candidates = {hid: synsets[hid] for hid in index[hypernym]}
        if not candidates:
            continue
        candidates = {hid: {word: tfidf(word, words) for word in words} for hid, words in candidates.items()}
        candidates = {hid: sim(vector, v.transform(words)) for hid, words in candidates.items()}
        hid, cosine = max(candidates.items(), key=itemgetter(1))
        if cosine > 0:
            hsenses[(hypernym, hid)] = cosine
    # Keep the k highest-scoring (word, id) pairs and turn those keys into
    # a word -> id mapping.
    hsenses = dict(dict(sorted(hsenses.items(), key=itemgetter(1), reverse=True)[:args['k']]).keys())
    return (id, hsenses)
# Fan the per-synset disambiguation out over all CPUs; results stream to
# stdout as "id<TAB>word#id, ..." lines, progress messages to stderr.
i = 0
with Pool(cpu_count()) as pool:
    for id, hsenses in pool.imap_unordered(emit, hctx):
        i += 1
        print('%d\t%s' % (id, ', '.join(('%s#%d' % e for e in hsenses.items()))))
        if i % 1000 == 0:
            print('%d entries out of %d done.' % (i, len(hctx)), file=sys.stderr, flush=True)
if len(hctx) % 1000 != 0:
    # Report the final partial batch so the progress log always ends complete.
    print('%d entries out of %d done.' % (len(hctx), len(hctx)), file=sys.stderr, flush=True)
| mit | Python | |
e1b47df9fadb888dafc32abc8018b15477d74feb | Add python test unit. | mrlitong/fpsgame,mrlitong/fpsgame,mrlitong/Game-Engine-Development-Usage,mrlitong/fpsgame | fpsgame/tests.py | fpsgame/tests.py | from ctypes import *
import sys
import os
import xml.etree.ElementTree as ET
# Location of the built binaries relative to this test file.
binaries = '../../../binaries'
# Work out the platform-dependent library filename
# NOTE(review): raises KeyError on platforms other than posix/nt -- confirm
# that is the intended failure mode.
dll_filename = {
    'posix': './libCollada_dbg.so',
    'nt': 'Collada_dbg.dll',
}[os.name]
eebbc6743f4aa5c29b7b915580cf9ba0362d889a | Add tests for error cases for the array API elementwise functions | cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy | numpy/_array_api/tests/test_elementwise_functions.py | numpy/_array_api/tests/test_elementwise_functions.py | from inspect import getfullargspec
from numpy.testing import assert_raises
from .. import asarray, _elementwise_functions
from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
from .._dtypes import (_all_dtypes, _boolean_dtypes, _floating_dtypes,
_integer_dtypes, _integer_or_boolean_dtypes,
_numeric_dtypes)
def nargs(func):
    """Number of named positional arguments in `func`'s signature."""
    spec = getfullargspec(func)
    return len(spec.args)
def test_function_types():
    """Check that each elementwise function rejects dtypes outside its
    declared input category by raising TypeError."""
    # Test that every function accepts only the required input types. We only
    # test the negative cases here (error). The positive cases are tested in
    # the array API test suite.
    elementwise_function_input_types = {
        'abs': 'numeric',
        'acos': 'floating',
        'acosh': 'floating',
        'add': 'numeric',
        'asin': 'floating',
        'asinh': 'floating',
        'atan': 'floating',
        'atan2': 'floating',
        'atanh': 'floating',
        'bitwise_and': 'integer_or_boolean',
        'bitwise_invert': 'integer_or_boolean',
        'bitwise_left_shift': 'integer',
        'bitwise_or': 'integer_or_boolean',
        'bitwise_right_shift': 'integer',
        'bitwise_xor': 'integer_or_boolean',
        'ceil': 'numeric',
        'cos': 'floating',
        'cosh': 'floating',
        'divide': 'floating',
        'equal': 'all',
        'exp': 'floating',
        'expm1': 'floating',
        'floor': 'numeric',
        'floor_divide': 'numeric',
        'greater': 'numeric',
        'greater_equal': 'numeric',
        'isfinite': 'numeric',
        'isinf': 'numeric',
        'isnan': 'numeric',
        'less': 'numeric',
        'less_equal': 'numeric',
        'log': 'floating',
        'logaddexp': 'floating',
        'log10': 'floating',
        'log1p': 'floating',
        'log2': 'floating',
        'logical_and': 'boolean',
        'logical_not': 'boolean',
        'logical_or': 'boolean',
        'logical_xor': 'boolean',
        'multiply': 'numeric',
        'negative': 'numeric',
        'not_equal': 'all',
        'positive': 'numeric',
        'pow': 'floating',
        'remainder': 'numeric',
        'round': 'numeric',
        'sign': 'numeric',
        'sin': 'floating',
        'sinh': 'floating',
        'sqrt': 'floating',
        'square': 'numeric',
        'subtract': 'numeric',
        'tan': 'floating',
        'tanh': 'floating',
        'trunc': 'numeric',
    }

    # Map a category name to the tuple of dtypes it allows.
    _dtypes = {
        'all': _all_dtypes,
        'numeric': _numeric_dtypes,
        'integer': _integer_dtypes,
        'integer_or_boolean': _integer_or_boolean_dtypes,
        'boolean': _boolean_dtypes,
        'floating': _floating_dtypes,
    }

    def _array_vals():
        # One scalar array per dtype, covering every dtype category.
        for d in _integer_dtypes:
            yield asarray(1, dtype=d)
        for d in _boolean_dtypes:
            yield asarray(False, dtype=d)
        for d in _floating_dtypes:
            yield asarray(1., dtype=d)

    # For every function, every disallowed input dtype combination must
    # raise TypeError (unary and binary functions handled separately).
    for x in _array_vals():
        for func_name, types in elementwise_function_input_types.items():
            dtypes = _dtypes[types]
            func = getattr(_elementwise_functions, func_name)
            if nargs(func) == 2:
                for y in _array_vals():
                    if x.dtype not in dtypes or y.dtype not in dtypes:
                        assert_raises(TypeError, lambda: func(x, y))
            else:
                if x.dtype not in dtypes:
                    assert_raises(TypeError, lambda: func(x))
def test_bitwise_shift_error():
    """Shift functions must raise ValueError for negative shift amounts."""
    # bitwise shift functions should raise when the second argument is negative
    assert_raises(ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1])))
    assert_raises(ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1])))
| mit | Python | |
412d27b31dc5644c84ac90179fe74669ce8a406c | change description & goal from varchar(100) to text | talkoopaiva/talkoohakemisto-api | talkoohakemisto/migrations/versions/221e6ee3f6c9_adjust_goal_and_description_size.py | talkoohakemisto/migrations/versions/221e6ee3f6c9_adjust_goal_and_description_size.py | """adjust goal and description size
Revision ID: 221e6ee3f6c9
Revises: 27f12bb68b12
Create Date: 2014-04-12 17:04:16.750942
"""
# revision identifiers, used by Alembic.
revision = '221e6ee3f6c9'
down_revision = '27f12bb68b12'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Widen voluntary_work.description and .goal from varchar(100) to text.

    Raw ALTER TABLE statements preserve existing data while lifting the
    length limit. (The trailing dead `pass` was removed.)
    """
    op.execute(
        '''
        ALTER TABLE voluntary_work
        ALTER COLUMN description
        TYPE text
        '''
    )
    op.execute(
        '''
        ALTER TABLE voluntary_work
        ALTER COLUMN goal
        TYPE text
        '''
    )
def downgrade():
    """Revert voluntary_work.description and .goal back to varchar(100).

    NOTE(review): this downgrade fails if any row holds text longer than
    100 characters -- confirm that is acceptable before relying on it.
    (The trailing dead `pass` was removed.)
    """
    op.execute(
        '''
        ALTER TABLE voluntary_work
        ALTER COLUMN description
        TYPE varchar(100)
        '''
    )
    op.execute(
        '''
        ALTER TABLE voluntary_work
        ALTER COLUMN goal
        TYPE varchar(100)
        '''
    )
| mit | Python | |
8c1f1c0728b261526b19c46dbd459bbc0f4e97a8 | add leetcode Maximum Depth of Binary Tree | Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code | leetcode/MaximumDepthOfBinaryTree/solution.py | leetcode/MaximumDepthOfBinaryTree/solution.py | # -*- coding:utf-8 -*-
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path
        (0 for an empty tree), computed level by level."""
        if root is None:
            return 0
        depth = 0
        frontier = [root]
        while frontier:
            depth += 1
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child is not None]
        return depth
| mit | Python | |
558c68060903608abe0bbe15303f192eacf529eb | Add way to call converters/harvesters in 0.2.0 | materials-data-facility/forge | proto_main.py | proto_main.py | from importlib import import_module
def call_harvester(source_name, **kwargs):
    """Import and run the harvester module for `source_name`.

    Loads mdf_indexers.harvesters.<source_name>_harvester and forwards
    `kwargs` to its harvest() entry point.
    """
    harvester = import_module("mdf_indexers.harvesters." + source_name + "_harvester")
    harvester.harvest(**kwargs)
def call_converter(sources, input_path=None, metadata=None, verbose=False):
    """Import and run the converter module for each source in `sources`.

    Args:
        sources: a source name or list of source names.
        input_path: optional dataset directory; when omitted, a per-source
            default of "mdf_indexers/datasets/<source>/" is used.
        metadata: optional metadata passed through to each converter.
        verbose: when True, print progress messages.
    """
    if not isinstance(sources, list):
        sources = [sources]
    if verbose:
        print("CONVERTING THE FOLLOWING DATASETS:", sources)
    for source_name in sources:
        if verbose:
            print("\nCONVERTER FOR", source_name, "\n")
        converter = import_module("mdf_indexers.converters." + source_name + "_converter")
        # Relative path is from calling function, not sub-function: paths.datasets will be wrong
        # Use "mdf_indexers/datasets/X" instead
        # BUG FIX: the default was assigned to input_path inside the loop,
        # so every source after the first reused the first source's path.
        source_path = input_path or ("mdf_indexers/datasets/" + source_name + "/")
        converter.convert(input_path=source_path, metadata=metadata, verbose=verbose)
    if verbose:
        print("\nALL CONVERTING COMPLETE")
if __name__ == "__main__":
    import sys
    # Forward any command-line arguments positionally to call_converter:
    # the first is `sources`, the rest map to its remaining parameters.
    if len(sys.argv) > 1:
        call_converter(*sys.argv[1:])
    else:
        # NOTE(review): call_converter() requires `sources`, so running the
        # script with no arguments raises TypeError -- confirm intent.
        call_converter()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.