commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
68ee23dd0f145e82ce0f3c0c66109ee435eadc25 | Add programs button element | NejcZupec/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core | src/lib/constants/element/page_header/lhn_menu/button_programs.py | src/lib/constants/element/page_header/lhn_menu/button_programs.py | SELECTOR = '[data-model-name="Program"]'
| apache-2.0 | Python | |
23f626ddaabfa799da48ee35c29db05f95f8a732 | Add import script for Rhondda Cynon Taff | andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_rct.py | polling_stations/apps/data_collection/management/commands/import_rct.py | """
Import Rhondda Cynon Taf
note: this script takes quite a long time to run
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
from data_collection.google_geocoding_api_wrapper import (
GoogleGeocodingApiWrapper,
PostcodeNotFoundException
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Rhondda Cynon Taf
"""
council_id = 'W06000016'
addresses_name = 'PROPERTYLISTINGFORDEMOCRACYCLUB.csv'
stations_name = 'POLLINGSTATIONS8MARCH2016.csv'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4,
record.address5
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
# remove trailing "\n" if present
if address[-1:] == '\n':
address = address[:-1]
# attempt to attach postcode if missing
postcode = record.postcode
if not postcode:
gwrapper = GoogleGeocodingApiWrapper(address, self.council_id, 'UTA')
try:
postcode = gwrapper.address_to_postcode()
except PostcodeNotFoundException:
postcode = ''
"""
No grid references were supplied, so attempt to
derive a grid ref from postcode if we have that
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
if postcode:
try:
gridref = geocode(postcode)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
location = None
else:
location = None
return {
'internal_council_id': record.polling_district,
'postcode' : postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
# format address
address = ", ".join([
record.address1,
record.address2,
record.address3,
record.address4,
record.address5,
record.address6,
])
while ", , " in address:
address = address.replace(", , ", ", ")
# remove trailing ", " if present
if address[-2:] == ', ':
address = address[:-2]
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.district
}
| bsd-3-clause | Python | |
f257f788287080dfef42aeb18de62531a9c7d3df | add a Twisted executor, using twisted.web.client.Agent | ducksboard/libsaas,livingbio/libsaas,80vs90/libsaas,CptLemming/libsaas | libsaas/executors/twisted_executor.py | libsaas/executors/twisted_executor.py | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import logging
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from twisted.internet import defer, protocol, reactor
from twisted.web import client, http, http_headers
from twisted.web.iweb import IBodyProducer
from zope.interface import implements
from . import base
__all__ = ['TwistedExecutor']
logger = logging.getLogger('libsaas.executor.twisted_executor')
URLENCODE_METHODS = ('GET', 'DELETE', 'HEAD', 'OPTIONS')
class StringBodyProducer(object):
"""
A IBodyProducer that just writes the passed string as-is.
"""
implements(IBodyProducer)
def __init__(self, body):
self.body = body
self.length = len(self.body)
def startProducing(self, consumer):
consumer.write(self.body)
return defer.succeed(None)
def stopProducing(self):
pass
def pauseProducing(self):
pass
def resumeProducing(self):
pass
class HTTPResponseError(Exception):
"""
The server returned a non 2xx HTTP response.
"""
class HTTPResponseProtocol(protocol.Protocol):
"""
A simple protocol class to interpret data from a Twisted response.
"""
def __init__(self, parser, tolerant=False):
self.parser = parser
self.buffer = StringIO()
self.ok_reasons = [client.ResponseDone]
if tolerant:
self.ok_reasons.append(http.PotentialDataLoss)
def handle_response(self, response):
self.finished = defer.Deferred(self.cancel)
self.code = response.code
self.phrase = response.phrase
self.headers = response.headers
response.deliverBody(self)
return self.finished
def dataReceived(self, data):
self.buffer.write(data)
def connectionLost(self, reason):
if not self.finished:
return
if not reason.check(*self.ok_reasons):
self.finished.errback(reason)
return
try:
headers = dict(self.headers.getAllRawHeaders())
ret = self.parser(self.buffer.getvalue(), self.code, headers)
except:
self.finished.errback()
else:
self.finished.callback(ret)
def cancel(self, d):
self.finished = None
self.stopProducing()
class TwistedExecutor(object):
"""
An executor using Twisted's Agent. It returns Deferreds that fire with the
parsed output.
"""
agent = client.Agent(reactor)
def __init__(self, agent, tolerant):
if agent is not None:
self.agent = agent
self.tolerant = tolerant
def __call__(self, request, parser):
logger.info('requesting %r', request)
uri = request.uri
producer = None
if request.method.upper() in URLENCODE_METHODS:
uri = self.encode_uri(request)
else:
producer = self.body_producer(request.params)
content_type = 'application/x-www-form-urlencoded'
request.headers['Content-Type'] = content_type
logger.debug('request uri: %r, producer: %r, headers: %r',
request.uri, producer, request.headers)
headers = self.prepare_headers(request.headers)
d = self.agent.request(method=request.method, uri=request.uri,
headers=headers, bodyProducer=producer)
return d.addCallback(self.got_response, parser)
def encode_uri(self, request):
if not request.params:
return request.uri
return request.uri + '?' + urlencode(request.params)
def body_producer(self, params):
if not params:
return None
payload = params
if isinstance(params, dict):
payload = urlencode(params)
return StringBodyProducer(payload)
def prepare_headers(self, headers):
prepared = {name: [val] for name, val in headers.items()}
return http_headers.Headers(prepared)
def got_response(self, response, parser):
"""
Handle a Twisted HTTP Response. Read and interpret the entire response
body and report the result. Returns a Deferred that will fire with the
content, possible processed in some way, or errback if there has been
an error reading the response or if the response itself is errorneous.
"""
protocol = HTTPResponseProtocol(parser, self.tolerant)
return protocol.handle_response(response)
def use(agent=None, tolerant=False):
base.use_executor(TwistedExecutor(agent, tolerant))
| mit | Python | |
93fb79bfc3242f991c03f59f0f6991a09e9421d9 | Create insert.py | joshavenue/python_notebook | insert.py | insert.py | x = [1,2,3]
x.insert(1, 1) // Insert 1 into the list 1 //
x = [1,1,2,3]
| unlicense | Python | |
f6c11bf0d3b8960561ce0f2433193543c1f63609 | Add Assignment_1 | KaiYan0729/nyu_python,KaiYan0729/nyu_python,KaiYan0729/nyu_python | programming_with_python/assignment_1/refactored_random_walk.py | programming_with_python/assignment_1/refactored_random_walk.py | #!/usr/bin/env python3
import random
import sys
import math
def get_random_direction():
    """Return one random unit step as a (dx, dy) displacement tuple.

    Each of the four compass directions is chosen with equal
    probability 1/4: west (-1, 0), east (1, 0), north (0, 1),
    south (0, -1).

    :return: one of (-1, 0), (1, 0), (0, 1), (0, -1)
    """
    # random.choice picks uniformly, matching the original four equal
    # probability bands; the direction-name strings in the original were
    # dead code (computed but never returned).
    return random.choice([(-1, 0), (1, 0), (0, 1), (0, -1)])
def take_all_walks(steps, runs):
    """Perform ``runs`` independent random walks of ``steps`` steps each.

    Every walk starts at the origin (0, 0); after each walk finishes its
    end position is recorded.

    :param steps: number of unit steps taken per walk
    :param runs: number of independent walks to perform
    :return: list of [x, y] end locations, one entry per walk
    """
    final_positions = []
    for _ in range(runs):
        x, y = 0, 0
        for _ in range(steps):
            dx, dy = get_random_direction()
            x += dx
            y += dy
        final_positions.append([x, y])
    return final_positions
def average_final_distance(endpoints):
    """Return the mean Euclidean distance from the origin over *endpoints*.

    :param endpoints: sequence of (x, y) coordinate pairs
    :return: average distance of the points from (0, 0)
    """
    distances = [math.sqrt(x * x + y * y) for x, y in endpoints]
    return sum(distances) / len(endpoints)
if __name__ == "__main__":
steps = 10
if len(sys.argv) > 1:
steps = int(sys.argv[1])
runs = 50
if len(sys.argv) > 2:
runs = int(sys.argv[2])
end_locations = take_all_walks(steps, runs)
print("Done with walk, printing end location: ")
print(end_locations)
average_displacement = average_final_distance(end_locations)
print('Average displacement is: ')
print(average_displacement) | mit | Python | |
db7534a83986e404d7f8ed02d42dfa76918d5126 | Create regExpTest.py | stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests | regExpTest.py | regExpTest.py | #!/usr/bin/python
#import the necessary modules
import re # the regexp module
# define the necessary fcns
def lookForCmd( theCommand ):
    """Match ``theCommand`` against each regex in ``commandsToMatchList``.

    For every pattern that matches, prints the command, the index of the
    matching entry and the associated function name taken from the
    parallel ``commandsAssociatedFcnsList``.  Both lists are module-level
    globals defined below this function.  Iteration deliberately
    continues over the whole list (the ``break`` is commented out), so
    every entry is reported as either a match or a miss.
    """
    for commandIndex, commandAgainst in enumerate( commandsToMatchList ): # works > but the above is used to fetch the index AND the item (..)
        #for commandAgainst in commandsToMatchList: # works > but the above is used to fetch the index AND the item (..)
        #possibleMatch = re.search(r"%s" % commandToMatch, text) # works ?
        possibleMatch = re.search(r"%s" % commandAgainst, theCommand) # works ?
        # check if the match was successfull and end the iteraction / handle it
        if possibleMatch:
            print "Matching command found:"
            print theCommand
            print "Matching command index"
            print commandIndex
            print "Associated function:"
            print commandsAssociatedFcnsList[commandIndex]
            # end the iteration as we found the command
            #break
        else:
            # continue to loop until 'cmdsList' has been fully iterated over (..)
            print "Matching command not found at this entry index"
text = "Hello world from the Tef"
commandToMatch = "Hello (.*) from the Tef"
#match = re.search(r"Hello (.*) from the Tef", text) # works > finds it
match = re.search(r"%s" % commandToMatch, text) # works > finds it
if match:
print "found!"
else:
print "not found"
commandsToMatchList = [ "Hello (.*) from the Tef", "Do (.*) I tell you to", "Suck (.*) dummy!"]
commandsAssociatedFcnsList = [ "HelloFromTef()", "DoIt", "suckMyBalls"]
print commandsToMatchList # work > simply prints out the lists of recognized commands
print "\n"
lookForCmd( text )
| mit | Python | |
6a47701ea874e657475542809ac0f9320063cb9b | Add script to help create release zip | james91b/ida_ipython,james91b/ida_ipython,tmr232/ida_ipython,james91b/ida_ipython | releasezip.py | releasezip.py | import os
import sys
import zipfile
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open zip handle *ziph*.

    Archive member names keep the on-disk path (including *path* itself).
    """
    for dirpath, _subdirs, filenames in os.walk(path):
        for filename in filenames:
            ziph.write(os.path.join(dirpath, filename))
def main(version) :
    """Build a release zip named ``release-<version>.zip``.

    Bundles the ``python``, ``idc`` and ``notebook`` directories plus the
    built plugin binaries (renamed into ``plugins/``) and the README.
    Assumes the script is run from the repository root where those paths
    exist.

    :param version: release name used in the output zip filename
    """
    release = zipfile.ZipFile('release-{}.zip'.format(version), 'w')
    zipdir('python', release)
    zipdir('idc', release)
    zipdir('notebook', release)
    # Plugin binaries are placed under plugins/ inside the archive.
    release.write('build/release/ida_ipython.p64', 'plugins/ida_ipython.p64')
    release.write('build/release/ida_ipython.plw', 'plugins/ida_ipython.plw')
    release.write('README.md')
    release.close()
if __name__ == "__main__":
if len(sys.argv) > 1:
main(sys.argv[1])
else:
print "No release name provided" | mit | Python | |
ce5187d060978021f71f4045642665448ace72f7 | Add plotly based grapher tool as Jupyter notebook cell | CiscoSystems/os-sqe,CiscoSystems/os-sqe,CiscoSystems/os-sqe | jupyter/plot.py | jupyter/plot.py | # to be executed as Jupyter notebook
get_ipython().system('pip install plotly --upgrade')
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
import json
import plotly.graph_objs as go
print(__version__) # requires version >= 1.9.0
init_notebook_mode() # run at the start of every ipython notebook to use plotly.offline this injects the plotly.js source files into the notebook
sce_t = []
sce_y = []
fi_t = []
fi_y = []
n9_t = []
n9_y = []
ex_t = []
ex_y = []
ex_text = []
st_t = []
st_y = []
st_text = []
with open('C:\\Users\\kshileev\\desktop\\logs\\json_new.log') as f:
for line in f:
j = json.loads(line)
timestamp = j['@timestamp']
if j.get('status', 'None') == 'Start':
st_t += [timestamp]
st_y += [10]
st_text += [j['name']]
if 'n_ports' in j:
sce_t += [timestamp]
sce_y += [j['n_ports']]
if 'ucsm' in j['name'] and 'n_vlans' in j:
fi_t += [timestamp]
fi_y += [j['n_vlans']]
if 'nexus' in j['name'] and 'n_vlans' in j:
if 'service' not in j:
n9_t += [timestamp]
n9_y += [j['n_vlans']]
if 'EXCEPTION' in j:
ex_t += [timestamp]
ex_y += [1]
ex_text += [j['EXCEPTION']]
sce = go.Scatter(x=sce_t, y=sce_y, text=['th net-sub-port']*len(sce_t), name='load', mode='markers')
fi = go.Scatter(x=fi_t, y=fi_y, text=['vlans']*len(fi_t), name='FI', mode='markers')
n9 = go.Scatter(x=n9_t, y=n9_y, text=['vlans']*len(n9_t), name='N9K', mode='markers')
ex = go.Scatter(x=ex_t, y=ex_y, text=ex_text, name='Exceptions={0}'.format(len(ex_t)), mode='markers')
st = go.Scatter(x=st_t, y=st_y, text=st_text, name='Starts={0}'.format(len(st_t)), mode='markers')
data = [st, ex, sce, fi, n9]
iplot(data)
| apache-2.0 | Python | |
d1166b9e28920a7303802cb80ff06bcb7c1d074f | Add BEST model to examples. | evidation-health/pymc3,Anjum48/pymc3,tyarkoni/pymc3,evidation-health/pymc3,clk8908/pymc3,wanderer2/pymc3,jameshensman/pymc3,MichielCottaar/pymc3,superbobry/pymc3,CVML/pymc3,jameshensman/pymc3,CVML/pymc3,JesseLivezey/pymc3,Anjum48/pymc3,hothHowler/pymc3,hothHowler/pymc3,MCGallaspy/pymc3,MichielCottaar/pymc3,dhiapet/PyMC3,kmather73/pymc3,kmather73/pymc3,tyarkoni/pymc3,dhiapet/PyMC3,MCGallaspy/pymc3,clk8908/pymc3,arunlodhi/pymc3,JesseLivezey/pymc3,wanderer2/pymc3,superbobry/pymc3,arunlodhi/pymc3 | pymc3/examples/best.py | pymc3/examples/best.py | """
Bayesian Estimation Supersedes the T-Test
This model replicates the example used in:
Kruschke, John. (2012) Bayesian estimation supersedes the t test. Journal of Experimental Psychology: General.
The original pymc2 implementation was written by Andrew Straw and can be found here: https://github.com/strawlab/best
Ported to PyMC3 by Thomas Wiecki (c) 2015.
"""
import numpy as np
import pymc3 as pm
drug = (101,100,102,104,102,97,105,105,98,101,100,123,105,103,100,95,102,106,
109,102,82,102,100,102,102,101,102,102,103,103,97,97,103,101,97,104,
96,103,124,101,101,100,101,101,104,100,101)
placebo = (99,101,100,101,102,100,97,101,104,101,102,102,100,105,88,101,100,
104,100,100,100,101,102,103,97,101,101,100,101,99,101,100,100,
101,100,99,101,100,102,99,100,99)
y1 = np.array(drug)
y2 = np.array(placebo)
y = np.concatenate((y1, y2))
mu_m = np.mean( y )
mu_p = 0.000001 * 1/np.std(y)**2
sigma_low = np.std(y)/1000
sigma_high = np.std(y)*1000
with pm.Model() as model:
group1_mean = pm.Normal('group1_mean', mu=mu_m, tau=mu_p, testval=y1.mean())
group2_mean = pm.Normal('group2_mean', mu=mu_m, tau=mu_p, testval=y2.mean())
group1_std = pm.Uniform('group1_std', lower=sigma_low, upper=sigma_high, testval=y1.std())
group2_std = pm.Uniform('group2_std', lower=sigma_low, upper=sigma_high, testval=y2.std())
nu = pm.Exponential('nu_minus_one', 1/29.) + 1
lam1 = group1_std**-2
lam2 = group2_std**-2
group1 = pm.T('drug', nu=nu, mu=group1_mean, lam=lam1, observed=y1)
group2 = pm.T('placebo', nu=nu, mu=group2_mean, lam=lam2, observed=y2)
diff_of_means = pm.Deterministic('difference of means', group1_mean - group2_mean)
diff_of_stds = pm.Deterministic('difference of stds', group1_std - group2_std)
effect_size = pm.Deterministic('effect size', diff_of_means / pm.sqrt((group1_std**2 + group2_std**2) / 2))
step = pm.NUTS()
def run(n=3000):
if n == "short":
n = 500
with model:
trace = pm.sample(n, step)
burn = n/10
pm.traceplot(trace[burn:]);
pm.plots.summary(trace[burn:])
if __name__ == '__main__':
run()
| apache-2.0 | Python | |
85eaf2b86aefc7697af78f457888652f8a2bbd3d | Add tx.origin check | b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril | mythril/analysis/modules/tx_origin.py | mythril/analysis/modules/tx_origin.py | from z3 import *
from mythril.analysis.report import Issue
import re
'''
MODULE DESCRIPTION:
Check for constraints on tx.origin (i.e., access to some functionality is restricted to a specific origin).
'''
def execute(statespace):
    """Scan every node's path constraints for references to ``tx.origin``.

    :param statespace: symbolic execution state space whose ``nodes``
        dict maps keys to nodes carrying a ``constraints`` list
    :return: list of ``Issue`` warnings, one per constraint whose textual
        form mentions "origin"
    """
    issues = []

    for k in statespace.nodes:
        node = statespace.nodes[k]

        for constraint in node.constraints:

            # A constraint mentioning "origin" indicates access control
            # keyed on tx.origin, which is unsafe (phishing via proxy
            # contracts); recommend msg.sender instead.
            if(re.search(r'origin', str(constraint))):

                issue = Issue(node.module_name, node.function_name, None, "Use of tx.origin", "Warning", \
                    "Access to the function " + node.function_name + " is granted based on tx.origin. Use tx.sender instead.\nSee also: https://solidity.readthedocs.io/en/develop/security-considerations.html#tx-origin"
                )

                issues.append(issue)

    return issues
| mit | Python | |
0995c204e5e05b0a1ee5f8c00dbae58f1f114386 | Add integrations credentials manager | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/integrations/credentials/manager.py | dbaas/integrations/credentials/manager.py | import models
import logging
LOG = logging.getLogger(__name__)
class IntegrationCredentialManager(object):
    """Lookup helper for integration credentials stored in the database."""

    @classmethod
    def get_credentials(cls, environment, integration):
        """Return the first credential for *environment* and *integration* type.

        Indexing [0] assumes at least one matching credential exists;
        otherwise this raises IndexError.
        """
        return models.IntegrationCredential.objects.filter(environments=environment, integration_type=integration)[0]
2dfc4fcc61c0f9d00860168d44da5e03db8e61eb | Add a class to get a random photo of a folder | MarkusAmshove/Photobox | photobox/cheesefolder.py | photobox/cheesefolder.py | import random
class Cheesefolder():
    """Wraps a folder object and hands out randomly chosen photos from it."""

    def __init__(self, folder):
        """Store the folder whose files we will pick from.

        :param folder: object exposing ``getfiles_fullpath()`` returning
            a sequence of file paths
        """
        self.directory = folder

    def getrandomphoto(self):
        """Return the full path of one uniformly random file in the folder.

        :raises IndexError: if the folder contains no files
        """
        files = self.directory.getfiles_fullpath()
        # random.choice is equivalent to randint(0, len-1) indexing, but
        # raises a clear IndexError on an empty folder instead of the
        # confusing ValueError that randint(0, -1) produced.
        return random.choice(files)
| mit | Python | |
5ed3e15f200fbec3a5e2aa284e044206a71d5ed4 | Create frus_pdf_urls.py | thomasgpadilla/webscraping | frus_pdf_urls.py | frus_pdf_urls.py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
html = urlopen("http://digicoll.library.wisc.edu/cgi-bin/FRUS/FRUS-idx?type=header&id=FRUS.FRUS1932v05")
soup = BeautifulSoup(html, 'html.parser')
with open("fdr_pdf_urls.txt", "w") as file:
for itemlinks in soup.find_all("p", {"class":"cntsitem"}):
for link in itemlinks.find_all('a'):
itemurl = 'http://digicoll.library.wisc.edu/' + link.get('href')
pdfurl = requests.get (itemurl)
pdfsoup=BeautifulSoup(pdfurl.content, 'html.parser')
for pdflink in pdfsoup.find_all("div", {"class":"itemmd"}):
for plink in pdflink.find_all('a'):
print (plink.get('href'))
file.write(plink.get('href')+"\n")
| cc0-1.0 | Python | |
50b3396ef0df860531fe05778767cfd8a5be32c3 | Solve for ini5 | DanZBishop/Rosalind | ini5/ini5.py | ini5/ini5.py | #!/usr/bin/python
# Copy every even-numbered line (2nd, 4th, ...) of the input file to the
# output file, preserving line endings.
import argparse
parser = argparse.ArgumentParser(description="Outputs even numbered lines from input file")
parser.add_argument("--input", "-i", required=True, help="Input file name")
parser.add_argument("-o", "--output", required=True, help="Output file name")
args = parser.parse_args()
inFile = open(args.input, "r")
outFile = open(args.output, "w")
# 1-based line counter; even values select every second line.
count = 0
for line in inFile:
    count += 1
    if count % 2 == 0:
        outFile.write(line)
# NOTE(review): inFile is never closed explicitly; it is released only at
# interpreter exit.
outFile.close()
2180ff5c7576100300898099a2316c66a8e56eb6 | add parser for inline lang ignore comments | myint/rstcheck,myint/rstcheck | src/rstcheck/inline_config.py | src/rstcheck/inline_config.py | """Inline config comment functionality."""
import re
import typing
RSTCHECK_COMMENT_REGEX = re.compile(r"\.\. rstcheck:")
class RstcheckCommentSyntaxError(Exception):
    """Raised when an rstcheck inline config comment is malformed."""

    def __init__(self, message: str, line_number: int) -> None:
        """Create the exception.

        :param message: Human-readable error description
        :param line_number: 1-based line where the bad comment was found
        """
        super().__init__(message)
        self.line_number = line_number
def find_ignored_languages(source: str) -> typing.Generator[str, None, None]:  # noqa: CCR001
    """Search the rst source for rstcheck inline ignore-languages comments.

    Languages are ignored via comment.

    For example, to ignore C++, JSON, and Python:

    >>> list(find_ignored_languages('''
    ... Example
    ... =======
    ...
    ... .. rstcheck: ignore-languages=cpp,json
    ...
    ... .. rstcheck: ignore-languages=python
    ... '''))
    ['cpp', 'json', 'python']

    :param source: Rst source code
    :raises RstcheckCommentSyntaxError: When the comment has invalid syntax
    :return: None
    :yield: Found languages to ignore
    """
    # Note: the doctest above previously showed double-quoted strings,
    # which can never match Python's single-quoted list repr.
    for (index, line) in enumerate(source.splitlines()):
        match = RSTCHECK_COMMENT_REGEX.match(line)
        if match:
            # Everything after ".. rstcheck:" must look like "key=value".
            key_and_value = line[match.end() :].strip().split("=")
            if len(key_and_value) != 2:
                raise RstcheckCommentSyntaxError(
                    'Expected "key=value" syntax', line_number=index + 1
                )

            if key_and_value[0] == "ignore-languages":
                for language in key_and_value[1].split(","):
                    yield language.strip()
| mit | Python | |
fb9bbe0a57e1dc083d25d28c7f7fc0b4633c96e0 | add Wikipedia plugin | spaceone/tehbot,tehron/tehbot | plugins/wiki/__init__.py | plugins/wiki/__init__.py | import plugins
import urllib
import urllib2
import json
import lxml.html
url = "https://en.wikipedia.org/w/api.php?%s"
def get_text(tree, xpath):
return "\n".join(e.text_content() for e in tree.xpath(xpath))
def wiki(connection, channel, nick, cmd, args):
"""Looks up a search term on Wikipedia"""
if not args:
return plugins.print_help(connection, channel, nick, None, cmd)
data = {
"action" : "opensearch",
"limit" : 1,
"format" : "json",
"search" : plugins.to_utf8(args)
}
req = urllib2.Request(url % urllib.urlencode(data))
pageurl = json.load(urllib2.urlopen(req))[-1]
txt = "[Wikipedia] "
if not pageurl:
return plugins.say(connection, channel, txt + "Search didn't find anything.")
tree = lxml.html.parse(urllib2.urlopen(pageurl[0]))
title = get_text(tree, "//h1[@id='firstHeading']")
short = get_text(tree, "//div[@id='mw-content-text']/p")
if not title or not short:
return plugins.say(connection, channel, txt + "Something went wrong.")
txt += "\x02%s\x0f\n%s" % (title, short[:300])
plugins.say(connection, channel, txt)
plugins.register_pub_cmd("wiki", wiki)
| mit | Python | |
6b3ab0cf35f20168d072d1df30dd8c4360385209 | disable paths | wathsalav/xos,zdw/xos,open-cloud/xos,zdw/xos,cboling/xos,xmaruto/mcord,xmaruto/mcord,open-cloud/xos,jermowery/xos,wathsalav/xos,xmaruto/mcord,wathsalav/xos,zdw/xos,cboling/xos,cboling/xos,opencord/xos,opencord/xos,zdw/xos,open-cloud/xos,xmaruto/mcord,jermowery/xos,cboling/xos,jermowery/xos,jermowery/xos,wathsalav/xos,cboling/xos,opencord/xos | plstackapi/planetstack/views/api_root.py | plstackapi/planetstack/views/api_root.py | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'roles': reverse('role-list', request=request, format=format),
#'nodes': reverse('node-list', request=request, format=format),
#'sites': reverse('site-list', request=request, format=format),
#'deploymentNetworks': reverse('deploymentnetwork-list', request=request, format=format),
#'slices': reverse('slice-list', request=request, format=format)
})
| from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
return Response({
'nodes': reverse('node-list', request=request, format=format),
'sites': reverse('site-list', request=request, format=format),
'deploymentNetworks': reverse('deploymentnetwork-list', request=request, format=format),
'slices': reverse('slice-list', request=request, format=format)
})
| apache-2.0 | Python |
0bef4682c6a81464fd6e72fddd6f0b5957f4d566 | Add a script to calculate paried roi bbox emean and std. | myfavouritekk/TPN | tools/data/calculate_paired_bbox_mean_std.py | tools/data/calculate_paired_bbox_mean_std.py | #!/usr/bin/env python
import argparse
import scipy.io as sio
import sys
import os.path as osp
import numpy as np
import cPickle
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.bbox_transform import bbox_transform
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('paired_gt_file')
parser.add_argument('save_mean_file')
parser.add_argument('save_std_file')
args = parser.parse_args()
deltas = []
gts = sio.loadmat(args.paired_gt_file)['gt']
for gt1, gt2 in gts:
if len(gt1) == 0: continue
deltas.append(bbox_transform(gt1, gt2))
delta = np.vstack(deltas)
mean = np.mean(delta, axis=0)
std = np.std(delta, axis=0)
with open(args.save_mean_file, 'wb') as f:
cPickle.dump(mean, f, cPickle.HIGHEST_PROTOCOL)
with open(args.save_std_file, 'wb') as f:
cPickle.dump(std, f, cPickle.HIGHEST_PROTOCOL)
| mit | Python | |
c6951374eba137614744928c18fa4d34de5c5d89 | Add a script to generate data sets for testing sorting algorithms. | vikash-india/ProgrammingProblems,vikash-india/ProgrammingProblems | src/algorithms/sorting/dataset_generator.py | src/algorithms/sorting/dataset_generator.py | # Description: Script to Generate Data Sets For Sorting Algorithms
import random
import logging
# Global Configuration
TOTAL_ROWS = 10
SORTED = False # Overrides REVERSE_SORTED and RANDOM_NUMBERS
REVERSE_SORTED = False # Overrides RANDOM_NUMBERS
RANDOM_NUMBERS = True # Least Precedence
WRITE_TO_FILE = False
def configure_logging(write_to_file_enabled):
    """Set up the root logger level.

    Uses INFO when file output is enabled (suppressing noisy debug
    lines) and DEBUG otherwise.
    """
    chosen_level = logging.INFO if write_to_file_enabled else logging.DEBUG
    logging.basicConfig(level=chosen_level)
def generate_numbers():
    """Generate a list of numbers based on the module-level configuration.

    Precedence: SORTED overrides REVERSE_SORTED, which overrides
    RANDOM_NUMBERS.  Random order is also the fallback when no flag is
    set, so ``numbers`` can never be unbound (the original raised
    NameError in that case).

    :return: list of the integers 1..TOTAL_ROWS in the configured order
    """
    if SORTED:
        numbers = list(range(1, TOTAL_ROWS + 1))
    elif REVERSE_SORTED:
        numbers = list(range(TOTAL_ROWS, 0, -1))
    else:
        # RANDOM_NUMBERS (or no flag at all): shuffled ascending range.
        # range() must be materialised before shuffling: random.shuffle
        # mutates in place and Python 3 ranges are immutable.  A single
        # shuffle already yields a uniform permutation; the original's
        # three consecutive shuffles were redundant.
        numbers = list(range(1, TOTAL_ROWS + 1))
        random.shuffle(numbers)
    logging.debug(numbers)
    return numbers
def write_to_file(numbers, filename):
    """Write one value per line to *filename*.

    This is a no-op unless the module-level WRITE_TO_FILE flag is True.

    :param numbers: iterable of values to write (one per line)
    :param filename: path of the output file (overwritten if it exists)
    """
    if WRITE_TO_FILE:
        logging.info('Writing data to file: {0}'.format(filename))
        with open(filename, 'w') as file_handle:
            for item in numbers:
                file_handle.write(str(item) + '\n')
def main():
    """Generate a dataset per the module-level flags and optionally save it.

    The output filename encodes the row count and the ordering mode,
    e.g. dataset/dataset_10_sorted.txt; the dataset/ directory must
    already exist for the write to succeed.
    """
    configure_logging(WRITE_TO_FILE)
    # Generate numbers based on configurations
    numbers = generate_numbers()
    # Write numbers to a file
    # Filenames Examples: dataset_10_reverse_sorted.txt, dataset_100_sorted.txt, dataset_1000_random.txt etc.
    filename="dataset/dataset_{0}_{1}.txt".format(TOTAL_ROWS, 'sorted' if SORTED else 'reverse_sorted' if REVERSE_SORTED else 'random')
    write_to_file(numbers, filename)
# Call Main
main()
| mit | Python | |
45917087377adef01a4d4ce829013a7958a3afe5 | Add PC / DWPC test | dhimmel/hetio | test/pathtools_test.py | test/pathtools_test.py | import os
import pytest
import hetio.readwrite
from hetio.pathtools import paths_between, DWPC
directory = os.path.dirname(os.path.abspath(__file__))
def test_disease_gene_example_dwpc():
"""
Test the DWPC computation from
https://doi.org/10.1371/journal.pcbi.1004259.g002
"""
path = os.path.join(directory, 'data', 'disease-gene-example-graph.json')
graph = hetio.readwrite.read_graph(path)
metagraph = graph.metagraph
# Define traversal
metapath = metagraph.metapath_from_abbrev('GiGaD')
source_id = 'Gene', 'IRF1'
target_id = 'Disease', 'Multiple Sclerosis'
# Extract paths
paths = paths_between(graph, source_id, target_id, metapath)
assert len(paths) == 3
# Test degree-weighted path count
dwpc = DWPC(paths, damping_exponent=0.5)
assert dwpc == pytest.approx(0.25 + 0.25 + 32**-0.5)
| cc0-1.0 | Python | |
5e8e48895778d8a55b010320dec6befbdaf85af0 | add tests for types.enums | NoMoKeTo/choo,NoMoKeTo/transit | src/tests/types/test_enums.py | src/tests/types/test_enums.py | import pytest
from choo.types import LineType, LineTypes, PlatformType, POIType, WalkSpeed, WayEvent, WayType
class TestEnums:
def test_serialize(self):
assert WayType.walk.serialize() == 'walk'
def test_unserialize(self):
assert WayType.unserialize('walk') is WayType.walk
with pytest.raises(AttributeError):
WayType.unserialize('unknown')
with pytest.raises(AttributeError):
WayType.unserialize('serialize')
def test_contains(self):
assert WayEvent.stairs_up in WayEvent.stairs
assert WayEvent.stairs_up in WayEvent.any
assert WayEvent.stairs in WayEvent.any
assert WayEvent.stairs not in WayEvent.stairs_up
assert WayEvent.stairs not in WayEvent.down
assert WayEvent.stairs not in WayEvent.elevator
assert WayEvent.any not in WayEvent.up
def test_iter(self):
assert set(WayEvent.stairs) == {WayEvent.stairs, WayEvent.stairs_up, WayEvent.stairs_down}
def test_contained_in(self):
assert set(WayEvent.up.contained_in()) == {WayEvent.up, WayEvent.any}
def test_repr(self):
assert repr(WayType.walk) == 'WayType.walk'
assert repr(WayEvent.stairs) == 'WayEvent.stairs'
assert repr(WalkSpeed.normal) == 'WalkSpeed.normal'
assert repr(LineType.train) == 'LineType.train'
assert repr(POIType.parking) == 'POIType.parking'
assert repr(PlatformType.street) == 'PlatformType.street'
class TestLineTypes:
def test_init(self):
LineTypes(LineType.bus)
with pytest.raises(TypeError):
assert LineTypes(1)
def test_contains(self):
assert LineType.train in LineTypes.any
assert LineType.any in LineTypes(LineType.train)
assert LineType.bus in LineTypes(LineType.bus_regional)
assert LineType.train not in LineTypes(LineType.bus_regional)
with pytest.raises(TypeError):
assert 1 in LineTypes.any
def test_exclude(self):
assert LineType.tram in LineTypes.any.exclude(LineType.train, LineType.bus)
assert LineType.bus_regional not in LineTypes.any.exclude(LineType.train, LineType.bus)
with pytest.raises(TypeError):
assert LineTypes.any.exclude(1)
def test_include(self):
assert LineType.tram not in LineTypes.none.include(LineType.train, LineType.bus)
assert LineType.bus_regional in LineTypes.none.include(LineType.train, LineType.bus)
with pytest.raises(TypeError):
assert LineTypes.none.include(1)
def test_repr(self):
linetypes = LineTypes.any.exclude(LineType.bus)
assert set(eval(repr(linetypes))) == set(linetypes)
| apache-2.0 | Python | |
ab8fefb20256a9d800804a0465a4cac7d47019dd | add test_alignment script | cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,mithro/HDMI2USB-litex-firmware,cr1901/HDMI2USB-litex-firmware | test/test_alignment.py | test/test_alignment.py | #!/usr/bin/env python3
import time
from litex.soc.tools.remote import RemoteClient
from litescope.software.driver.analyzer import LiteScopeAnalyzerDriver
wb = RemoteClient()
wb.open()
# # #
DVISAMPLER_DELAY_RST = 0x1
DVISAMPLER_DELAY_INC = 0x2
DVISAMPLER_DELAY_DEC = 0x4
DVISAMPLER_TOO_LATE = 0x1
DVISAMPLER_TOO_EARLY = 0x2
def configure_delay(channel, delay):
if channel == 0:
wb.regs.hdmi_in0_data0_cap_dly_ctl.write(DVISAMPLER_DELAY_RST)
for i in range(delay):
wb.regs.hdmi_in0_data0_cap_dly_ctl.write(DVISAMPLER_DELAY_INC)
elif channel == 1:
wb.regs.hdmi_in0_data1_cap_dly_ctl.write(DVISAMPLER_DELAY_RST)
for i in range(delay):
wb.regs.hdmi_in0_data1_cap_dly_ctl.write(DVISAMPLER_DELAY_INC)
elif channel == 2:
wb.regs.hdmi_in0_data2_cap_dly_ctl.write(DVISAMPLER_DELAY_RST)
for i in range(delay):
wb.regs.hdmi_in0_data2_cap_dly_ctl.write(DVISAMPLER_DELAY_INC)
else:
ValueError
def get_phase_status(channel):
phase_status = 0
if channel == 0:
wb.regs.hdmi_in0_data0_cap_phase_reset.write(1)
time.sleep(0.1)
phase_status = (wb.regs.hdmi_in0_data0_cap_phase.read() & 0x3)
elif channel == 1:
wb.regs.hdmi_in0_data1_cap_phase_reset.write(1)
time.sleep(0.1)
phase_status = (wb.regs.hdmi_in0_data1_cap_phase.read() & 0x3)
elif channel == 2:
wb.regs.hdmi_in0_data2_cap_phase_reset.write(1)
time.sleep(0.1)
phase_status = (wb.regs.hdmi_in0_data2_cap_phase.read() & 0x3)
else:
ValueError
return (phase_status == DVISAMPLER_TOO_LATE,
phase_status == DVISAMPLER_TOO_EARLY)
for channel in range(3):
for delay in range(32):
configure_delay(channel, delay)
too_late, too_early = get_phase_status(channel)
print("CHAN: {:d} / DELAY: {:d} / TOO_LATE: {:d} / TOO_EARLY: {:d}".format(
channel, delay, too_late, too_early))
# # #
wb.close()
| bsd-2-clause | Python | |
a3305a53d1007ee48b4dadd37c11efb2d78da7f9 | Add unittests for utils.ichunked() | eht16/python-logstash-async | tests/ichunked_test.py | tests/ichunked_test.py | # -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from random import randint
import unittest
from logstash_async.utils import ichunked
CHUNK_SIZE_SMALL = 1
CHUNK_SIZE_NORMAL = 100
CHUNK_SIZE_BIG = 750
CHUNK_ITERATIONS = 5
class IChunkedTest(unittest.TestCase):
# ----------------------------------------------------------------------
def _test_chunking(self, chunk_size, chunk_iterations):
# test data
random_extra_chunk_size = randint(0, chunk_size - 1)
test_sequence_size = chunk_size * chunk_iterations + random_extra_chunk_size
test_sequence = list(range(test_sequence_size))
# keep results for assertions
iterations = 0
iterated_elements = list()
# test
for sequence_subset in ichunked(test_sequence, chunk_size):
iterations += 1
iterated_elements.extend(sequence_subset)
self.assertLessEqual(len(sequence_subset), chunk_size)
expected_iterations = chunk_iterations
if random_extra_chunk_size > 0:
expected_iterations += 1 # add 1 because of 'random_extra_chunk_size'
self.assertListEqual(iterated_elements, test_sequence)
self.assertEqual(iterations, expected_iterations)
# ----------------------------------------------------------------------
def test_chunks_big_iterations_fixed(self):
self._test_chunking(CHUNK_SIZE_BIG, CHUNK_ITERATIONS)
# ----------------------------------------------------------------------
def test_chunks_big_iterations_random(self):
chunk_iterations = randint(3, 20)
self._test_chunking(CHUNK_SIZE_BIG, chunk_iterations)
# ----------------------------------------------------------------------
def test_chunks_normal_iterations_fixed(self):
self._test_chunking(CHUNK_SIZE_NORMAL, CHUNK_ITERATIONS)
# ----------------------------------------------------------------------
def test_chunks_normal_iterations_random(self):
chunk_iterations = randint(3, 20)
self._test_chunking(CHUNK_SIZE_NORMAL, chunk_iterations)
# ----------------------------------------------------------------------
def test_chunks_small_iterations_fixed(self):
self._test_chunking(CHUNK_SIZE_SMALL, CHUNK_ITERATIONS)
# ----------------------------------------------------------------------
def test_chunks_small_iterations_random(self):
chunk_iterations = randint(3, 20)
self._test_chunking(CHUNK_SIZE_SMALL, chunk_iterations)
# ----------------------------------------------------------------------
def test_empty_sequence(self):
chunk_size = 5
test_sequence = list()
# keep results for assertions
iterations = 0
iterated_elements = list()
# test
for sequence_subset in ichunked(test_sequence, chunk_size):
iterations += 1
iterated_elements.extend(sequence_subset)
self.assertLessEqual(len(sequence_subset), chunk_size)
expected_iterations = 0
self.assertListEqual(iterated_elements, test_sequence)
self.assertEqual(iterations, expected_iterations)
if __name__ == '__main__':
unittest.main()
| mit | Python | |
5d67f1873994990b3c63820ec85476d9ed7c7303 | Add from_frames | pynayzr/pynayzr | scripts/from_frames.py | scripts/from_frames.py | #!/usr/bin/python
import argparse
import glob
import os
import pathlib
import statistics
import pynayzr
import imgcompare
from PIL import Image, ImageDraw
# Based on 240p
REFERENCE_BOX = {
'tvbs': (50, 50, 150, 63),
'ftv': (80, 43, 89, 63),
}
def get_time(t):
return f'{t // 60:02d}:{t % 60:02d}'
def get_frame_time(filename):
return int(os.path.splitext(os.path.basename(filename))[0].split('_')[0])
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--news', type=str, required=True)
parser.add_argument('-d', '--dir', type=str, required=True)
parser.add_argument('-o', '--output', type=str, required=True)
parser.add_argument('--reference', nargs='+', type=int)
parser.add_argument('--threshold', type=int, default=10)
return parser
def main():
args = get_parser().parse_args()
news = args.news
dir_path = args.dir
out_dir = pathlib.Path(args.output)
if not out_dir.exists():
out_dir.mkdir()
for i in sorted(glob.glob(f'{dir_path}/*.jpg')):
nm = pynayzr.NewsModel(news, image_path=i)
filename = os.path.splitext(os.path.basename(i))[0]
nm.save_all(out_dir.joinpath(filename))
images = sorted(glob.glob(f'{out_dir}/*_title.jpg'))
bottoms = sorted(glob.glob(f'{out_dir}/*_bottom.jpg'))
# Compare base
current = images[0]
current_i = 0
# Unique title, (start, end), duration
uniq = []
times = []
durations = []
# Static part's box
# NOTE: This will compare with "bottom", not title
if args.reference:
cp_box = args.reference
elif news in REFERENCE_BOX:
cp_box = REFERENCE_BOX[news]
else:
# XXX: Should warn user?
cp_box = (0, 0, 1, 1)
cp = Image.open(bottoms[0]).crop(cp_box)
# Couting for continuous frame
continuous = 0
# Compare the images
prev = current
for index, i in enumerate(images[1:]):
continuous += 1
if not imgcompare.is_equal(current, i, 12):
if continuous > args.threshold:
im = Image.open(bottoms[current_i + 1])
if imgcompare.is_equal(cp, im.crop(cp_box), 3):
uniq.append(prev)
print(prev)
times.append((get_frame_time(current),
get_frame_time(i)))
durations.append(times[-1][1] - times[-1][0])
current = i
current_i = index
continuous = 0
prev = i
# Print the information
print(len(images), len(uniq),
f'mean: {statistics.mean(durations):.2f}, '
f'stdev: {statistics.stdev(durations):.2f}, '
f'median: {statistics.median(durations)}')
# Output
TIME_DURATION_SPACE = 100
w, h = Image.open(uniq[0]).size
w += TIME_DURATION_SPACE # Give some space for time
im = Image.new('RGB', (w, h * len(uniq)), '#FFFFFF')
for index, i in enumerate(uniq):
duration = ' ~ '.join(
[get_time(times[index][0]),get_time(times[index][1] - 1)])
im.paste(Image.open(i), (0, h * index))
ImageDraw.Draw(im).text(
(w - (TIME_DURATION_SPACE - 10), h * index + 10),
duration,
(0, 0, 0))
im.save('out.jpg')
if __name__ == '__main__':
main()
| mit | Python | |
35676a61b6fef366d30733dd32211adde9572209 | Add runmailer management command | pinax/django-mailer,pinax/django-mailer | src/mailer/management/commands/runmailer.py | src/mailer/management/commands/runmailer.py | import sys
from datetime import datetime
from django.core.management import BaseCommand
from mailer.engine import send_loop
class Command(BaseCommand):
"""Start the django-mailer send loop"""
def handle(self, *args, **options):
self.stdout.write(datetime.now().strftime('%B %d, %Y - %X'))
self.stdout.write('Starting django-mailer send loop.')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write('Quit the loop with %s.' % quit_command)
send_loop()
| mit | Python | |
824f99b5d0a0c5d4b3076d0a5a8991e9fe93ceb1 | Initialize time table data. | malaonline/Android,malaonline/iOS,malaonline/Server,malaonline/Server,malaonline/Android,malaonline/iOS,malaonline/Server,malaonline/Android,malaonline/iOS,malaonline/Server | server/app/migrations/0008_timetable.py | server/app/migrations/0008_timetable.py | import os
from datetime import time
from django.db import migrations
def add_timetable(apps, schema_editor):
TimeTable = apps.get_model('app', 'TimeTable')
for weekday in range(1, 8):
for start in (8, 10, 13, 15, 17, 19):
end = start + 2
time_table = TimeTable(weekday=weekday, start=time(start),
end=time(end))
time_table.save()
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
('app', '0002_subject'),
('app', '0003_region'),
('app', '0004_grade'),
('app', '0005_gradesubject'),
('app', '0006_level'),
('app', '0007_role'),
]
operations = [
migrations.RunPython(add_timetable),
]
| mit | Python | |
676164caf2e498bef0294a7fe2ad0daf94da33bf | Put import statements into __init__ | abigailStev/stingray,pabell/stingray,evandromr/stingray,StingraySoftware/stingray | stingray/modeling/__init__.py | stingray/modeling/__init__.py | # Licensed under MIT license - see LICENSE.rst
"""
Library of Time Series Methods For Astronomical X-ray Data.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from .._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
# from stingray.lightcurve import *
# from stingray.utils import *
# from stingray.powerspectrum import *
from parametricmodels import *
from posterior import *
from parameterestimation import *
| mit | Python | |
0875ddf7966ed0ca4ac336ac4f52c282c8ceb021 | create lookup function to assist with player specific queries | jldbc/pybaseball | pybaseball/playerid_lookup.py | pybaseball/playerid_lookup.py | import pandas as pd
import requests
import io
# dropped key_uuid. looks like a has we wouldn't need for anything.
# TODO: allow for typos. String similarity?
# TODO: allow user to submit list of multiple names
def get_lookup_table():
print('Gathering player lookup table. This may take a moment.')
url = "https://raw.githubusercontent.com/chadwickbureau/register/master/data/people.csv"
s=requests.get(url).content
table = pd.read_csv(io.StringIO(s.decode('utf-8')))
#subset columns
cols_to_keep = ['name_last','name_first','key_mlbam', 'key_retro', 'key_bbref', 'key_fangraphs', 'mlb_played_first','mlb_played_last']
table = table[cols_to_keep]
#make these lowercase to avoid capitalization mistakes when searching
table['name_last'] = table['name_last'].str.lower()
table['name_first'] = table['name_first'].str.lower()
# Pandas cannot handle NaNs in integer columns. We need IDs to be ints for successful queries in statcast, etc.
# Workaround: replace ID NaNs with -1, then convert columns to integers. User will have to understand that -1 is not a valid ID.
table[['key_mlbam', 'key_fangraphs']] = table[['key_mlbam', 'key_fangraphs']].fillna(-1)
table[['key_mlbam', 'key_fangraphs']] = table[['key_mlbam', 'key_fangraphs']].astype(int) # originally returned as floats which is wrong
return table
def playerid_lookup(last, first=None):
# force input strings to lowercase
last = last.lower()
if first:
first = first.lower()
table = get_lookup_table()
if first is None:
results = table.loc[table['name_last']==last]
else:
results = table.loc[(table['name_last']==last) & (table['name_first']==first)]
#results[['key_mlbam', 'key_fangraphs', 'mlb_played_first', 'mlb_played_last']] = results[['key_mlbam', 'key_fangraphs', 'mlb_played_first', 'mlb_played_last']].astype(int) # originally returned as floats which is wrong
return results
# data = playerid_lookup('bonilla')
# data = playerid_lookup('bonilla', 'bobby')
| mit | Python | |
a42f97104d2332f367041592b9ac05291c3b0240 | Add scorekeeper plugin | jk0/pyhole,jk0/pyhole,jk0/pyhole | pyhole/plugins/scorekeeper.py | pyhole/plugins/scorekeeper.py | # Copyright 2015 Rick Harris
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Scorekeeper Plugin
Scorekeeper is a fun way to high-five someone or playfully boo them in the IRC
room.
Each user gets a score which can be incremented like:
<nick>++
The score can also be decremented like:
<nick>--
The running highscore can displayed with:
.score
An individual's score can be displayed with:
.score <nick>
"""
from pyhole.core import plugin, utils
class Scorekeeper(plugin.Plugin):
"""Each user gets a score. Do with it what you want."""
@plugin.hook_add_msg_regex("^(.+)\+\+")
@utils.spawn
def increment_score(self, message, match, **kwargs):
nick = match.group(1)
self._adjust_score(message, nick, 1)
@plugin.hook_add_msg_regex("^(.+)\-\-")
@utils.spawn
def decrement_score(self, message, match, **kwargs):
nick = match.group(1)
self._adjust_score(message, nick, -1)
def _adjust_score(self, message, nick, delta):
score = self._get_score(nick)
if score is None:
score = 0
score += delta
utils.write_file(self.name, nick, str(score))
message.dispatch(score)
def _get_score(self, nick):
score = utils.read_file(self.name, nick)
if score is None:
return None
try:
return int(score)
except ValueError:
return None
def _display_highscores(self, message):
scores = []
for nick in utils.list_files(self.name):
score = self._get_score(nick)
scores.append((score, nick))
if not scores:
message.dispatch("No scores yet")
return
message.dispatch("Highscores (Top 5)")
message.dispatch("==================")
scores.sort(reverse=True)
for score, nick in scores[:5]:
message.dispatch("%s %s" % (str(score).rjust(4),
nick.rjust(13)))
@plugin.hook_add_command("score")
@utils.spawn
def score(self, message, params=None, **kwargs):
"""Display highscores"""
if params:
nick = params.strip()
score = self._get_score(nick)
if score is None:
return message.dispatch(
"No score found for '%s'" % nick)
else:
message.dispatch(score)
return
else:
return self._display_highscores(message)
| apache-2.0 | Python | |
73a9fb37479f3b4009f8f69f1c83c739479bee6f | Add index route (git hash) | cgwire/zou | zou/app/resources/index.py | zou/app/resources/index.py | from flask_restful import Resource
from zou.app.utils import git
class IndexResource(Resource):
def get(self):
git_hash = git.get_git_revision_hash()
return {
'api': 'Unit Image Pipeline Server',
'git_hash': git_hash.decode("utf-8")
}
| agpl-3.0 | Python | |
81d7588a3a79c5962d99dea6764a2c1675ce9665 | test token method | CartoDB/cartodb-python,CartoDB/carto-python | tests/test_do_token.py | tests/test_do_token.py | import os
import pytest
from carto.do_token import DoTokenManager
@pytest.fixture(scope="module")
def do_token_manager(api_key_auth_client):
"""
Returns a do token manager that can be reused in tests
:param api_key_auth_client: Fixture that provides a valid
APIKeyAuthClient object
:return: DoTokenManager instance
"""
return DoTokenManager(api_key_auth_client)
@pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Integration tests not executed in Travis")
def test_get_token(do_token_manager):
"""
Get all the datasets from the API
:param do_token_manager: Fixture that provides a do token manager to work with
"""
token = do_token_manager.get()
assert token is not None
assert token.access_token is not None
assert isinstance(token.access_token, str)
assert len(token.access_token) > 0
| bsd-3-clause | Python | |
1523066235f47450dd88d334061ec31d0c78551a | add migration to update field choices and migrate data | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/legalaid/migrations/0013_auto_20160414_1429.py | cla_backend/apps/legalaid/migrations/0013_auto_20160414_1429.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_eod_categories(apps, schema_editor):
EODDetailsCategory = apps.get_model("legalaid", "EODDetailsCategory")
for eod in EODDetailsCategory.objects.filter(category='scope_or_means'):
eod.category = 'scope'
eod.save()
means_eod = EODDetailsCategory()
means_eod.category = 'means'
means_eod.eod_details = eod.eod_details
means_eod.is_major = eod.is_major
means_eod.save()
class Migration(migrations.Migration):
dependencies = [
('legalaid', '0012_auto_20151209_1500'),
]
operations = [
migrations.AlterField(
model_name='eoddetailscategory',
name='category',
field=models.CharField(blank=True, max_length=30, null=True, choices=[(b'incorrect', b'Believes operator has given incorrect information'), (b'scope', b'Unhappy with Operator Service determination (Scope)'), (b'means', b'Unhappy with Operator Service determination (Means)'), (b'delete', b'Wants personal details deleted'), (b'advisor_response', b'No response from specialist advisor, or response delayed'), (b'operator_delay', b'Operator service - delay in advice'), (b'operator_attitude', b"Unhappy with operator's attitude"), (b'advisor_attitude', b"Unhappy with specialist's attitude"), (b'alt_help', b'Alternative help not appropriate or not available'), (b'public_tool', b'Unhappy with online service'), (b'adaptations', b'Problems with adaptations or adjustments'), (b'scope_assessment', b'Scope reassessment requested'), (b'means_assessment', b'Financial reassessment requested'), (b'pass_to_public', b'Threatens to pass the matter on to the media, or other public or regulatory body'), (b'data_protection', b'Breach of Data Protection Act/policy and confidentiality'), (b'discrimination', b'Discrimination from an operator or specialist'), (b'plo_referral', b'Client unhappy with PLO referral'), (b'other', b'Other')]),
preserve_default=True,
),
migrations.RunPython(update_eod_categories)
]
| mit | Python | |
8091fc062541a0fb97ae556ea52927303b5bc73d | Add debug mode support to dokx-build-search index | deepmind/torch-dokx,Gueust/torch-dokx,deepmind/torch-dokx,yozw/torch-dokx,yozw/torch-dokx,deepmind/torch-dokx,Gueust/torch-dokx,yozw/torch-dokx,Gueust/torch-dokx | dokx-search/dokx-build-search-index.py | dokx-search/dokx-build-search-index.py | """
Create and populate a minimal PostgreSQL schema for full text search
"""
import sqlite3
import glob
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, help="Path to write SQLite3 search index")
parser.add_argument("--debug", type=bool, help="Debug mode")
parser.add_argument('input', type=str, help="Path to input directory of Markdown files")
args = parser.parse_args()
DB_NAME = args.output
DB_HOST = 'localhost' # Uses a local socket
DB_USER = 'fts_user'
DB = sqlite3.connect(database=DB_NAME)
path = args.input
def debug(msg):
if args.debug:
print(msg)
def load_db():
"""Add sample data to the database"""
ins = """INSERT INTO fulltext_search(package, tag, doc) VALUES(?, ?, ?);"""
pattern = re.compile('<a name="(.*)"></a>')
for packageName in os.listdir(path):
for filePath in glob.glob(os.path.join(path, packageName, "*.md")):
debug("Indexing " + filePath)
with open(filePath, 'r') as f:
section = ""
tag = os.path.basename(filePath)
for line in f.readlines():
result = pattern.match(line)
if result:
DB.execute(ins, (packageName, tag, section))
tag = result.group(1)
section = ""
else:
section += line
DB.commit()
def init_db():
"""Initialize our database"""
DB.execute("DROP TABLE IF EXISTS fulltext_search")
DB.execute("""CREATE VIRTUAL TABLE fulltext_search USING fts4(
id SERIAL,
package TEXT,
tag TEXT,
doc TEXT,
tokenize=porter
);""")
if __name__ == "__main__":
init_db()
load_db()
DB.close()
| """
Create and populate a minimal PostgreSQL schema for full text search
"""
import sqlite3
import glob
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, help="Path to write SQLite3 search index")
parser.add_argument('input', type=str, help="Path to input directory of Markdown files")
args = parser.parse_args()
DB_NAME = args.output
DB_HOST = 'localhost' # Uses a local socket
DB_USER = 'fts_user'
DB = sqlite3.connect(database=DB_NAME)
path = args.input
def load_db():
"""Add sample data to the database"""
ins = """INSERT INTO fulltext_search(package, tag, doc) VALUES(?, ?, ?);"""
pattern = re.compile('<a name="(.*)"></a>')
for packageName in os.listdir(path):
for filePath in glob.glob(os.path.join(path, packageName, "*.md")):
print("Indexing " + filePath)
with open(filePath, 'r') as f:
section = ""
tag = os.path.basename(filePath)
for line in f.readlines():
result = pattern.match(line)
if result:
DB.execute(ins, (packageName, tag, section))
tag = result.group(1)
section = ""
else:
section += line
DB.commit()
def init_db():
"""Initialize our database"""
DB.execute("DROP TABLE IF EXISTS fulltext_search")
DB.execute("""CREATE VIRTUAL TABLE fulltext_search USING fts4(
id SERIAL,
package TEXT,
tag TEXT,
doc TEXT,
tokenize=porter
);""")
if __name__ == "__main__":
init_db()
load_db()
DB.close()
| bsd-3-clause | Python |
a2dfa00b8a3b5ef9b969b0848ff3c933365a2eed | add transitional submodule | oesteban/mriqc,poldracklab/mriqc,oesteban/mriqc,poldracklab/mriqc,poldracklab/mriqc,poldracklab/mriqc,oesteban/mriqc,oesteban/mriqc | mriqc/interfaces/transitional.py | mriqc/interfaces/transitional.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function, division, absolute_import, unicode_literals
from nipype.interfaces.base import File, traits, CommandLine, TraitedSpec, CommandLineInputSpec
class GCORInputSpec(CommandLineInputSpec):
in_file = File(
desc='input dataset to compute the GCOR over',
argstr='-input %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
mask = File(
desc='mask dataset, for restricting the computation',
argstr='-mask %s',
exists=True,
copyfile=False)
nfirst = traits.Int(0, argstr='-nfirst %d',
desc='specify number of initial TRs to ignore')
no_demean = traits.Bool(False, argstr='-no_demean',
desc='do not (need to) demean as first step')
class GCOROutputSpec(TraitedSpec):
out = traits.Float(desc='global correlation value')
class GCOR(CommandLine):
"""
Computes the average correlation between every voxel
and ever other voxel, over any give mask.
For complete details, see the `@compute_gcor Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/@compute_gcor.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> gcor = afni.GCOR()
>>> gcor.inputs.in_file = 'structural.nii'
>>> gcor.inputs.nfirst = 4
>>> gcor.cmdline # doctest: +ALLOW_UNICODE
'@compute_gcor -nfirst 4 -input structural.nii'
>>> res = gcor.run() # doctest: +SKIP
"""
_cmd = '@compute_gcor'
input_spec = GCORInputSpec
output_spec = GCOROutputSpec
def _run_interface(self, runtime):
runtime = super(GCOR, self)._run_interface(runtime)
gcor_line = [line.strip() for line in runtime.stdout.split('\n')
if line.strip().startswith('GCOR = ')][-1]
setattr(self, '_gcor', float(gcor_line[len('GCOR = '):]))
return runtime
def _list_outputs(self):
return {'out': getattr(self, '_gcor')}
| apache-2.0 | Python | |
242f7d6ea3d733b08f60ad1f43fc490984989d30 | add import script for Swale | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_swale.py | polling_stations/apps/data_collection/management/commands/import_swale.py | from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
srid = 27700
council_id = 'E07000113'
districts_name = 'shp/Swale Polling Districts'
stations_name = 'shp/Swale Polling Stations.shp'
elections = ['local.kent.2017-05-04']
def district_record_to_dict(self, record):
code = str(record[0]).strip()
return {
'internal_council_id': code,
'name': str(record[1]).strip(),
'polling_station_id': code,
}
def station_record_to_dict(self, record):
return {
'internal_council_id': str(record[0]).strip(),
'postcode': '',
'address': str(record[4]).strip(),
}
| bsd-3-clause | Python | |
eceaf20ec82eebec70d1314f74c54eea8f51158c | Add TestShell class | DigitalPandacoin/pandacoin,DigitalPandacoin/pandacoin,peercoin/peercoin,peercoin/peercoin,DigitalPandacoin/pandacoin,peercoin/peercoin,peercoin/peercoin,DigitalPandacoin/pandacoin,peercoin/peercoin,DigitalPandacoin/pandacoin,peercoin/peercoin,DigitalPandacoin/pandacoin | test/functional/test_framework/test_shell.py | test/functional/test_framework/test_shell.py | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
class TestShell:
"""Wrapper Class for BitcoinTestFramework.
The TestShell class extends the BitcoinTestFramework
rpc & daemon process management functionality to external
python environments.
It is a singleton class, which ensures that users only
start a single TestShell at a time."""
class __TestShell(BitcoinTestFramework):
def set_test_params(self):
pass
def run_test(self):
pass
def setup(self, **kwargs):
if self.running:
print("TestShell is already running!")
return
# Num_nodes parameter must be set
# by BitcoinTestFramework child class.
self.num_nodes = kwargs.get('num_nodes', 1)
kwargs.pop('num_nodes', None)
# User parameters override default values.
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
elif hasattr(self.options, key):
setattr(self.options, key, value)
else:
raise KeyError(key + " not a valid parameter key!")
super().setup()
self.running = True
def shutdown(self):
if not self.running:
print("TestShell is not running!")
else:
super().shutdown()
self.running = False
def reset(self):
if self.running:
print("Shutdown TestWrapper before resetting!")
else:
self.num_nodes = None
super().__init__()
instance = None
def __new__(cls):
# This implementation enforces singleton pattern, and will return the
# previously initialized instance if available
if not TestShell.instance:
TestShell.instance = TestShell.__TestShell()
TestShell.instance.running = False
return TestShell.instance
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
return setattr(self.instance, name, value)
| mit | Python | |
3a2349cf86ed4562a911f3609f9b8c039d228989 | test for phones and address done | holi87/PythonSzkolenie | test/test_addresses.py | test/test_addresses.py | from model.contact import Contact
from random import randrange
__author__ = "Grzegorz Holak"
def test_phones_on_home_page(app):
# when there is no contact - make one for test
if app.contact.count() == 0:
app.contact.create(Contact(first_name="jakies losowe", last_name="nazwisko siakie", address="""adres1
adres2"""))
index = randrange(len(app.contact.get_contact_list()))
contact_from_home_page = app.contact.get_contact_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.address == contact_from_edit_page.address
| apache-2.0 | Python | |
a8bded67a92632fd6d2d8791dd245dc82c773c8d | Add an integration folder for tests that are beyond the unittest scope | markrwilliams/tectonic | integration/basic_server.py | integration/basic_server.py | """
This is a basic test which allows you to setup a server and listen to it.
For example, running:
python integration/basic_server.py localhost 8040
Sets up a server.
Running curl against it generates the following reponse:
curl 'http://localhost:8040/'
<html><body><h1>ok</h1><br/>from 28330
And in server output will print out the entire string (Lorem ipsum dolor etc.)
"""
import os
import sys
import string
import argparse
import gevent.pywsgi
from tectonic.prefork import Master
if __name__ == '__main__':
a = argparse.ArgumentParser()
a.add_argument('address')
a.add_argument('port', type=int)
a.add_argument('--logpath', default='log')
a.add_argument('--pidfile', default='pidfile')
a.add_argument('--daemonize', '-d', default=False, action='store_true')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
pid = os.getpid()
spid = str(pid)
sys.stderr.write('''\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus
eleifend a metus quis sollicitudin. Aenean nec dolor iaculis, rhoncus
turpis sit amet, interdum quam. Nunc rhoncus magna a leo interdum
luctus. Vestibulum nec sapien diam. Aliquam rutrum venenatis
mattis. Etiam eget adipiscing risus. Vestibulum ante ipsum primis in
faucibus orci luctus et ultrices posuere cubilia Curae; Fusce nibh
nulla, lacinia quis dignissim vel, condimentum at odio. Nunc et diam
mauris. Fusce sit amet odio sagittis, convallis urna a, blandit
urna. Phasellus mattis ligula sed tincidunt pellentesque. Nullam
tempor convallis dapibus.
Duis vitae vulputate sem, nec eleifend orci. Donec vel metus
fringilla, ultricies nunc at, ultrices quam. Donec placerat nisi quis
fringilla facilisis. Fusce eget erat ut magna consectetur
elementum. Aenean non vulputate nulla. Aliquam eu dui nibh. Vivamus
mollis suscipit neque, quis aliquam ipsum auctor non. Nulla cursus
turpis turpis, nec euismod urna placerat at. Nunc id sapien
nibh. Vestibulum condimentum luctus placerat. Donec vitae posuere
arcu.''' + '\n')
return ['<html><body><h1>ok</h1><br/>from ' + spid]
args = a.parse_args()
Master(server_class=gevent.pywsgi.WSGIServer,
socket_factory=gevent.socket.socket,
sleep=gevent.sleep,
wsgi=wsgi,
address=(args.address, args.port),
logpath=args.logpath,
pidfile=args.pidfile).run(args.daemonize)
| bsd-3-clause | Python | |
c34eb62d19c4216aa54199a083a06c2c45318cea | Add missing migration for IMEI db validation. | akatsoulas/feedthefox,akatsoulas/feedthefox,mozilla/feedthefox,mozilla/feedthefox,akatsoulas/feedthefox,mozilla/feedthefox,akatsoulas/feedthefox,mozilla/feedthefox | feedthefox/devices/migrations/0006_auto_20151110_1355.py | feedthefox/devices/migrations/0006_auto_20151110_1355.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import feedthefox.devices.models
class Migration(migrations.Migration):
dependencies = [
('devices', '0005_auto_20151105_1048'),
]
operations = [
migrations.AlterField(
model_name='deviceinfo',
name='imei',
field=models.CharField(default='', blank=True, max_length=17, validators=[feedthefox.devices.models.validate_imei]),
),
]
| mpl-2.0 | Python | |
3d30708839236e65e7e83ce2baf8dd0364451fb5 | add test_lastgenre.py | shanemikel/beets,madmouser1/beets,Wen777/beets,ruippeixotog/beets,kareemallen/beets,shanemikel/beets,drm00/beets,Freso/beets,Dishwishy/beets,LordSputnik/beets,PierreRust/beets,lengtche/beets,drm00/beets,imsparsh/beets,arabenjamin/beets,jcoady9/beets,randybias/beets,asteven/beets,YetAnotherNerd/beets,sampsyo/beets,arabenjamin/beets,lightwang1/beets,ibmibmibm/beets,ibmibmibm/beets,dfc/beets,untitaker/beets,Wen777/beets,moodboom/beets,ttsda/beets,lightwang1/beets,shamangeorge/beets,madmouser1/beets,untitaker/beets,bj-yinyan/beets,bj-yinyan/beets,pkess/beets,swt30/beets,mried/beets,Kraymer/beets,beetbox/beets,MyTunesFreeMusic/privacy-policy,gabrielaraujof/beets,jmwatte/beets,jackwilsdon/beets,ttsda/beets,jcoady9/beets,kareemallen/beets,gabrielaraujof/beets,Dishwishy/beets,kareemallen/beets,ruippeixotog/beets,randybias/beets,bj-yinyan/beets,xsteadfastx/beets,parapente/beets,Dishwishy/beets,shamangeorge/beets,gabrielaraujof/beets,ruippeixotog/beets,moodboom/beets,marcuskrahl/beets,shanemikel/beets,pkess/beets,jackwilsdon/beets,jackwilsdon/beets,YetAnotherNerd/beets,randybias/beets,jcoady9/beets,sampsyo/beets,madmouser1/beets,randybias/beets,tima/beets,jmwatte/beets,beetbox/beets,Andypsamp/CODjunit,swt30/beets,swt30/beets,kelvinhammond/beets,imsparsh/beets,parapente/beets,Andypsamp/CODjunit,kareemallen/beets,m-urban/beets,mathstuf/beets,diego-plan9/beets,marcuskrahl/beets,Freso/beets,MyTunesFreeMusic/privacy-policy,ttsda/beets,moodboom/beets,diego-plan9/beets,pkess/beets,untitaker/beets,lightwang1/beets,sadatay/beets,m-urban/beets,lengtche/beets,asteven/beets,PierreRust/beets,lengtche/beets,artemutin/beets,shamangeorge/beets,Freso/beets,Freso/beets,LordSputnik/beets,MyTunesFreeMusic/privacy-policy,kelvinhammond/beets,untitaker/beets,ruippeixotog/beets,Kraymer/beets,asteven/beets,mosesfistos1/beetbox,andremiller/beets,jcoady9/beets,Andypsamp/CODjunit,ibmibmibm/beets,beetbox/beets,LordSputnik/beets,tima/beets
,moodboom/beets,kelvinhammond/beets,asteven/beets,jackwilsdon/beets,madmouser1/beets,bj-yinyan/beets,PierreRust/beets,mried/beets,Andypsamp/CODjunit,Wen777/beets,Andypsamp/CODjunit,shanemikel/beets,mried/beets,multikatt/beets,dfc/beets,Andypsamp/CODfinalJUNIT,jmwatte/beets,artemutin/beets,pkess/beets,Dishwishy/beets,mathstuf/beets,arabenjamin/beets,dfc/beets,SusannaMaria/beets,m-urban/beets,marcuskrahl/beets,diego-plan9/beets,mosesfistos1/beetbox,YetAnotherNerd/beets,Andypsamp/CODfinalJUNIT,Andypsamp/CODfinalJUNIT,Andypsamp/CODfinalJUNIT,sadatay/beets,xsteadfastx/beets,YetAnotherNerd/beets,LordSputnik/beets,imsparsh/beets,sadatay/beets,jmwatte/beets,sadatay/beets,dfc/beets,xsteadfastx/beets,sampsyo/beets,kelvinhammond/beets,multikatt/beets,xsteadfastx/beets,swt30/beets,parapente/beets,tima/beets,MyTunesFreeMusic/privacy-policy,Kraymer/beets,lengtche/beets,mathstuf/beets,arabenjamin/beets,drm00/beets,parapente/beets,gabrielaraujof/beets,ttsda/beets,mried/beets,SusannaMaria/beets,imsparsh/beets,m-urban/beets,marcuskrahl/beets,artemutin/beets,multikatt/beets,Andypsamp/CODfinalJUNIT,andremiller/beets,diego-plan9/beets,tima/beets,Kraymer/beets,andremiller/beets,multikatt/beets,sampsyo/beets,ibmibmibm/beets,mathstuf/beets,beetbox/beets,mosesfistos1/beetbox,artemutin/beets,SusannaMaria/beets,drm00/beets,PierreRust/beets,shamangeorge/beets,mosesfistos1/beetbox,SusannaMaria/beets,lightwang1/beets | test/test_lastgenre.py | test/test_lastgenre.py | # This file is part of beets.
# Copyright 2014, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
from _common import unittest
from beetsplug import lastgenre
from beets import config
class LastGenrePluginTest(unittest.TestCase):
    """Tests for the lastgenre plugin's genre canonicalization."""

    def setUp(self):
        """Set up configuration."""
        # BUG FIX: was `lyrics.LastGenrePlugin()` — `lyrics` is never
        # imported here; the module under test is `lastgenre`.
        lastgenre.LastGenrePlugin()

    def _setup_config(self, whitelist=None, branches=None, count=1):
        """Install a lastgenre configuration for a single test.

        whitelist -- set of allowed genres (defaults to an empty set)
        branches  -- canonicalization tree; enables c14n when given
        count     -- maximum number of genres to return
        """
        # Avoid the mutable default argument `whitelist=set()`.
        config['lastgenre']['whitelist'] = whitelist if whitelist is not None else set()
        if branches:
            config['lastgenre']['branches'] = branches
            config['lastgenre']['c14n'] = True
        else:
            config['lastgenre']['c14n'] = False
        config['lastgenre']['count'] = count

    def test_c14n(self):
        # BUG FIX: `self` was missing from the signature, `_setup_config`
        # was called unqualified, and `set('blues')` split the string into
        # single letters instead of producing {'blues'}.
        self._setup_config({'blues'},
                           [['blues'],
                            ['blues', 'country blues'],
                            ['blues', 'country blues', 'delta blues']])
        self.assertEqual(lastgenre._strings_to_genre(['delta blues']),
                         'blues')
| mit | Python | |
5f01a1e6c8fe7230eaf9f72f27911537cda28647 | add tender document views for stage2 UA, EU | gorserg/openprocurement.tender.competitivedialogue,openprocurement/openprocurement.tender.competitivedialogue | openprocurement/tender/competitivedialogue/views/stage2/tender_document.py | openprocurement/tender/competitivedialogue/views/stage2/tender_document.py | # -*- coding: utf-8 -*-
from openprocurement.api.utils import opresource
from openprocurement.tender.openeu.views.tender_document import TenderEUDocumentResource
from openprocurement.tender.openua.views.tender_document import TenderUaDocumentResource
from openprocurement.tender.competitivedialogue.models import STAGE_2_UA_TYPE, STAGE_2_EU_TYPE
@opresource(name='Competitive Dialogue Stage 2 EU Documents',
            collection_path='/tenders/{tender_id}/documents',
            path='/tenders/{tender_id}/documents/{document_id}',
            procurementMethodType=STAGE_2_EU_TYPE,
            description="Competitive Dialogue Stage 2 EU related binary files (PDFs, etc.)")
class CompetitiveDialogueStage2EUDocumentResource(TenderEUDocumentResource):
    """Stage-2 EU dialogue tenders reuse the OpenEU document endpoints unchanged."""
    pass
@opresource(name='Competitive Dialogue Stage 2 UA Documents',
            collection_path='/tenders/{tender_id}/documents',
            path='/tenders/{tender_id}/documents/{document_id}',
            procurementMethodType=STAGE_2_UA_TYPE,
            description="Competitive Dialogue Stage 2 UA related binary files (PDFs, etc.)")
class CompetitiveDialogueStage2UADocumentResource(TenderUaDocumentResource):
    """Stage-2 UA dialogue tenders reuse the OpenUA document endpoints unchanged."""
    pass
| apache-2.0 | Python | |
93b16422d29243280c8553236afaf77516f3466f | test parseLayoutFeatures function | moyogo/ufo2ft,jamesgk/ufo2ft,jamesgk/ufo2fdk,googlei18n/ufo2ft,googlefonts/ufo2ft | tests/featureCompiler_test.py | tests/featureCompiler_test.py | from __future__ import (
print_function,
division,
absolute_import,
unicode_literals,
)
from textwrap import dedent
import logging
from fontTools.feaLib.error import IncludedFeaNotFound
from ufo2ft.featureCompiler import (
FeatureCompiler,
MtiFeatureCompiler,
parseLayoutFeatures,
)
import pytest
from .testSupport import pushd
class ParseLayoutFeaturesTest(object):
    """Exercise parseLayoutFeatures' handling of include() statements."""

    def test_include(self, FontClass, tmpdir):
        # An include() file next to the UFO should be resolved and inlined.
        included = dedent(
            """\
            # hello world
            """
        )
        tmpdir.join("test.fea").write_text(included, encoding="utf-8")
        font = FontClass()
        font.features.text = dedent(
            """\
            include(test.fea)
            """
        )
        font.save(str(tmpdir.join("Test.ufo")))
        parsed = parseLayoutFeatures(font)
        assert "# hello world" in str(parsed)

    def test_include_no_ufo_path(self, FontClass, tmpdir):
        # Without a UFO path there is no base directory to resolve against.
        font = FontClass()
        font.features.text = dedent(
            """\
            include(test.fea)
            """
        )
        with pushd(str(tmpdir)):
            with pytest.raises(IncludedFeaNotFound):
                parseLayoutFeatures(font)

    def test_include_not_found(self, FontClass, tmpdir, caplog):
        # A dangling relative include should raise and also log one warning.
        caplog.set_level(logging.ERROR)
        tmpdir.join("test.fea").write_text(
            dedent(
                """\
                # hello world
                """
            ),
            encoding="utf-8",
        )
        font = FontClass()
        font.features.text = dedent(
            """\
            include(../test.fea)
            """
        )
        font.save(str(tmpdir.join("Test.ufo")))
        logger = "ufo2ft.featureCompiler"
        with caplog.at_level(logging.WARNING, logger=logger):
            with pytest.raises(IncludedFeaNotFound):
                parseLayoutFeatures(font)
        assert len(caplog.records) == 1
        assert "change the file name in the include" in caplog.text
| mit | Python | |
b3b7e2fcbff5cd0ec2d2b4457b7a46d1846d55a8 | Implement a generic Vispy widget | PennyQ/glue-3d-viewer,PennyQ/astro-vispy,glue-viz/glue-vispy-viewers,glue-viz/glue-3d-viewer,astrofrog/glue-vispy-viewers,astrofrog/glue-3d-viewer | glue_vispy_viewers/common/vispy_viewer.py | glue_vispy_viewers/common/vispy_viewer.py | from __future__ import absolute_import, division, print_function
import sys
from vispy import scene
from glue.external.qt import QtGui, get_qapp
class VispyWidget(QtGui.QWidget):
    """Qt widget embedding a VisPy 3D scene with a turntable camera.

    Builds the canvas, viewbox, orientation axis and camera; the
    ``_update_*`` methods are empty hooks (presumably overridden by
    concrete viewer subclasses — confirm against the callers).
    """
    def __init__(self, parent=None):
        super(VispyWidget, self).__init__(parent=parent)
        # Prepare Vispy canvas (not shown until embedded below)
        self.canvas = scene.SceneCanvas(keys='interactive', show=False)
        # Set up a viewbox
        self.view = self.canvas.central_widget.add_view()
        self.view.parent = self.canvas.scene
        # Set whether we are emulating a 3D texture. This needs to be enabled
        # as a workaround on Windows otherwise VisPy crashes.
        self.emulate_texture = (sys.platform == 'win32' and
                                sys.version_info[0] < 3)
        # Add a 3D axis to keep us oriented
        self.axis = scene.visuals.XYZAxis(parent=self.view.scene)
        # Create a turntable camera. For now, this is the only camera type
        # we support, but if we support more in future, we should implement
        # that here
        self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
                                                         fov=90)
        # Add the native canvas widget to this widget
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.canvas.native)
        self.setLayout(layout)
    def _update_stretch(self):
        # Hook: no-op in the base widget.
        pass
    def _update_attributes(self):
        # Hook: no-op in the base widget.
        pass
    def _update_limits(self):
        # Hook: no-op in the base widget.
        pass
    def _reset_view(self):
        # Restore the camera to its default orientation/zoom.
        self.view.camera.reset()
if __name__ == "__main__":
    from viewer_options import VispyOptionsWidget

    # Minimal manual smoke test: show the viewer with its options dialog.
    app = get_qapp()
    widget = VispyWidget()
    options = VispyOptionsWidget(vispy_widget=widget)
    options.show()
    widget.show()
    app.exec_()
    app.quit()
| bsd-2-clause | Python | |
5883232eed25cf96ca275af6904d284201306c0a | Add an __init__.py that exposes all the modules from the package. | nanaze/jsdoctor,Prachigarg1/Prachi,nanaze/jsdoctor,nanaze/jsdoctor,Prachigarg1/Prachi,Prachigarg1/Prachi | __init__.py | __init__.py | # Expose the modules of the package.
import flags
import generator
import jsdoc
import linkify
import namespace
import scanner
import source
import symboltypes
| apache-2.0 | Python | |
10e4c82f0d3440ffd4737589870b85c61a83d1b8 | add init for import | yupbank/no_csrf | __init__.py | __init__.py | from no_csrf import nicer_get
| mit | Python | |
5beae4a29f973ab3980f00cf180c0d571e84a79e | Create __init__.py | Knowlege/tiledtmxloader | __init__.py | __init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
TileMap loader for python for Tiled, a generic tile map editor
from http://mapeditor.org/ .
It loads the \*.tmx files produced by Tiled.
"""
__all__ = ["tmxreader"]

from . import tmxreader

# Optional rendering backends: exported only when their dependencies
# (pygame / pyglet) are importable.  The bare `except:` clauses were
# narrowed to Exception so SystemExit/KeyboardInterrupt are not swallowed
# while import-time failures inside the helpers remain best-effort.
try:
    from . import helperspygame
    __all__.append("helperspygame")
except Exception:
    pass

try:
    from . import helperspyglet
    __all__.append("helperspyglet")
except Exception:
    pass
# Versioning scheme based on: http://en.wikipedia.org/wiki/Versioning#Designating_development_stage
#
# +-- api change, probably incompatible with older versions
# | +-- enhancements but no api change
# | |
# major.minor[.build[.revision]]
# |
# +-|* 0 for alpha (status)
# |* 1 for beta (status)
# |* 2 for release candidate
# |* 3 for (public) release
#
# For instance:
# * 1.2.0.1 instead of 1.2-a
# * 1.2.1.2 instead of 1.2-b2 (beta with some bug fixes)
# * 1.2.2.3 instead of 1.2-rc (release candidate)
# * 1.2.3.0 instead of 1.2-r (commercial distribution)
# * 1.2.3.5 instead of 1.2-r5 (commercial distribution with many bug fixes)
__revision__ = "$Rev: 115 $"  # expanded by SVN keyword substitution
__version__ = tmxreader.__version__  # single source of truth: tmxreader
__author__ = 'DR0ID @ 2009-2011'
#-------------------------------------------------------------------------------
| bsd-3-clause | Python | |
1c12cd1811afb21e44a295d42be24169d042bfcb | Create __init__.py | kelceydamage/sherpa,kelceydamage/sherpa | __init__.py | __init__.py | apache-2.0 | Python | ||
5e370df87e4846e6673d4566cef0291a43cde883 | Create __init__.py | jasongwartz/PyEmailWatcher | __init__.py | __init__.py | mit | Python | ||
3c9a6b871011dfed53c8c5c820aee62c90d04344 | add new parser class to computer's cortex | IanDCarroll/xox | OnStage/parser_abilities.py | OnStage/parser_abilities.py | class Parser(object):
def get_empty_square(self, options, code):
row_1 = [0, [0,1,2]]
row_2 = [1, [3,4,5]]
row_3 = [2, [6,7,8]]
col_1 = [3, [0,3,6]]
col_2 = [4, [1,4,7]]
col_3 = [5, [2,5,8]]
diag1 = [6, [0,4,8]]
diag2 = [7, [2,4,6]]
if code == row_1[0]:
return self.find_empty_spot(options, row_1[1])
elif code == row_2[0]:
return self.find_empty_spot(options, row_2[1])
elif code == row_3[0]:
return self.find_empty_spot(options, row_3[1])
elif code == col_1[0]:
return self.find_empty_spot(options, col_1[1])
elif code == col_2[0]:
return self.find_empty_spot(options, col_2[1])
elif code == col_3[0]:
return self.find_empty_spot(options, col_3[1])
elif code == diag1[0]:
return self.find_empty_spot(options, diag1[1])
elif code == diag2[0]:
return self.find_empty_spot(options, diag[2])
def find_empty_spot(self, options, index_list):
for spot in index_list:
if spot in options:
return spot
| mit | Python | |
d025d862a357eb99490133cc25f27e52a6573015 | add image module | anqxyr/jarvis | jarvis/images.py | jarvis/images.py | #!/usr/bin/env python3
"""Commands for the Image Team."""
###############################################################################
# Module Imports
###############################################################################
import collections
import pyscp
import re
from . import core, parser, lexicon
###############################################################################
# Global Variables
###############################################################################
wiki = pyscp.wikidot.Wiki('scp-stats')  # stats wiki hosting the image lists
wiki.auth(core.config['wiki']['name'], core.config['wiki']['pass'])
IMAGES = collections.defaultdict(list)  # category name -> list of Image
###############################################################################
# Internal Functions
###############################################################################
class Image:
    """A tracked image: its URL, the page using it, source, review state."""
    def __init__(self, url, page, source, status, notes):
        self.url = url          # direct image URL
        self.page = page        # wiki page the image appears on
        self.source = source    # origin URL ('' when unknown)
        self.status = status    # review status label
        self.notes = notes      # free-form reviewer notes
def load_images():
    """Populate the IMAGES cache from the stats wiki's 'images' page."""
    soup = wiki('images')._soup
    for block in soup(class_='collapsible-block'):
        category = block.find(class_='collapsible-block-link').text
        cells = block('tr')
        # Table rows come in pairs: the image row, then its notes row.
        for image_row, notes_row in zip(cells[::2], cells[1::2]):
            url_cell, page_cell, source_cell, status_cell = image_row('td')
            source_url = source_cell.a['href'] if source_cell('a') else ''
            notes_text = notes_row.find('td').text
            IMAGES[category].append(
                Image(url_cell.img['src'], page_cell.a['href'],
                      source_url, status_cell.text, notes_text))
def save_images(category):
    """Render one category's images as a wikidot table and save the page."""
    def wtag(name, *data, **kwargs):
        # Wrap `data` lines in a [[name attr="..."]] ... [[/name]] wikidot tag.
        args = []
        for k, v in kwargs.items():
            args.append('{}="{}"'.format(k, v))
        result = ['[[{} {}]]'.format(name, ' '.join(args))]
        result.extend(data)
        result.append('[[/{}]]'.format(name))
        return '\n'.join(result)
    rows = []
    for image in sorted(IMAGES[category], key=lambda x: x.page):
        # Thumbnail cell spans the data row and the notes row below it.
        img = '[[image {0.url} width="100px"]]'.format(image)
        img = wtag('cell', img, rowspan=2)
        page = image.page.split('/')[-1]
        page = '[{} {}]'.format(image.page, page)
        page = wtag('cell', page)
        # Link text is just the source's hostname, or empty when unknown.
        source = re.match(r'https*://(?:www\.)?([^/]+)', image.source)
        source = source.group(1) if source else ''
        source = source and '[{} {}]'.format(image.source, source)
        source = wtag('cell', source)
        # Status gets a CSS class derived from its label, e.g. "in use" -> in-use.
        status = image.status.lower().replace(' ', '-')
        status = '[[span class="{}"]]{}[[/span]]'.format(status, image.status)
        status = wtag('cell', status)
        rows.append(wtag('row', img, page, source, status))
        rows.append(wtag('row', wtag('cell', image.notes, colspan=4)))
    wiki('images:' + category).create(wtag('table', *rows), category)
def get_page_category(page):
    """Map a wiki page to its image-list category name, or None if unknown."""
    if 'scp' in page.tags and re.match(r'.*scp-[0-9]+$', page.url):
        num = int(page.url.split('-')[-1])
        num = (num // 100) * 100
        # The first block is labelled 002-099 (there is no SCP-000/001 here).
        return '{:03d}-{:03d}'.format(num or 2, num + 99)
    for tag in ('joke', 'explained', 'archived'):
        if tag in page.tags:
            return tag
    if 'goi-format' in page.tags:
        return 'goi'
    if 'tale' in page.tags:
        letter = page.title[0].lower()
        # BUG FIX: this previously iterated a plain dict and relied on its
        # iteration order, which is undefined on interpreters before 3.7;
        # the thresholds must be checked in ascending order.
        for threshold, bucket in (('g', 'A-F'), ('n', 'G-M'), ('u', 'N-T')):
            if letter < threshold:
                return bucket
        return 'U-Z'
    if page.url in core.wiki('001').links:
        return '001'
###############################################################################
# Bot Commands
###############################################################################
@core.command
@parser.images
def images(inp, mode):
    """Dispatch the images bot command to its subcommand handler."""
    # The subcommand name is the suffix of each handler's function name.
    handlers = {handler.__name__.split('_')[-1]: handler
                for handler in [images_scan]}
    return handlers[mode](inp)
@parser.images_scan
def images_scan(inp, *, page):
    """Scan a wiki page for images and record any new ones in its category."""
    page = core.wiki(page)
    cat = get_page_category(page)
    if not cat:
        return lexicon.images.scan.unknown_category
    counter = 0
    for img in page._soup.find(id='page-content')('img'):
        print(img)  # NOTE(review): looks like a leftover debug print — remove?
        # Skip images already tracked for this category.
        if any(i.url == img['src'] for i in IMAGES[cat]):
            continue
        img = Image(img['src'], page.url, '', '', '')
        IMAGES[cat].append(img)
        counter += 1
    save_images(cat)
    if counter == 1:
        return lexicon.images.scan.added_one
    elif counter > 1:
        return lexicon.images.scan.added_several.format(count=counter)
    else:
        return lexicon.images.scan.added_none
###############################################################################
load_images()  # populate the in-memory image cache at import time
| mit | Python | |
e7879fa6a123013501e6cddabc073d17d643e42b | Create EPICLocSearch_size.py | RetelC/PDra_Phylogeography,RetelC/PDra_Phylogeography | EPICLocSearch_size.py | EPICLocSearch_size.py | " " " this file was created in november 2014
as part of a de novo search for EPIC loci in
the chaetognath species Pterosagitta draco
property of dr. Ferdinand Marlétaz
" " "
#!/usr/bin/env python
import sys
from collections import defaultdict
flanks=defaultdict(list)
for line in open(sys.argv[1]):
bline=line.rstrip().split('\t')
name=bline[0].rsplit('_',1)[0]
side=bline[0].rsplit('_',1)[1]
#start,end=sort(int(bline[8]),int(bline[9]))
flanks[name].extend([int(bline[8]),int(bline[9])])
for intr,flk in flanks.items():
if len(flk)==4:
spos=sorted(flk)
#print flk,'->',spos
print intr,abs(spos[2]-spos[1])
#else:
# print intr,','.join(map(str,flk))
| mit | Python | |
8994f69f23271aa93d83e81032542f17b38423fd | Add custom IPython configuration ✨ | reillysiemens/dotfiles,reillysiemens/dotfiles | .ipython/profile_default/ipython_config.py | .ipython/profile_default/ipython_config.py | """
IPython configuration with custom prompt using gruvbox colors.
- https://github.com/reillysiemens/ipython-style-gruvbox
Thanks to @petobens for their excellent dotfiles.
- https://github.com/petobens/dotfiles
"""
from typing import List, Optional, Tuple
import IPython.terminal.prompts as prompts
from prompt_toolkit.application import get_app
from prompt_toolkit.key_binding.vi_state import InputMode
from pygments.token import _TokenType, Token
from gruvbox import Color, GruvboxStyle
# `get_config()` is injected into the namespace by IPython when this
# profile is loaded; hence the linter suppressions.
config = get_config()  # type: ignore # noqa: E0602
class Prompt(prompts.Prompts):
    """Custom IPython prompt whose colors track the current vi input mode."""

    _before: str = "❰"
    _after: str = "❱ ⬢ "
    _continuation: str = "… "

    def in_prompt_tokens(self) -> List[Tuple[_TokenType, str]]:
        """Return the input prompt tokens."""
        insert = get_app().vi_state.input_mode == InputMode.INSERT
        prompt_token = (
            prompts.Token.InsertPrompt if insert else prompts.Token.NavPrompt
        )
        num_token = (
            prompts.Token.InsertPromptNum if insert else prompts.Token.NavPromptNum
        )
        return [
            (prompt_token, self._before),
            (num_token, str(self.shell.execution_count)),
            (prompt_token, self._after),
        ]

    def continuation_prompt_tokens(
        self, width: Optional[int] = None
    ) -> List[Tuple[_TokenType, str]]:
        """Return the multi-line continuation prompt tokens."""
        if width is None:
            width = self._width()
        insert = get_app().vi_state.input_mode == InputMode.INSERT
        token = prompts.Token.InsertPrompt if insert else prompts.Token.NavPrompt
        return [(token, " " * (width - 2) + self._continuation)]

    def out_prompt_tokens(self) -> List[Tuple[_TokenType, str]]:
        """Return an empty output prompt."""
        return []
# Shell behavior: skip the banner, exit without confirmation, vi keys,
# 24-bit color, and the custom prompt/style defined above.
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.confirm_exit = False
config.TerminalInteractiveShell.editing_mode = "vi"
config.TerminalInteractiveShell.true_color = True
config.TerminalInteractiveShell.prompts_class = Prompt
config.TerminalInteractiveShell.highlighting_style = GruvboxStyle
# NOTE(review): each *PromptNum deliberately uses the opposite hue of its
# prompt token — presumably so the execution count contrasts; confirm
# against the gruvbox theme before "fixing".
config.TerminalInteractiveShell.highlighting_style_overrides = {
    Token.InsertPrompt: Color.neutral_blue,
    Token.NavPrompt: Color.neutral_purple,
    Token.InsertPromptNum: f"{Color.neutral_purple} bold",
    Token.NavPromptNum: f"{Color.neutral_blue} bold",
}
| isc | Python | |
12428f3aebc8ef54b8308f676b3d7b7d7e3077e7 | add ingestion of tar files form SNEX | svalenti/lcogtsnpipe | trunk/bin/ingesttar.py | trunk/bin/ingesttar.py | #/usr/bin/env python
import lsc
import sys
import os
import re
import string
import tarfile
from optparse import OptionParser
_dir = os.environ['LCOSNDIR']  # destination directory for extracted tarballs
def ingesttar(_tarfile, force=False):
    """Extract a SNEx tar archive into $LCOSNDIR and ingest its images.

    _tarfile -- path to the downloaded tarball
    force    -- when True, reingest files even if they already exist
    """
    # NOTE(review): extractall trusts the archive's member paths; only use
    # with tarballs from a trusted source (here: SNEx).
    with tarfile.open(_tarfile) as my_tar:
        imglist = my_tar.getnames()
        my_tar.extractall(_dir)
    # Changes from the original: the unused `_targetid` (computed with the
    # Python-2-only `string.split`) is gone, the TarFile is closed via a
    # context manager, and the duplicated ingest call is collapsed.
    lsc.mysqldef.ingestredu(imglist, 'yes' if force else 'no')  # ingest new data into photlco
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Command-line entry point: ingest a single SNEx tarball.
    cli = ArgumentParser(description='Downloads data from SNEX')
    cli.add_argument("-f", "--file")
    cli.add_argument("-G", "--force-db", action="store_true",
                     help="reingest files even if they already exist")
    options = cli.parse_args()
    if not options.file:
        print('tar file not included (-f tarfile)')
    else:
        ingesttar(options.file, options.force_db)
| mit | Python | |
c42f891b8d11efba950876b9c4eb7a77cc08c743 | move to correct directory | r3dact3d/tweeter | tweepy/urbanTwitBot.py | tweepy/urbanTwitBot.py | #!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import tweepy
from config import *
# Scrape today's featured word and definition from Urban Dictionary.
url = 'http://www.urbandictionary.com/'
page = requests.get(url)
soup = BeautifulSoup(page.text, "lxml")
data = dict()
data['def'] = soup(class_='meaning')[0].text
data['word'] = soup(class_='word')[0].text
# BUG FIX: .strip('u') removed literal 'u' characters from the ends of the
# word/definition (mangling e.g. "umbrella..."); strip whitespace instead.
word = data['word'].strip()
meaning = data['def'].strip()
short = 'https://goo.gl/gZMF'
payLoad = 'Daily #UrbanDictionary> %s: %s ... %s' % (word, meaning[:65], short)
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
def tweet(payLoad):
    """Post payLoad to Twitter, skipping bare-newline payloads."""
    try:
        print(payLoad)
        if payLoad == '\n':
            return
        api.update_status(payLoad)
    except tweepy.TweepError as e:
        print(e.reason)
tweet(payLoad)  # fire the daily tweet when run as a script
| unlicense | Python | |
b3b5337ddcc5984602eab46a085e976028ff2a77 | Add initial implementation of the LayerNormalizationLayer | erfannoury/capgen-lasagne | LayerNormalization.py | LayerNormalization.py | import numpy as np
import theano
import theano.tensor as T
import lasagne
__all__ = [
"LayerNormalizationLayer"
]
class LayerNormalizationLayer(lasagne.layers.Layer):
    """A Layer Normalization layer.

    Normalizes each instance over its trailing `feat_dims` feature
    dimensions, then applies a learned elementwise gain (alpha) and
    bias (beta).

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape
    feat_dims : int
        The number of feature dimensions. The last dimensions for
        features of each instance will be normalized.
    alpha : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the alpha.
        This should be a tensor with shape ``(feat1, feat2, ...)``.
    beta : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the beta.
        This should be a vector with shape ``(feat1, feat2, ...)``.
    eps : float
        Small constant added to the variance for numerical stability.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> from LayerNormalization import LayerNormalizationLayer
    >>> l_in = InputLayer((100, 20))
    >>> l1 = DenseLayer(l_in, num_units=50)
    >>> ln = LayerNormalizationLayer(l1, 1)
    """
    # BUG FIX: the base class was referenced as the bare name `Layer`,
    # which is never imported in this module (NameError at class creation);
    # it must be lasagne.layers.Layer.
    def __init__(self, incoming, feat_dims,
                 alpha=lasagne.init.Constant(1.0),
                 beta=lasagne.init.Constant(0.0),
                 eps=1e-5,
                 **kwargs):
        super(LayerNormalizationLayer, self).__init__(incoming, **kwargs)
        if feat_dims < 1:
            raise ValueError("Number of feature dimensions should be "
                             "greater than zero.")
        self.feat_dims = feat_dims
        self.eps = eps
        self.feat_shape = self.input_shape[-self.feat_dims:]
        # Negative axes covering the trailing feat_dims dimensions,
        # e.g. feat_dims=2 -> (-2, -1).  (`range` replaces py2-only xrange.)
        self.norm_axes = tuple(range(-feat_dims, 0))
        self.alpha = self.add_param(alpha, self.feat_shape, name="alpha")
        self.beta = self.add_param(beta, self.feat_shape, name="beta",
                                   regularizable=False)

    def get_output_shape_for(self, input_shape):
        # Normalization preserves shape; echo the *given* shape rather than
        # self.input_shape so shape inference with alternate shapes works.
        return input_shape

    def get_output_for(self, input, **kwargs):
        # Standardize over the feature axes, then scale and shift.
        input = (input - input.mean(self.norm_axes, keepdims=True)) / \
            T.sqrt(input.var(self.norm_axes, keepdims=True) + self.eps)
        alpha = T.shape_padleft(self.alpha, input.ndim - self.feat_dims)
        return input * alpha + self.beta
dfeca2aea76f226cfcf3ed06bbcfdcdf7f7d2f44 | Add member event | brantje/captain_hook,brantje/telegram-github-bot,brantje/telegram-github-bot,brantje/captain_hook,brantje/captain_hook,brantje/telegram-github-bot | captain_hook/services/github/events/member.py | captain_hook/services/github/events/member.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import GithubEvent
class MemberEvent(GithubEvent):
    """Formats GitHub organization membership events for chat delivery."""

    def process(self, request, body):
        """Render a member added/edited/removed event as a markdown message."""
        def short(url):
            # Strip the host so build_redirect_link gets a repo-relative path.
            return url.replace('https://github.com/', '')

        # The original also computed an unused `repo_link` from
        # body['repository'], which could KeyError on payloads without a
        # repository; it has been removed.
        params = {
            'username': body['member']['login'],
            'user_link': self.build_redirect_link(
                'github', 'member', short(body['member']['html_url'])),
            'sender_name': body['sender']['login'],
            'sender_link': self.build_redirect_link(
                'github', 'member', short(body['sender']['html_url'])),
            'org_name': body['organization']['login'],
        }
        templates = {
            'edited': "[{sender_name}]({sender_link}) edited permissions "
                      "from [{username}]({user_link}) at {org_name}",
            'removed': "[{sender_name}]({sender_link}) removed "
                       "[{username}]({user_link}) from {org_name}",
        }
        default = "[{username}]({user_link}) joined the {org_name} organisation"
        message = templates.get(body.get('action', 'added'), default)
        return {"default": str(message.format(**params))}

    def get_redirect(self, request, event, params):
        """Build a redirect descriptor pointing at the GitHub profile."""
        return {
            'meta_title': '',
            'meta_summary': '',
            'poster_image': '',
            'redirect': 'https://github.com/' + params,
            'status_code': 404,
        }
| apache-2.0 | Python | |
f01600491717c46103b27668e5d43e44a7dfa3b8 | Add customer tests | paramono/amocrm | tests/test_customer.py | tests/test_customer.py | import unittest
from unittest import skip
from unittest.mock import patch
from amocrm.exceptions import MissingArgument
from amocrm.entities import Customer
from .base_mocksettings import BaseMockSettingsTest
class TestCustomer(BaseMockSettingsTest):
    """Unit tests for the Customer entity's validation and serialization."""

    def test_Customer_todict_returns_dict(self):
        # todict() must always produce a plain dict.
        amocustomer = Customer(
            name=self.name,
            status_id=123,
        )
        d = amocustomer.todict()
        self.assertIsNotNone(d)
        self.assertIsInstance(d, dict)

    def test_Customer_raises_on_empty_name(self):
        # `name` is mandatory; omitting it must raise MissingArgument.
        with self.assertRaises(MissingArgument):
            Customer(status_id=123)

    def test_Customer_builds_correct_dict(self):
        # The serialized payload must match the amoCRM "add customer" schema.
        name = 'Vasya'
        status_id = 123
        amocustomer = Customer(
            name=name,
            status_id=status_id,
        )
        d = amocustomer.todict()
        self.assertEqual(d, {
            "request": {
                "customers": {
                    "add": [{
                        "name": name,
                    }]
                }
            }
        })

    def test_Customer_is_json_serializable(self):
        # str() triggers serialization; a TypeError means a field could
        # not be serialized.
        amocustomer = Customer(
            name=self.name,
            status_id=123
        )
        try:
            str(amocustomer)
        except TypeError as e:
            self.fail('Customer is not JSON serializable!')

    def test_Customer_accepts_single_tag(self):
        # A single string tag is stored as a one-element list but
        # serialized as the bare string.
        tag = 'some tag'
        amocustomer = Customer(
            name=self.name,
            fields=self.amofields,
            tags=tag,
        )
        self.assertEqual(
            amocustomer.tags,
            [tag],
        )
        d = amocustomer.todict()
        self.assertEqual(d, {
            "request": {
                "customers": {
                    "add": [{
                        "name": self.name,
                        "tags": tag,
                    }]
                }
            }
        })

    def test_Customer_accepts_multiple_tags_as_list(self):
        # A list of tags is kept as-is and serialized comma-joined.
        tags = ['master', 'amo']
        amocustomer = Customer(
            name=self.name,
            fields=self.amofields,
            tags=tags
        )
        self.assertEqual(
            amocustomer.tags,
            tags,
        )
        d = amocustomer.todict()
        self.assertEqual(d, {
            "request": {
                "customers": {
                    "add": [{
                        "name": self.name,
                        "tags": ", ".join(tags),
                    }]
                }
            }
        })

    def test_Customer_accepts_multiple_tags_as_str(self):
        # A comma-separated string is split into a list, then serialized
        # re-joined with ", ".
        tags = 'master,amo'
        amocustomer = Customer(
            name=self.name,
            fields=self.amofields,
            tags=tags
        )
        expected_result = ['master', 'amo']
        self.assertEqual(
            amocustomer.tags,
            expected_result,
        )
        d = amocustomer.todict()
        self.assertEqual(d, {
            "request": {
                "customers": {
                    "add": [{
                        "name": self.name,
                        "tags": ', '.join(expected_result),
                    }]
                }
            }
        })
| bsd-3-clause | Python | |
1c7f6a6c44af9c2de372fb2c07469da29bc11764 | Add tests for encoding subsystem | prophile/libdiana | tests/test_encoding.py | tests/test_encoding.py | from diana.encoding import encode, decode
from nose.tools import eq_
# Shared (format string, encoded bytes, decoded values) triples used by
# both the encode and decode generator tests below.
DECODE_TESTS = [ ('', (), ()),
                 ('b', (0x00,), (0,)),
                 ('BB', (0x12, 0xfe), (0x12, 0xfe)),
                 ('bb', (0x12, 0xfe), (0x12, -2)),
                 ('s', (0x12, 0x34), (0x3412,)),
                 ('s', (0xff, 0xff), (-1,)),
                 ('S', (0xff, 0xff), (0xffff,)),
                 ('i', (0x12, 0x34, 0x56, 0x78), (0x78563412,)),
                 ('I', (0xff, 0xff, 0xff, 0xff), (0xffffffff,)),
                 ('i', (0xff, 0xff, 0xff, 0xff), (-1,)),
                 ('f', (0x00, 0x00, 0x80, 0x3f), (1.0,)),
                 ('u', (0x05, 0x00, 0x00, 0x00,
                        0x62, 0x00, 0x65, 0x00,
                        0x65, 0x00, 0x73, 0x00,
                        0x00, 0x00), ('bees',)),
                 ('[B]', (0x12, 0x34, 0x56, 0x78),
                  ([(0x12,), (0x34,), (0x56,), (0x78,)],)),
                 ('[BB]', (0x12, 0x34, 0x56, 0x78),
                  ([(0x12, 0x34), (0x56, 0x78)],)),
                 ('B[BB]B', (0x12, 0x34, 0x56, 0x78),
                  (0x12, [(0x34, 0x56)], 0x78)),
                 ('B[]', (0x12,), (0x12, [])) ]
def test_encode():
    """Each (fmt, coded, uncoded) triple must encode to the raw bytes."""
    def check(fmt, coded, uncoded):
        eq_(encode(fmt, uncoded), bytes(coded))
    for fmt, coded, uncoded in DECODE_TESTS:
        yield check, fmt, coded, uncoded
def test_decode():
    """Each (fmt, coded, uncoded) triple must decode back to the values."""
    def check(fmt, coded, uncoded):
        eq_(decode(fmt, bytes(coded)), uncoded)
    for fmt, coded, uncoded in DECODE_TESTS:
        yield check, fmt, coded, uncoded
| mit | Python | |
93a1ff67e62d0508744420cab8263a8cc893b119 | Add example listing backup counts | uroni/urbackup-server-python-web-api-wrapper | test/list_backup_counts.py | test/list_backup_counts.py | import urbackup_api
server = urbackup_api.urbackup_server("http://127.0.0.1:55414/x", "admin", "foo")
clients = server.get_status()
for client in clients:
file_backups = server.get_clientbackups(client["id"])
incr_file = 0
full_file = 0
for file_backup in file_backups:
if file_backup["incremental"]>0:
full_file+=1
else:
incr_file+=1
incr_image = 0
full_image = 0
image_backups = server.get_clientimagebackups(client["id"])
for image_backup in image_backups:
if image_backup["letter"]=="SYSVOL" or image_backup["letter"]=="ESP":
continue
if image_backup["incremental"]>0:
full_image+=1
else:
incr_image+=1
print("Client {clientname} has {incr_file} incr file backups, {full_file} "
"full file backups, {incr_image} incr image backups and "
"{full_image} full image backups".format(
incr_file=incr_file, clientname=client["name"],
full_file=full_file, incr_image=incr_image,
full_image=full_image) )
| apache-2.0 | Python | |
eafd611428c4206ba15bada0996b362ee74e0d3a | Create app.py | EricSchles/alert_system,EricSchles/alert_system | server/app.py | server/app.py | #server
from flask import Flask, render_template, request
import requests
import pickle
from mapper import Mapper
from multiprocessing import Process
import json
import time
from subprocess import call
app = Flask(__name__)  # alert-system web API
@app.route("/remove/<website>", methods=["GET", "POST"])
def remove(website):
    """Remove a website from the tracked list; 'success' or 'failure'."""
    # File handles are now closed deterministically via `with`.
    with open("websites.json", "r") as handle:
        websites = json.load(handle)
    try:
        websites.remove(website)
    except ValueError:
        # Website was not in the list.
        return "failure"
    with open("websites.json", "w") as handle:
        json.dump(websites, handle)
    return "success"
@app.route("/websites",methods=["GET"])
def websites():
return pickle.dumps(json.load(open("websites.json","r")))
@app.route("/email/<address>", methods=["GET","POST"])
def email_update(address):
emails = pickle.load(open("emails.p","rb"))
emails.append(address)
pickle.dump(emails,open("emails.p","wb"))
return "success - email added"
@app.route("/phone_number/<number>", methods=["GET","POST"])
def phone_update(number):
numbers = pickle.load(open("numbers.p","rb"))
numbers.append(number)
pickle.dump(numbers,open("numbers.p","wb"))
return "success - phone number added"
@app.route("/map/<website>",methods=["GET","POST"])
def map(website):
print website
websites = json.load(open("websites.json","r"))
if not website in websites:
websites.append(website)
json.dump(websites, open("websites.json","w"))
tmp_website = "https://"+website
try:
website = requests.get(tmp_website).url
except requests.exceptions.SSLError:
website = requests.get("http://"+website).url
finally:
if not website:
return "failure"
m = Mapper(website)
print "Started scraping links for "+ website
call(["python","worker.py",pickle.dumps(m),website,str(1),pickle.dumps([])])
return "success"
return "exists"
app.run(
debug=True,
port=5001,
threaded=True
)
| apache-2.0 | Python | |
16f0ec2d0e5c33126ddb01604213c6a14115e605 | Add basic test of pocket_parser. | salilab/cryptosite,salilab/cryptosite,salilab/cryptosite | test/test_pocket_parser.py | test/test_pocket_parser.py | import unittest
import utils
import os
import sys
import re
import subprocess
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(TOPDIR)
import pocket_parser
class Tests(unittest.TestCase):
def test_get_cnc(self):
"""Test get_cnc() function"""
res = pocket_parser.get_cnc(os.path.join(TOPDIR, 'test',
'input', 'test.pdb'),
None)
self.assertEqual(len(res), 8)
self.assertEqual(res[('ILE', 9, 'A')], 0.0)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | Python | |
4f722f576f2e14da5373b0a2da3e6ba5d94e37d9 | add simple dumb script to find XOR keys that are revealed by padding | armijnhemel/binaryanalysis | src/scripts/findxor.py | src/scripts/findxor.py | #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2015 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
Find XOR key using some very superdumb methods.
The idea is to exploit the idea that padding is used in firmwares. Usually padding
consists of NUL bytes. When XORing the key with NUL bytes the result will be the key.
Often it is very easy to see the key in plain sight using for example the command
"hexdump -C".
In this script it is assumed (for now) that the keylength is 16 and that there is just
one single key used. Manual inspection is definitely needed.
'''
import sys, os, collections
from optparse import OptionParser
def findpadding(firmware):
counter = collections.Counter()
fwfile = open(firmware)
firmwarebytes = fwfile.read()
fwfile.close()
fwlen = len(firmwarebytes)
blocks = fwlen/16
byteblocks = []
for i in xrange(0, blocks):
byteblocks.append(firmwarebytes[i*16:i*16+16])
counter.update(byteblocks)
rank = 1
reportamount = 10
print "MOST COMMON, TOP %d" % reportamount
for i in counter.most_common(reportamount):
print rank, i[1], map(lambda x: hex(ord(x)), i[0])
rank += 1
def main(argv):
parser = OptionParser()
parser.add_option("-f", "--firmware", action="store", dest="firmware", help="path to firmware", metavar="FILE")
(options, args) = parser.parse_args()
if options.firmware == None:
parser.exit("Path to firmware not supplied, exiting")
findpadding(options.firmware)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | Python | |
7021806e9e510286424dae696c2f4eee0a70b630 | Define default crispy form helper | nigma/djutil | src/forms.py | src/forms.py | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
import crispy_forms.helper
class DefaultFormHelper(crispy_forms.helper.FormHelper):
def __init__(self, form=None):
super(DefaultFormHelper, self).__init__(form=form)
self.form_class = "form-horizontal"
self.html5_required = True
self.help_text_inline = True
| mit | Python | |
a7820c3a317c9d6ff2f3585eecd3e05c47364afe | Create A_Okubo_weiss_results.py | Herpinemmanuel/Oceanography | Cas_6/Okubo_Weiss/A_Okubo_weiss_results.py | Cas_6/Okubo_Weiss/A_Okubo_weiss_results.py | # First iteration
![alt tab]()
# Last iteration
![alt tab]()
# Movie
![alt tab]()
| mit | Python | |
49db30bdb872d88c343a2ab480c294d1802b301b | Create trash.py | Programmeerclub-WLG/Agenda-App | gui/trash.py | gui/trash.py | apache-2.0 | Python | ||
a8848ccb72c2513b7c42809d43359023ff158db6 | Add sync file, not using it yet though | phdesign/flickr-rsync | modules/sync.py | modules/sync.py | # -*- coding: utf-8 -*-
import operator
class Sync(object):
def __init__(self):
pass
def run(self, from_storage, to_storage):
self._from_storage = from_storage
self._to_storage = to_storage
self._from_folders = sorted(from_storage.list_folders(), key=lambda x: x.name)
self._to_folders = sorted(to_storage.list_folders(), key=lambda x: x.name)
def _shallow(self):
for from_folder in self._from_folders:
exists = False
for to_folder in self._to_folders:
if from_folder.name == to_folder.name:
exists = True
break
if from_folder.name > to_folder.name:
break
if not exists:
self._copy_folder(from_folder)
def _deep(self):
pass
def _copy_folder(self, folder):
print "Copying folder " + folder.name
| mit | Python | |
08ee789d0dc6b4b29e25dc23683d6874c886a69d | Include generate-credentials.py from ga-collector | alphagov/performanceplatform-collector,alphagov/performanceplatform-collector,alphagov/performanceplatform-collector | tools/generate-credentials.py | tools/generate-credentials.py | """
Generate required credentials files from a downloaded client_secret.json.
When setting up a Google API account you are provided with a
client_secret.json. You need to generate refresh and access tokens to use the
API.
For more information see:
http://bit.ly/py-oauth-docs
Call this tool with the path to your downloaded client_secret.json as
the only argument. The credentials file in ./config/credentials.json will be
updated.
"""
import argparse
import json
from os.path import abspath
from gapy.client import from_secrets_file
import oauth2client.tools
def copy_json(input_path, output_path):
with open(input_path) as input:
with open(output_path, "w+") as output:
json.dump(
json.load(input),
output,
indent=2)
def generate_stuff(client_secret):
# Prevent oauth2client from trying to open a browser
# This is run from inside the VM so there is no browser
oauth2client.tools.FLAGS.auth_local_webserver = False
storage_path = abspath("./config/storage.db")
secret_path = abspath("./config/client_secret.json")
from_secrets_file(
client_secret,
storage_path=storage_path)
copy_json(client_secret, secret_path)
with open('./config/credentials.json', 'w+') as f:
credentials = {
"CLIENT_SECRETS": secret_path,
"STORAGE_PATH": storage_path,
}
json.dump(credentials, f, indent=2)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'client_secret',
help='path to the client secrets file from the Google API Console')
args = parser.parse_args()
generate_stuff(args.client_secret)
| mit | Python | |
8919cf7171d7a659c0b90c41f2520029bab1423e | Add tool to extract build process from .travis.yml | advancedtelematic/sota_client_cpp,advancedtelematic/aktualizr,advancedtelematic/aktualizr,advancedtelematic/aktualizr,advancedtelematic/aktualizr,advancedtelematic/sota_client_cpp | scripts/run_travis.py | scripts/run_travis.py | #!/usr/bin/env python3
import argparse
import sys
import pprint
import shlex
import yaml
from pathlib import Path
def gen_test_script(ty, job, output):
output.write('#!/usr/bin/env bash\n\n')
output.write('set -ex\n')
# extract environment variables
e_str = ty['env'][job]
for v_assign in shlex.split(e_str):
output.write(v_assign + '\n')
output.write('\n')
# extract script lines
for l in ty['script']:
output.write(l + '\n')
def main():
parser = argparse.ArgumentParser(description='Run travis jobs locally')
parser.add_argument('--yml', '-y', metavar='travis.yml', type=Path,
default=Path('.travis.yml'), help='.travis.yml file')
parser.add_argument('--job', '-j', metavar='JOB', type=int, default=0)
parser.add_argument('--output', '-o', metavar='OUTPUT.sh', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
ymlf = args.yml
with open(ymlf, 'r') as f:
yml = f.read()
ty = yaml.load(yml)
if args.verbose:
pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
pp.pprint(ty)
gen_test_script(ty, args.job, args.output)
if __name__ == '__main__':
sys.exit(main())
| mpl-2.0 | Python | |
015291b5d894d0d7a74725bf0846ad88e5c85522 | Add a setup file | yakovenkodenis/websockets_secure_chat,yakovenkodenis/websockets_secure_chat,yakovenkodenis/websockets_secure_chat,yakovenkodenis/websockets_secure_chat | PythonClient/setup.py | PythonClient/setup.py | from distutils.core import setup
setup(name='PythonClient',
version='1.0',
description='Python Distribution Utilities',
author='Denis Yakovenko',
author_email='yakovenko.denis.a@gmail.com',
url='https://www.github.com/yakovenkodenis/websockets_secure_chat',
packages=['diffie_hellman', 'hashes'])
| mit | Python | |
d186c80feb7dee875a1a7debfd115e100dc3fca1 | Add a cronjob script for sending studentvoice notifications. | enjaz/enjaz,osamak/student-portal,osamak/student-portal,osamak/student-portal,enjaz/enjaz,enjaz/enjaz,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,osamak/student-portal | send_studentvoices.py | send_studentvoices.py | import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "studentportal.settings")
from django.core.urlresolvers import reverse
from django.conf import settings
from post_office import mail
from studentvoice.models import Voice
for voice in Voice.objects.filter(was_sent=False, parent__isnull=True,
is_published=True,
response__isnull=True,
score__gte=settings.STUDENTVOICE_THRESHOLD):
url = reverse('studentvoice:show', args=(voice.pk,))
email_context = {'voice': voice, 'url': url}
print "Handling voice #%d..." % voice.pk
# Send notification to the voice recipient
print "Preparing recipient email to %s..." % voice.recipient.email
if voice.recipient.secondary_email:
secondary_email = [voice.recipient.secondary_email]
print "Adding secondary_email, as CC."
else:
secondary_email = None
mail.send([voice.recipient.email], cc=secondary_email,
template="studentvoice_threshold_recipient",
context=email_context)
# Send notification to the voice submitter
print "Preparing submitter email to %s..." % voice.submitter.email
mail.send([voice.submitter.email],
template="studentvoice_threshold_submitter",
context=email_context)
# Send notification to the those who voted in favor of the voice
for vote in voice.vote_set.filter(is_counted=True, vote_type='U'):
print "Preparing voter email to %s..." % vote.submitter.email
email_context['vote'] = vote
mail.send([vote.submitter.email],
template="studentvoice_threshold_voter",
context=email_context)
voice.was_sent = True
voice.is_editable = False
voice.save()
| agpl-3.0 | Python | |
f43b9e44d30c9ac2800ca7559bdff84d06d64bc5 | Add LDAP code | oscharvard/Flask-CAS | flask_cas/osc_ldap.py | flask_cas/osc_ldap.py | import json
import ldap
import sys
import hashlib
from flask import current_app, session
ldap.set_option(ldap.OPT_DEBUG_LEVEL, 0)
# turn off referrals
ldap.set_option(ldap.OPT_REFERRALS, 0)
# version 3
ldap.set_option (ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
# allow self-signed cert
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
def ldap_lookup(username):
CUSTOMER_NAME = current_app.config['CUSTOMER_NAME']
CUSTOMER_PW = current_app.config['CUSTOMER_PW']
LDAP_URL = current_app.config['LDAP_URL']
DASH_SALT = current_app.config['DASH_SALT']
output = {}
m = hashlib.md5()
m.update(username)
m.update(DASH_SALT)
output['person_id'] = m.hexdigest()
ad_bind_usr = 'uid=%s, ou=applications,o=Harvard University Core,dc=huid,dc=harvard,dc=edu' % CUSTOMER_NAME
ad_bind_pw = CUSTOMER_PW
l = ldap.initialize(LDAP_URL,trace_level=0)
l.simple_bind_s(ad_bind_usr, ad_bind_pw)
FIELDS_TO_RETURN = ['sn', 'givenName', 'displayName', 'mail']
AD_SEARCH_DN = "ou=people, o=Harvard University Core, dc=huid, dc=harvard, dc=edu"
search_filter = '(harvardEduIDNumber=%s)' % username
results = l.search_ext_s(AD_SEARCH_DN,ldap.SCOPE_SUBTREE, search_filter,FIELDS_TO_RETURN)
if results:
try:
cn, lu = results[0]
for k, v in lu.iteritems():
# take first value only?
output[k] = v[0]
except:
pass
#return json.dumps(output)
return output
# make this look in CAS attributes for LDAP attributes
def get_ldap_attribute(attr):
try:
# this is wrong:
raw_ldap_attributes = session[current_app.config['LDAP_ATTRIBUTES_SESSION_KEY']]
except:
return None
ldap_attributes = json.loads(raw_ldap_attributes)
if attr in ldap_attributes:
return ldap_attributes[attr]
else:
return None
| bsd-3-clause | Python | |
102f10b680335c970a8322cda35cccd347a76baf | define template for performing mashup | rfaulkner/Flickipedia,rfaulkner/Flickipedia,rfaulkner/Flickipedia,rfaulkner/Flickipedia,rfaulkner/Flickipedia | flickipedia/mashup.py | flickipedia/mashup.py | """
Author: Ryan Faulkner
Date: October 19th, 2014
Container for mashup logic.
"""
def get_article_count():
"""
Fetch total article count
:return: int; total count of articles
"""
pass
def get_max_article_id():
"""
Fetch the maximum article ID
:return: int; maximum id from article meta
"""
pass
def get_article_object(article):
"""
Fetch corresponding article object
:param article: str; article name
:return: Article; corresponding article model object
"""
pass
def get_wiki_content(article):
"""
Retrieve the wiki content from the mediawiki API
:param article: str; article name
:return: Wikipedia; mediawiki api response object
"""
pass
def get_flickr_photos(article):
"""
Retrience Flickr photo content from Flickr API
:param article: str; article name
:return: list; list of Flickr photo json
"""
pass
def manage_article_storage(max_article_id):
"""
Handle the storage of new articles
:param max_article_id: int; article id
:return: bool; success
"""
pass
def handle_article_insert(article_id):
"""
Handle insertion of article meta data
:param article_id: int; article id
:return: bool; success
"""
pass
def handle_article_content_insert(article_id, page_content):
"""
Handle the insertion of article content
:param article_id: int; article id
:param page_content: json; page content
:return: bool; success
"""
pass
def prep_page_content(wiki_resp, photos):
"""
Prepare the formatted article content
:param wiki_resp: wikipedia; mediawiki api response
:param photos: list; list of photo json
:return: dict; formatted page response passed to jinja template
"""
pass
def update_last_access(article_id):
"""
Update article last access
:param article_id: int; article id
:return: bool; success
"""
pass | bsd-2-clause | Python | |
b3161dce27574146ccb2df509a4846b51e67dfb4 | check a request for a still-not-closed MUC channel fails | Ziemin/telepathy-gabble,mlundblad/telepathy-gabble,mlundblad/telepathy-gabble,jku/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,jku/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble | tests/twisted/muc/presence-before-closing.py | tests/twisted/muc/presence-before-closing.py | """
Test for fd.o#19930.
"""
import dbus
from twisted.words.xish import domish
from gabbletest import exec_test, make_result_iq
from servicetest import (EventPattern, assertEquals, assertLength,
assertContains, sync_dbus, call_async)
import constants as cs
import ns
from mucutil import join_muc, echo_muc_presence
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
room = 'test@conf.localhost'
room_handle, chan, path, props, disco = join_muc(q, bus, conn, stream,
room,
also_capture=[EventPattern('stream-iq', iq_type='get',
query_name='query', query_ns=ns.DISCO_INFO, to=room)])
sync_dbus(bus, q, conn)
# we call Close...
call_async(q, chan, 'Close')
q.expect('dbus-return', method='Close')
# ...so gabble announces our unavailable presence to the MUC.
event = q.expect('stream-presence', to=room + '/test')
elem = event.stanza
assertEquals('unavailable', elem['type'])
# while we wait for the conference server to echo our unavailable
# presence, we try and create the same channel again...
call_async(q, conn.Requests, 'CreateChannel', {
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_TEXT,
cs.TARGET_HANDLE_TYPE: cs.HT_ROOM,
cs.TARGET_ID: room
})
# ...which should fail because the channel hasn't closed yet.
q.expect('dbus-error', method='CreateChannel', name=cs.NOT_AVAILABLE)
# the conference server finally gets around to echoing our
# unavailable presence...
echo_muc_presence(q, stream, elem, 'none', 'participant')
# ...and only now is the channel closed.
q.expect_many(EventPattern('dbus-signal', signal='Closed'),
EventPattern('dbus-signal', signal='ChannelClosed'))
if __name__ == '__main__':
exec_test(test)
| """
Test for fd.o#19930.
"""
import dbus
from twisted.words.xish import domish
from gabbletest import exec_test, make_result_iq
from servicetest import (EventPattern, assertEquals, assertLength,
assertContains, sync_dbus, call_async)
import constants as cs
import ns
from mucutil import join_muc, echo_muc_presence
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged',
args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
room = 'test@conf.localhost'
room_handle, chan, path, props, disco = join_muc(q, bus, conn, stream,
room,
also_capture=[EventPattern('stream-iq', iq_type='get',
query_name='query', query_ns=ns.DISCO_INFO, to=room)])
sync_dbus(bus, q, conn)
# we call Close...
call_async(q, chan, 'Close')
q.expect('dbus-return', method='Close')
# ...so gabble announces our unavailable presence to the MUC...
event = q.expect('stream-presence', to=room + '/test')
elem = event.stanza
assertEquals('unavailable', elem['type'])
# ...which the conference server echos...
echo_muc_presence(q, stream, elem, 'none', 'participant')
# ...and only now is the channel closed.
q.expect_many(EventPattern('dbus-signal', signal='Closed'),
EventPattern('dbus-signal', signal='ChannelClosed'))
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | Python |
d30e019459d1ef026b95739716d2d3a7d791575e | Add bytearray basic tests. | methoxid/micropystat,blmorris/micropython,blazewicz/micropython,rubencabrera/micropython,jlillest/micropython,toolmacher/micropython,ahotam/micropython,emfcamp/micropython,methoxid/micropystat,mpalomer/micropython,vitiral/micropython,orionrobots/micropython,firstval/micropython,kerneltask/micropython,alex-march/micropython,adafruit/micropython,mgyenik/micropython,noahchense/micropython,rubencabrera/micropython,HenrikSolver/micropython,pozetroninc/micropython,galenhz/micropython,cnoviello/micropython,ganshun666/micropython,HenrikSolver/micropython,ryannathans/micropython,blazewicz/micropython,mgyenik/micropython,torwag/micropython,puuu/micropython,adamkh/micropython,slzatz/micropython,warner83/micropython,ericsnowcurrently/micropython,hosaka/micropython,ernesto-g/micropython,SungEun-Steve-Kim/test-mp,dxxb/micropython,pramasoul/micropython,ericsnowcurrently/micropython,adamkh/micropython,vriera/micropython,lowRISC/micropython,deshipu/micropython,oopy/micropython,Timmenem/micropython,cnoviello/micropython,redbear/micropython,alex-march/micropython,chrisdearman/micropython,skybird6672/micropython,cnoviello/micropython,PappaPeppar/micropython,omtinez/micropython,cwyark/micropython,AriZuu/micropython,ganshun666/micropython,ganshun666/micropython,neilh10/micropython,vriera/micropython,blazewicz/micropython,warner83/micropython,ceramos/micropython,Vogtinator/micropython,Vogtinator/micropython,paul-xxx/micropython,tdautc19841202/micropython,warner83/micropython,rubencabrera/micropython,infinnovation/micropython,deshipu/micropython,drrk/micropython,tralamazza/micropython,neilh10/micropython,deshipu/micropython,feilongfl/micropython,ganshun666/micropython,MrSurly/micropython-esp32,TDAbboud/micropython,ahotam/micropython,xhat/micropython,AriZuu/micropython,blazewicz/micropython,misterdanb/micropython,praemdonck/micropython,SHA2017-badge/micropython-esp32,cloudformdesign/micropython,deshipu/micropython,warne
r83/micropython,noahwilliamsson/micropython,Timmenem/micropython,torwag/micropython,MrSurly/micropython-esp32,pfalcon/micropython,HenrikSolver/micropython,trezor/micropython,aitjcize/micropython,ceramos/micropython,kostyll/micropython,cwyark/micropython,puuu/micropython,mianos/micropython,cnoviello/micropython,Vogtinator/micropython,suda/micropython,ahotam/micropython,tuc-osg/micropython,tuc-osg/micropython,bvernoux/micropython,swegener/micropython,ceramos/micropython,xhat/micropython,feilongfl/micropython,turbinenreiter/micropython,slzatz/micropython,neilh10/micropython,mianos/micropython,EcmaXp/micropython,utopiaprince/micropython,ahotam/micropython,danicampora/micropython,selste/micropython,adafruit/micropython,torwag/micropython,danicampora/micropython,rubencabrera/micropython,Peetz0r/micropython-esp32,alex-robbins/micropython,xuxiaoxin/micropython,mpalomer/micropython,drrk/micropython,rubencabrera/micropython,vriera/micropython,SungEun-Steve-Kim/test-mp,dhylands/micropython,dinau/micropython,infinnovation/micropython,skybird6672/micropython,emfcamp/micropython,lbattraw/micropython,dxxb/micropython,xyb/micropython,stonegithubs/micropython,jlillest/micropython,torwag/micropython,methoxid/micropystat,Peetz0r/micropython-esp32,cloudformdesign/micropython,dinau/micropython,martinribelotta/micropython,alex-march/micropython,dxxb/micropython,ruffy91/micropython,paul-xxx/micropython,tdautc19841202/micropython,slzatz/micropython,micropython/micropython-esp32,supergis/micropython,emfcamp/micropython,bvernoux/micropython,misterdanb/micropython,neilh10/micropython,stonegithubs/micropython,SungEun-Steve-Kim/test-mp,hiway/micropython,ryannathans/micropython,ChuckM/micropython,blmorris/micropython,alex-robbins/micropython,ChuckM/micropython,oopy/micropython,firstval/micropython,kostyll/micropython,danicampora/micropython,orionrobots/micropython,hiway/micropython,misterdanb/micropython,jimkmc/micropython,toolmacher/micropython,cnoviello/micropython,pramasoul/micropython,EcmaXp
/micropython,jimkmc/micropython,Peetz0r/micropython-esp32,tralamazza/micropython,emfcamp/micropython,hiway/micropython,kerneltask/micropython,mpalomer/micropython,adafruit/circuitpython,galenhz/micropython,ahotam/micropython,mgyenik/micropython,vitiral/micropython,suda/micropython,puuu/micropython,heisewangluo/micropython,toolmacher/micropython,danicampora/micropython,ruffy91/micropython,jimkmc/micropython,TDAbboud/micropython,chrisdearman/micropython,ericsnowcurrently/micropython,skybird6672/micropython,blmorris/micropython,aethaniel/micropython,dinau/micropython,firstval/micropython,bvernoux/micropython,kerneltask/micropython,aethaniel/micropython,aitjcize/micropython,supergis/micropython,utopiaprince/micropython,heisewangluo/micropython,emfcamp/micropython,SungEun-Steve-Kim/test-mp,toolmacher/micropython,ChuckM/micropython,tdautc19841202/micropython,methoxid/micropystat,vriera/micropython,swegener/micropython,feilongfl/micropython,SHA2017-badge/micropython-esp32,vitiral/micropython,tuc-osg/micropython,micropython/micropython-esp32,hiway/micropython,KISSMonX/micropython,misterdanb/micropython,vitiral/micropython,redbear/micropython,Peetz0r/micropython-esp32,Vogtinator/micropython,alex-robbins/micropython,PappaPeppar/micropython,noahwilliamsson/micropython,tuc-osg/micropython,MrSurly/micropython-esp32,mhoffma/micropython,paul-xxx/micropython,redbear/micropython,lowRISC/micropython,aitjcize/micropython,SungEun-Steve-Kim/test-mp,noahchense/micropython,paul-xxx/micropython,adafruit/circuitpython,noahchense/micropython,skybird6672/micropython,lowRISC/micropython,henriknelson/micropython,dmazzella/micropython,blmorris/micropython,kerneltask/micropython,Timmenem/micropython,aethaniel/micropython,henriknelson/micropython,adafruit/micropython,adafruit/circuitpython,pfalcon/micropython,tobbad/micropython,dxxb/micropython,pramasoul/micropython,dhylands/micropython,infinnovation/micropython,jlillest/micropython,drrk/micropython,feilongfl/micropython,MrSurly/micropython,redbea
r/micropython,turbinenreiter/micropython,matthewelse/micropython,lbattraw/micropython,dhylands/micropython,dmazzella/micropython,ryannathans/micropython,pfalcon/micropython,dinau/micropython,jimkmc/micropython,pozetroninc/micropython,slzatz/micropython,pfalcon/micropython,Timmenem/micropython,hosaka/micropython,selste/micropython,mpalomer/micropython,tdautc19841202/micropython,hosaka/micropython,ChuckM/micropython,cloudformdesign/micropython,micropython/micropython-esp32,adafruit/micropython,redbear/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,puuu/micropython,galenhz/micropython,jmarcelino/pycom-micropython,KISSMonX/micropython,kostyll/micropython,KISSMonX/micropython,trezor/micropython,alex-robbins/micropython,SHA2017-badge/micropython-esp32,turbinenreiter/micropython,ernesto-g/micropython,danicampora/micropython,blmorris/micropython,xhat/micropython,adamkh/micropython,lbattraw/micropython,ernesto-g/micropython,AriZuu/micropython,MrSurly/micropython-esp32,stonegithubs/micropython,MrSurly/micropython,tdautc19841202/micropython,stonegithubs/micropython,mhoffma/micropython,feilongfl/micropython,noahwilliamsson/micropython,ryannathans/micropython,pramasoul/micropython,cwyark/micropython,ericsnowcurrently/micropython,AriZuu/micropython,EcmaXp/micropython,jlillest/micropython,galenhz/micropython,vriera/micropython,kostyll/micropython,HenrikSolver/micropython,pozetroninc/micropython,drrk/micropython,ruffy91/micropython,alex-march/micropython,cwyark/micropython,praemdonck/micropython,Timmenem/micropython,lowRISC/micropython,selste/micropython,jmarcelino/pycom-micropython,adafruit/circuitpython,dhylands/micropython,selste/micropython,skybird6672/micropython,tobbad/micropython,heisewangluo/micropython,swegener/micropython,aitjcize/micropython,jimkmc/micropython,chrisdearman/micropython,pozetroninc/micropython,mianos/micropython,KISSMonX/micropython,selste/micropython,xyb/micropython,ganshun666/micropython,dmazzella/micropython,micropython/micropython-esp3
2,mianos/micropython,hosaka/micropython,tuc-osg/micropython,warner83/micropython,utopiaprince/micropython,xhat/micropython,trezor/micropython,praemdonck/micropython,stonegithubs/micropython,omtinez/micropython,martinribelotta/micropython,noahchense/micropython,ernesto-g/micropython,AriZuu/micropython,trezor/micropython,lbattraw/micropython,slzatz/micropython,Vogtinator/micropython,pozetroninc/micropython,orionrobots/micropython,dinau/micropython,KISSMonX/micropython,suda/micropython,praemdonck/micropython,xyb/micropython,bvernoux/micropython,matthewelse/micropython,kerneltask/micropython,mhoffma/micropython,pfalcon/micropython,PappaPeppar/micropython,drrk/micropython,neilh10/micropython,noahwilliamsson/micropython,matthewelse/micropython,TDAbboud/micropython,galenhz/micropython,supergis/micropython,xhat/micropython,cwyark/micropython,oopy/micropython,pramasoul/micropython,orionrobots/micropython,MrSurly/micropython,chrisdearman/micropython,mhoffma/micropython,jlillest/micropython,ChuckM/micropython,misterdanb/micropython,xuxiaoxin/micropython,cloudformdesign/micropython,infinnovation/micropython,heisewangluo/micropython,tobbad/micropython,ericsnowcurrently/micropython,ryannathans/micropython,mgyenik/micropython,methoxid/micropystat,jmarcelino/pycom-micropython,praemdonck/micropython,bvernoux/micropython,xyb/micropython,jmarcelino/pycom-micropython,puuu/micropython,adamkh/micropython,Peetz0r/micropython-esp32,tralamazza/micropython,swegener/micropython,supergis/micropython,martinribelotta/micropython,hosaka/micropython,mpalomer/micropython,firstval/micropython,TDAbboud/micropython,omtinez/micropython,lbattraw/micropython,suda/micropython,hiway/micropython,ruffy91/micropython,blazewicz/micropython,oopy/micropython,HenrikSolver/micropython,oopy/micropython,xuxiaoxin/micropython,ceramos/micropython,dxxb/micropython,TDAbboud/micropython,tobbad/micropython,henriknelson/micropython,henriknelson/micropython,heisewangluo/micropython,noahwilliamsson/micropython,omtinez/microp
ython,turbinenreiter/micropython,alex-robbins/micropython,utopiaprince/micropython,dmazzella/micropython,aethaniel/micropython,deshipu/micropython,alex-march/micropython,ruffy91/micropython,henriknelson/micropython,xyb/micropython,lowRISC/micropython,suda/micropython,turbinenreiter/micropython,matthewelse/micropython,adafruit/circuitpython,mhoffma/micropython,supergis/micropython,swegener/micropython,orionrobots/micropython,adamkh/micropython,firstval/micropython,dhylands/micropython,mianos/micropython,adafruit/micropython,infinnovation/micropython,torwag/micropython,toolmacher/micropython,matthewelse/micropython,micropython/micropython-esp32,utopiaprince/micropython,omtinez/micropython,chrisdearman/micropython,trezor/micropython,noahchense/micropython,ernesto-g/micropython,xuxiaoxin/micropython,martinribelotta/micropython,martinribelotta/micropython,aethaniel/micropython,mgyenik/micropython,PappaPeppar/micropython,EcmaXp/micropython,ceramos/micropython,kostyll/micropython,cloudformdesign/micropython,MrSurly/micropython-esp32,adafruit/circuitpython,xuxiaoxin/micropython,matthewelse/micropython,SHA2017-badge/micropython-esp32,tobbad/micropython,PappaPeppar/micropython,EcmaXp/micropython,tralamazza/micropython,MrSurly/micropython,paul-xxx/micropython,vitiral/micropython,jmarcelino/pycom-micropython | tests/basics/bytearray1.py | tests/basics/bytearray1.py | a = bytearray([1, 2, 200])
print(a[0], a[2])
print(a[-1])
a[2] = 255
print(a[-1])
a.append(10)
print(len(a))
s = 0
for i in a:
s += i
print(s)
| mit | Python | |
d8df1ca5d4c0c346496e2a190be8a63a7249e578 | Add sort code. | doggan/code-dump,doggan/code-dump,doggan/code-dump,doggan/code-dump,doggan/code-dump,doggan/code-dump,doggan/code-dump,doggan/code-dump | sort/sort.py | sort/sort.py | #!/path/to/python
def merge_sort(items):
if items is None:
return
helper = []
merge_sort_impl(items, helper, 0, len(items))
def merge_sort_impl(items, helper, start, end):
if (end - start) <= 1:
return
mid = (end - start) / 2 + start
merge_sort_impl(items, helper, start, mid)
merge_sort_impl(items, helper, mid, end)
merge(items, helper, start, mid, end)
def merge(items, helper, start, mid, end):
leftStart = start
leftEnd = mid
rightStart = mid
rightEnd = end
while leftStart < leftEnd and rightStart < rightEnd:
if items[leftStart] < items[rightStart]:
helper.append(items[leftStart])
leftStart += 1
else:
helper.append(items[rightStart])
rightStart += 1
while leftStart < leftEnd:
helper.append(items[leftStart])
leftStart += 1
while rightStart < rightEnd:
helper.append(items[rightStart])
rightStart += 1
cnt = len(helper)
for i in range(0, cnt):
items[start + i] = helper[i]
del helper[:]
| unlicense | Python | |
a9f1ccaeae554ccc3515e1fc28e1bc4b3822b856 | Add base code for all examples | mohan3d/PyOpenload | examples/base.py | examples/base.py | from __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
ol = OpenLoad(username, key)
| mit | Python | |
c5e83b84f490544c99bf119cc1fca0efee83e665 | Add test for manager.py | NeutronUfscarDatacom/DriverDatacom | dcclient/tests/test_manager.py | dcclient/tests/test_manager.py | import testtools
import dcclient.xml_manager.manager as mg
import dcclient.xml_manager.data_structures as ds
class main_test(testtools.TestCase):
def test_findVlan(self):
# tests if none type is return when vlan is not found
xml = mg.ManagedXml()
answer = xml.findVlan(61)
self.assertIs(answer, None)
# tests if Vlan_global object is return when vlan is found
xml.addVlan(60, name='vlan_test', ports=[1, 5, 9])
answer = xml.findVlan(60)
self.assertIsInstance(answer, ds.Vlan_global)
# tests if found vlan xml is correct
expected_xml = '<vlan_global id0="60"><vid>60</vid><active>1' + \
'</active><name>vlan_test</name>' + \
'<pbmp_untagged id0="0"><pbits id0="0">273</pbits>' + \
'</pbmp_untagged></vlan_global>'
self.assertEquals(expected_xml, answer.as_xml_text())
| apache-2.0 | Python | |
eb9ba88177ce23ef259b1731f02c38d0ccaa8318 | Add new script to build Dogecoin | rnicoll/robodoge | run_build.py | run_build.py | #!/usr/bin/python3
import re
import os
import string
import sys
import subprocess
import auto_merge
def compile_dogecoin():
path = os.getcwd()
subprocess.check_output([path + os.path.sep + 'autogen.sh'])
subprocess.check_output([path + os.path.sep + 'configure'])
subprocess.check_output(['make', 'clean'], stderr=subprocess.STDOUT)
subprocess.check_output(['make'], stderr=subprocess.STDOUT)
subprocess.check_output(['make', 'check'], stderr=subprocess.STDOUT)
return True
config = auto_merge.load_configuration('config.yml')
if not 'dogecoin_repo' in config:
print('Missing "dogecoin_repo" configuration.')
sys.exit(1)
if not config['dogecoin_repo']['path']:
print('Missing "dogecoin_repo" configuration.')
sys.exit(1)
cwd = os.getcwd()
os.chdir(config['dogecoin_repo']['path'])
os.chdir('..') # Go up to the directory above the Git repository
build_success = compile_dogecoin()
os.chdir(cwd)
| mit | Python | |
07e1dfcc4c76490a888eb1e93b649962d60a78fb | add unittest | hhatto/poyonga | test/test_poyonga.py | test/test_poyonga.py | import struct
import unittest
from mock import patch, Mock
from poyonga import Groonga, GroongaResult
class PoyongaHTTPTestCase(unittest.TestCase):
def setUp(self):
self.g = Groonga()
@patch('poyonga.client.urlopen')
def test_json_result_with_http(self, mock_urlopen):
m = Mock()
m.read.side_effect = ['[[0, 1337566253.89858, 0.000354], {}]']
mock_urlopen.return_value = m
ret = self.g.call('status')
self.assertEqual(type(ret), GroongaResult)
self.assertEqual(ret.status, 0)
class PoyongaGQTPTestCase(unittest.TestCase):
def setUp(self):
self.g = Groonga(protocol='gqtp')
@patch('poyonga.client.socket.socket')
def test_json_result_with_gqtp(self, mock_socket):
m = Mock()
_proto, _qtype, _keylen, _level, _flags, _status, _size, _opaque, _cas = \
0xc7, 0x02, 0, 0, 0, 0, 2, 0, 0
packdata = struct.pack("!BBHBBHIIQ",
_proto, _qtype, _keylen, _level, _flags, _status, _size, _opaque, _cas)
m.recv.return_value = packdata + "{}"
mock_socket.return_value = m
ret = self.g.call('status')
self.assertEqual(type(ret), GroongaResult)
self.assertEqual(ret.status, 0)
print(dir(ret))
print(ret.raw_result)
if __name__ == '__main__':
unittest.main()
| mit | Python | |
9bb3e11a4a983d9e9215bcd8805360968e9afa7f | Add improved decorator | TwilioDevEd/webhooks-example-django | webhooks/decorators.py | webhooks/decorators.py | from django.conf import settings
from django.http import HttpResponseForbidden
from functools import wraps
from twilio.util import RequestValidator
import os
def validate_twilio_request(f):
"""Validates that incoming requests genuinely originated from Twilio"""
@wraps(f)
def decorated_function(request, *args, **kwargs):
# Create an instance of the RequestValidator class
validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))
# Validate the request using its URL, POST data,
# and X-TWILIO-SIGNATURE header
request_valid = validator.validate(
request.build_absolute_uri(),
request.POST,
request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))
# Continue processing the request if it's valid (or if DEBUG is True)
# and return a 403 error if it's not
if request_valid or settings.DEBUG:
return f(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return decorated_function
| mit | Python | |
2d2dabc82f2787b6c0a1bc6a5373daa891db4ce7 | Create command line file | ucaptmh/GreenGraph | greengraph/command.py | greengraph/command.py | __author__ = 'third'
from argparse import ArgumentParser
from greengraph.PlotGraph import PlotGraph
def process():
parser = ArgumentParser(
description="Produce a graph of number of green pixels in satellite images between two locations")
parser.add_argument("--start", required=True,
help='Starting location')
parser.add_argument("--end", required=True,
help='End location')
parser.add_argument("--steps", required=True,
help='Number of steps desired between starting and ending locations')
parser.add_argument("--out", required=True,
help="Filename of output")
arguments = parser.parse_args()
PlotGraph(arguments.start, arguments.to, arguments.steps, arguments.out)
if __name__ == "__main__":
process() | mit | Python | |
7691c9a886d76d6051b2ae3cb6e5750b2ba9c617 | add simple/dubiously effective tests for leg profile view | mileswwatkins/billy,openstates/billy,loandy/billy,sunlightlabs/billy,loandy/billy,sunlightlabs/billy,loandy/billy,openstates/billy,openstates/billy,sunlightlabs/billy,mileswwatkins/billy,mileswwatkins/billy | billy/web/public/tests/legislator_detail.py | billy/web/public/tests/legislator_detail.py |
import requests
from billy.models import db
def test_gets(states):
for leg in db.legislators.find({}, ['_id']):
requests.get('http://127.0.0.1:8000/ca/legislator/%s/' % leg['_id'])
if __name__ == '__main__':
import sys
test_gets(sys.argv[1:])
| bsd-3-clause | Python | |
3f95fd7174f740600c8c11dd2693a7775d06b945 | add missing file | xlqian/navitia,Tisseo/navitia,Tisseo/navitia,kadhikari/navitia,xlqian/navitia,kinnou02/navitia,kinnou02/navitia,Tisseo/navitia,CanalTP/navitia,CanalTP/navitia,Tisseo/navitia,kadhikari/navitia,pbougue/navitia,kadhikari/navitia,kadhikari/navitia,patochectp/navitia,CanalTP/navitia,xlqian/navitia,patochectp/navitia,xlqian/navitia,xlqian/navitia,CanalTP/navitia,pbougue/navitia,Tisseo/navitia,pbougue/navitia,CanalTP/navitia,kinnou02/navitia,kinnou02/navitia,patochectp/navitia,patochectp/navitia,pbougue/navitia | source/jormungandr/jormungandr/scenarios/ridesharing/ridesharing_service.py | source/jormungandr/jormungandr/scenarios/ridesharing/ridesharing_service.py | # Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import abc
import six
import logging
from jormungandr import utils
@six.add_metaclass(abc.ABCMeta)
class AbstractRidesharingService(object):
@abc.abstractmethod
def request_journey(self, from_coord, to_coord, period_extremity, limit=None):
pass
# read the configurations and return the wanted service instance
class Ridesharing(object):
@staticmethod
def get_ridesharing_services(instance, ridesharing_configurations):
logger = logging.getLogger(__name__)
ridesharing_services = []
for config in ridesharing_configurations:
# Set default arguments
if 'args' not in config:
config['args'] = {}
if 'service_url' not in config['args']:
config['args'].update({'service_url': None})
if 'instance' not in config['args']:
config['args'].update({'instance': instance})
try:
service = utils.create_object(config)
except KeyError as e:
raise KeyError('impossible to build a ridesharing service for {}, '
'missing mandatory field in configuration: {}'
.format(instance.name, e.message))
ridesharing_services.append(service)
logger.info('** Ridesharing: {} used for instance: {} **'
.format(type(service).__name__, instance.name))
return ridesharing_services
| agpl-3.0 | Python | |
aff107497d202da3d41541cf0cd24e2c29c6ddbf | Add TCL executor; #58 | DMOJ/judge,DMOJ/judge,buhe/judge,buhe/judge,buhe/judge,buhe/judge,buhe/judge,buhe/judge,DMOJ/judge | executors/TCL.py | executors/TCL.py | from .resource_proxy import ResourceProxy
from .utils import test_executor
from cptbox import SecurePopen, CHROOTSecurity, PIPE
from judgeenv import env
from subprocess import Popen, PIPE as sPIPE
from cptbox.syscalls import *
import errno
TCL_FS = ['.*\.(so|tcl)', '/etc/nsswitch\.conf$', '/etc/passwd$']
class Executor(ResourceProxy):
def __init__(self, problem_id, source_code):
super(Executor, self).__init__()
self._script = source_code_file = self._file('%s.tcl' % problem_id)
with open(source_code_file, 'wb') as fo:
fo.write(source_code)
def _security(self):
security = CHROOTSecurity(TCL_FS)
def sock(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
def write(debugger):
if debugger.arg0 > 4: # TCL uses some handles internally
return False
return True
security[sys_tgkill] = True
security[sys_write] = write
security[sys_socket] = sock
security[sys_connect] = True
security[sys_access] = True
security[sys_getsockname] = True
return security
def launch(self, *args, **kwargs):
return SecurePopen(['tclsh', self._script] + list(args),
executable=env['runtime']['tclsh'],
security=self._security(),
time=kwargs.get('time'),
memory=kwargs.get('memory'),
address_grace=131072,
stderr=(PIPE if kwargs.get('pipe_stderr', False) else None),
env={'LANG': 'C'}, cwd=self._dir)
def launch_unsafe(self, *args, **kwargs):
return Popen(['tclsh', self._script] + list(args),
executable=env['runtime']['tclsh'],
env={'LANG': 'C'},
cwd=self._dir,
**kwargs)
def initialize():
if not 'tclsh' in env['runtime']:
return False
return test_executor('TCL', Executor, '''puts "Hello, World!"
''')
| agpl-3.0 | Python | |
46eaacef6240a72089bda049214640c50ec353ec | Add tests for robots APIs | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | backend/globaleaks/tests/handlers/test_robots.py | backend/globaleaks/tests/handlers/test_robots.py | # -*- coding: utf-8 -*-
import json
from twisted.internet.defer import inlineCallbacks
from globaleaks.handlers import robots
from globaleaks.models import config
from globaleaks.rest import requests
from globaleaks.settings import GLSettings
from globaleaks.tests import helpers
class TestRobotstxtHandlerHandler(helpers.TestHandler):
_handler = robots.RobotstxtHandler
@inlineCallbacks
def test_get_with_indexing_disabled(self):
handler = self.request()
GLSettings.memory_copy.allow_indexing = False
yield handler.get()
self.assertEqual(self.responses[0], "User-agent: *\n")
self.assertEqual(self.responses[1], "Disallow: /")
@inlineCallbacks
def test_get_with_indexing_enabled(self):
handler = self.request()
GLSettings.memory_copy.allow_indexing = True
yield handler.get()
self.assertEqual(self.responses[0], "User-agent: *\n")
self.assertEqual(self.responses[1], "Allow: /")
class TestSitemapHandlerHandler(helpers.TestHandler):
_handler = robots.SitemapHandler
@inlineCallbacks
def test_get_with_indexing_disabled(self):
handler = self.request()
GLSettings.memory_copy.allow_indexing = False
yield handler.get()
self.assertEqual(handler.get_status(), 404)
@inlineCallbacks
def test_get_with_indexing_enabled(self):
handler = self.request()
GLSettings.memory_copy.allow_indexing = True
yield handler.get()
self.assertEqual(handler.get_status(), 200)
class TestAhmiaDescriptionHandler(helpers.TestHandler):
_handler = robots.AhmiaDescriptionHandler
@inlineCallbacks
def test_get_ahmia_disabled(self):
handler = self.request()
GLSettings.memory_copy.ahmia = False
yield handler.get()
self.assertEqual(handler.get_status(), 404)
@inlineCallbacks
def test_get_ahmia_enabled(self):
handler = self.request()
GLSettings.memory_copy.ahmia = True
yield handler.get()
self._handler.validate_message(json.dumps(self.responses[0]), requests.AhmiaDesc)
| agpl-3.0 | Python | |
4fc115d4297f645177854347e32c36128140caf6 | Create blastx_filter.py | stajichlab/localizaTE | scripts/blastx_filter.py | scripts/blastx_filter.py | # -*- coding: utf-8 -*-
##############################################################################################
# Prints the best hit of the blastx along with the alignment lenght and e value.
#############################################################################################
import collections
from collections import OrderedDict
from collections import defaultdict
infile=open("blastx.output", 'r')
outfile=open("prueba", 'w')
x=0
a=0
for line in infile.readlines(): ## Get only desired lines of the Blastn output
if "Query=" in line:
x=x+1
outfile.write('HIT_'+str(x)+'\t'+line)
if "0.0" in line:
if 'Score =' in line:
pass
else:
outfile.write('\t'+line)
if "e-" in line:
if 'Score =' in line:
pass
else:
outfile.write('\t'+line)
if 'No hits found' in line:
outfile.write(line)
infile.close()
outfile.close()
infile2=open('prueba', 'r')
value=0 #open dictionary using "defaultdict", which allows to use lists as values
dictionary=defaultdict(list)
key=0
for line in infile2.readlines():
line=line.strip()
if "Query=" in line: # Lines with "hit" (name of query) are used as keys
hit=line.strip()
hit=line.split('Query= ')
key=hit[1]
#print line
else:
value=line
dictionary[key].append(value) # Appends every hit of each query into a list, used as value in the dictionary
od = collections.OrderedDict(sorted(dictionary.items()))
for k, v in od.iteritems(): #iterates over the dict, to show keys and values
if '*' not in v[0]:
print k+'\t'+v[0]
infile2.close()
| mit | Python | |
ae14f171ea6538bef026f1d1e3441194132eef28 | Add challenge 5 | gcavallo/Python-Challenge | 5.py | 5.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import urllib2, pickle, re
source = pickle.loads(urllib2.urlopen('http://www.pythonchallenge.com/pc/def/banner.p').read())
text = ''.join(x*y for i in source for x,y in i)
print re.sub("(.{95})", "\\1\n", text, 0, re.DOTALL)
| bsd-3-clause | Python | |
3e3e5bb92b1d9e0e1981a6deba41152826c3fce0 | Add script to plot population vs distinct hashtags. | chebee7i/twitter,chebee7i/twitter,chebee7i/twitter | scripts/popvsdistinct.py | scripts/popvsdistinct.py | """
Plot and calculate county population size to number of distinct hashtags.
"""
import matplotlib.pyplot as plt
import seaborn
import pandas
import twitterproj
import scipy.stats
import numpy as np
def populations():
# Grab demographic info
data = {}
df = pandas.read_csv('../census/county/PEP_2013_PEPANNRES_with_ann.csv')
for county in df.values[1:]: # Skip column headers
fips = county[1]
data[fips] = int(county[-1])
return data
def distinct_ht():
# Grab tweet info
data = {}
db = twitterproj.connect()
for county in twitterproj.hashtag_counts__counties(db, bot_filtered=True):
fips = county['geoid']
#data[fips]['tweeted_hashtags'] = sum(county['counts'].values())
data[fips] = len(county['counts'])
return data
def main():
pops = populations()
dhts = distinct_ht()
Y = np.array(dhts.values())
X = np.array([pops[idx] for idx in dhts.keys()])
r, p = scipy.stats.pearsonr(X, Y)
print r , p
plt.scatter(X, Y, s=1, marker='o')
plt.savefig('popdistht.pdf')
plt.title(r'$\rho = {}'.format(r))
if __name__ == '__main__':
main()
| unlicense | Python | |
483076d12e8eb7adec46a59c7ea65ad772de3fe5 | Create __init__.py | coryjog/anemoi,coryjog/anemoi,coryjog/anemoi | anemoi/analysis/__init__.py | anemoi/analysis/__init__.py | mit | Python | ||
d67a74508da9d2cdd384b823a863799655e1f087 | Add users_helper.py | lc-soft/GitDigger,lc-soft/GitDigger,lc-soft/GitDigger,lc-soft/GitDigger | app/helpers/users_helper.py | app/helpers/users_helper.py | from config import site
from random import random
from app.models.user import User
from werkzeug.security import generate_password_hash
class UsersHelper(object):
def __init__(self, app):
self.app = app
def get_user(self, github_id):
return User.query.filter_by(github_id=github_id).first()
def create_user(self, data):
pw = generate_password_hash(str(random()))
email = 'github_%s@%s' % (data['login'], site.config['domain'])
user = User(data['login'], email, pw, data['name'])
user.github_id = data['id']
user.github_username = data['login']
user.avatar_url = data['avatar_url']
user.owner = 'system'
user.bio = data['bio']
return user
| agpl-3.0 | Python | |
03e94f804243eed1f434994cfb5a404dbe410ce3 | Add profiles/gdef test | moyogo/fontbakery,moyogo/fontbakery,graphicore/fontbakery,googlefonts/fontbakery,googlefonts/fontbakery,googlefonts/fontbakery,graphicore/fontbakery,graphicore/fontbakery,moyogo/fontbakery | tests/profiles/gdef_test.py | tests/profiles/gdef_test.py | from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import otTables
from fontbakery.utils import TEST_FILE
from fontbakery.checkrunner import (WARN, PASS)
def get_test_font():
import defcon
import ufo2ft
test_ufo = defcon.Font(TEST_FILE("test.ufo"))
glyph = test_ufo.newGlyph("acute")
glyph.unicode = 0x00B4
glyph = test_ufo.newGlyph("acutecomb")
glyph.unicode = 0x0301
test_ttf = ufo2ft.compileTTF(test_ufo)
return test_ttf
def add_gdef_table(font, class_defs):
font["GDEF"] = gdef = newTable("GDEF")
class_def_table = otTables.GlyphClassDef()
class_def_table.classDefs = class_defs
gdef.table = otTables.GDEF()
gdef.table.GlyphClassDef = class_def_table
def test_check_gdef_spacing_marks():
"""Are some spacing glyphs in GDEF mark glyph class? """
from fontbakery.profiles.gdef import com_google_fonts_check_gdef_spacing_marks as check
test_font = get_test_font()
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not declare an optional "GDEF" table or has '\
'any GDEF glyph class definition.'
add_gdef_table(test_font, {})
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not has spacing glyphs in the GDEF mark glyph class.'
add_gdef_table(test_font, {'A': 3})
status, message = list(check(test_font))[-1]
msg = ": ".join(msg.strip() for msg in str(message).split(":"))
assert (status, msg) == (
WARN,
'The following spacing glyphs may be in the GDEF mark glyph class by '\
'mistake: A [code: spacing-mark-glyphs]'
)
def test_check_gdef_mark_chars():
"""Are some mark characters not in in GDEF mark glyph class? """
from fontbakery.profiles.gdef import com_google_fonts_check_gdef_mark_chars as check
test_font = get_test_font()
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not declare an optional "GDEF" table or has '\
'any GDEF glyph class definition.'
add_gdef_table(test_font, {})
status, message = list(check(test_font))[-1]
assert status == WARN
msg = str(message)
assert msg.split(":")[0], msg.split(":")[1].strip() == (
'The following mark characters could be in the GDEF mark glyph class',
'U+0301'
)
add_gdef_table(test_font, {'acutecomb': 3})
status, message = list(check(test_font))[-1]
assert status, message == (
PASS,
'Font does not have mark characters not in '\
'the GDEF mark glyph class.'
)
def test_check_gdef_non_mark_chars():
"""Are some non-mark characters in GDEF mark glyph class spacing? """
from fontbakery.profiles.gdef import com_google_fonts_check_gdef_non_mark_chars as check
test_font = get_test_font()
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not declare an optional "GDEF" table or has '\
'any GDEF glyph class definition.'
add_gdef_table(test_font, {})
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not have non-mark characters in '\
'the GDEF mark glyph class.'
add_gdef_table(test_font, {'acutecomb': 3})
status, message = list(check(test_font))[-1]
assert status == PASS
assert message == 'Font does not have non-mark characters in '\
'the GDEF mark glyph class.'
add_gdef_table(test_font, {'acute': 3, 'acutecomb': 3})
status, message = list(check(test_font))[-1]
assert status == WARN
msg = str(message)
assert msg.split(":")[0], msg.split(":")[1].strip() == (
'The following non-mark characters should not be in '\
'the GDEF mark glyph class:\n',
'U+00B4'
)
| apache-2.0 | Python | |
695ad7229c5ff1355549ac0e22a498b6fac25947 | add reuse port test | hirolovesbeer/sekiwake,hirolovesbeer/sekiwake | syslog-forwarder/reuseport_forwarder.py | syslog-forwarder/reuseport_forwarder.py | #!/usr/bin/env python
import sys, socket, time
from multiprocessing import Process
PORT = 514
NR_LISTENERS = 2
SO_REUSEPORT = 15
BUFSIZE = 1025
#DST_HOST = "10.206.116.22"
DST_HOST = "192.168.11.13"
def listener_work(num):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # set SO_REUSEADDR
s.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1) # set SO_REUSEPORT
s.bind(("", PORT))
while True:
data, addr = s.recvfrom(BUFSIZE)
s.sendto(data, (DST_HOST, PORT))
def server():
processes = []
for i in range(NR_LISTENERS):
p = Process(target=listener_work, args=(i,))
p.start()
processes.append(p)
for p in processes:
p.join()
def main():
if '-s' in sys.argv:
server()
if __name__ == '__main__':
main()
| mit | Python | |
465a604547e1438e650c8b4142816e2330363767 | Add a test for storing iterable to a list slice. | alex-robbins/micropython,adafruit/circuitpython,PappaPeppar/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,swegener/micropython,Timmenem/micropython,henriknelson/micropython,adafruit/micropython,TDAbboud/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,deshipu/micropython,deshipu/micropython,torwag/micropython,adafruit/circuitpython,selste/micropython,toolmacher/micropython,cwyark/micropython,AriZuu/micropython,oopy/micropython,adafruit/circuitpython,chrisdearman/micropython,torwag/micropython,infinnovation/micropython,trezor/micropython,kerneltask/micropython,AriZuu/micropython,swegener/micropython,puuu/micropython,torwag/micropython,pozetroninc/micropython,MrSurly/micropython,blazewicz/micropython,selste/micropython,lowRISC/micropython,dmazzella/micropython,micropython/micropython-esp32,toolmacher/micropython,tobbad/micropython,PappaPeppar/micropython,PappaPeppar/micropython,hiway/micropython,torwag/micropython,PappaPeppar/micropython,blazewicz/micropython,MrSurly/micropython,pramasoul/micropython,selste/micropython,Timmenem/micropython,alex-robbins/micropython,Peetz0r/micropython-esp32,infinnovation/micropython,henriknelson/micropython,selste/micropython,pfalcon/micropython,trezor/micropython,AriZuu/micropython,blazewicz/micropython,alex-robbins/micropython,HenrikSolver/micropython,hiway/micropython,micropython/micropython-esp32,ryannathans/micropython,oopy/micropython,torwag/micropython,tralamazza/micropython,cwyark/micropython,infinnovation/micropython,tobbad/micropython,adafruit/micropython,AriZuu/micropython,henriknelson/micropython,tobbad/micropython,puuu/micropython,MrSurly/micropython-esp32,pfalcon/micropython,micropython/micropython-esp32,TDAbboud/micropython,blazewicz/micropython,kerneltask/micropython,puuu/micropython,adafruit/micropython,AriZuu/micropython,lowRISC/micropython,kerneltask/micropython,dmazzella/micropython,oopy/mi
cropython,pramasoul/micropython,tralamazza/micropython,pozetroninc/micropython,MrSurly/micropython,chrisdearman/micropython,deshipu/micropython,tobbad/micropython,toolmacher/micropython,puuu/micropython,HenrikSolver/micropython,TDAbboud/micropython,swegener/micropython,TDAbboud/micropython,pozetroninc/micropython,MrSurly/micropython-esp32,alex-robbins/micropython,swegener/micropython,Timmenem/micropython,TDAbboud/micropython,deshipu/micropython,trezor/micropython,MrSurly/micropython-esp32,infinnovation/micropython,cwyark/micropython,trezor/micropython,micropython/micropython-esp32,bvernoux/micropython,hiway/micropython,pramasoul/micropython,tralamazza/micropython,MrSurly/micropython,adafruit/circuitpython,trezor/micropython,pfalcon/micropython,chrisdearman/micropython,MrSurly/micropython-esp32,micropython/micropython-esp32,bvernoux/micropython,Peetz0r/micropython-esp32,pfalcon/micropython,hiway/micropython,infinnovation/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython-esp32,cwyark/micropython,kerneltask/micropython,lowRISC/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,dmazzella/micropython,bvernoux/micropython,chrisdearman/micropython,hiway/micropython,lowRISC/micropython,bvernoux/micropython,adafruit/micropython,selste/micropython,Peetz0r/micropython-esp32,lowRISC/micropython,chrisdearman/micropython,henriknelson/micropython,ryannathans/micropython,adafruit/circuitpython,pramasoul/micropython,HenrikSolver/micropython,dmazzella/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,pfalcon/micropython,pramasoul/micropython,ryannathans/micropython,oopy/micropython,bvernoux/micropython,Timmenem/micropython,cwyark/micropython,pozetroninc/micropython,Peetz0r/micropython-esp32,kerneltask/micropython,SHA2017-badge/micropython-esp32,tralamazza/micropython,puuu/micropython,ryannathans/micropython,toolmacher/micropython,alex-robbins/micropython,HenrikSolver/micropython,Timmenem/micropython,tobbad/micropython
,MrSurly/micropython,ryannathans/micropython,blazewicz/micropython,HenrikSolver/micropython,oopy/micropython,pozetroninc/micropython,swegener/micropython,toolmacher/micropython | tests/cpydiff/types_list_store_noniter.py | tests/cpydiff/types_list_store_noniter.py | """
categories: Types,list
description: List slice-store with non-iterable on RHS is not implemented
cause: RHS is restricted to be a tuple or list
workaround: Use ``list(<iter>)`` on RHS to convert the iterable to a list
"""
l = [10, 20]
l[0:1] = range(4)
print(l)
| mit | Python | |
d6d4f175330638f35d4eb0512ef14f82eab74f50 | Add a debug tool to show advertising broadcasts | tjguk/networkzero,tjguk/networkzero,tjguk/networkzero | show-adverts.py | show-adverts.py | # -*- coding: utf-8 -*-
import os, sys
print(sys.version_info)
import marshal
import select
import socket
import time
def _unpack(message):
return marshal.loads(message)
def _pack(message):
return marshal.dumps(message)
PORT = 9999
MESSAGE_SIZE = 256
#
# Set the socket up to broadcast datagrams over UDP
#
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind(("192.168.31.2", PORT))
#
# Add the raw UDP socket to a ZeroMQ socket poller so we can check whether
# it's received anything as part of the beacon's main event loop.
#
print("Listening...")
while True:
rlist, wlist, xlist = select.select([s], [], [], 1)
if s in rlist:
message, source = s.recvfrom(MESSAGE_SIZE)
print("Message: %r, Source: %r" % (message, source))
service_name, service_address = _unpack(message)
print("%s: Found %s at %s" % (time.asctime(), service_name, service_address))
| mit | Python | |
4ae0d59ae2e1c190a87e7561b73f1ce93696f4ab | Sort of start a real project structure | rschuetzler/over-bot | app/__init__.py | app/__init__.py | from flask import Flask
app = Flask(__name__)
from app import views
| mit | Python | |
b5c156fc8023e2752a659ea661c66973f612d753 | add tiny logger | michaelimfeld/adapo | logger.py | logger.py | #!/usr/bin/python
class Logger(object):
"""
tiny logger
"""
SUCCESS = '\033[92m'
ERROR = '\033[91m'
WARN = '\033[93m'
ENDC = '\033[0m'
def info(self, message):
"""
print info message
"""
print "info: %s" % message
def warn(self, message):
"""
print warn message
"""
print "%swarn: %s" % (self.WARN, message + self.ENDC)
def error(self, message):
"""
print error message
"""
print "%serror: %s" % (self.ERROR, message + self.ENDC)
def success(self, message):
"""
print success message
"""
print "%ssuccess: %s" % (self.SUCCESS, message + self.ENDC)
| mit | Python | |
663f7b2fd3db928c33674a32112ed6741e699ded | Add some missing migrations | w0rp/w0rpzone,w0rp/w0rpzone,w0rp/w0rpzone,w0rp/w0rpzone | blog/migrations/0006_auto_20170307_1943.py | blog/migrations/0006_auto_20170307_1943.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-03-07 19:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20150705_1607'),
]
operations = [
migrations.RemoveField(
model_name='blogauthor',
name='author',
),
migrations.AlterField(
model_name='article',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='article',
name='modified_date',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='articlecomment',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='articlecomment',
name='modified_date',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='articlecomment',
name='poster_name',
field=models.CharField(blank=True, default='Anonymous', max_length=255, verbose_name='Name'),
),
migrations.DeleteModel(
name='BlogAuthor',
),
]
| bsd-2-clause | Python | |
11f878977e6e6db9bf8f248e00cc2742835fc75e | add nextproginstr tests | pwndbg/pwndbg,pwndbg/pwndbg,pwndbg/pwndbg,pwndbg/pwndbg | tests/test_commands_next.py | tests/test_commands_next.py | import gdb
import pwndbg.gdblib.regs
import tests
REFERENCE_BINARY = tests.binaries.get("reference-binary.out")
def test_command_nextproginstr_binary_not_running():
out = gdb.execute("nextproginstr", to_string=True)
assert out == "nextproginstr: The program is not being run.\n"
def test_command_nextproginstr(start_binary):
start_binary(REFERENCE_BINARY)
gdb.execute("break main")
gdb.execute("continue")
out = gdb.execute("nextproginstr", to_string=True)
assert out == "The pc is already at the binary objfile code. Not stepping.\n"
# Sanity check
exec_bin_pages = [p for p in pwndbg.vmmap.get() if p.objfile == pwndbg.proc.exe and p.execute]
assert any(pwndbg.gdblib.regs.pc in p for p in exec_bin_pages)
main_page = pwndbg.vmmap.find(pwndbg.gdblib.regs.pc)
gdb.execute("break puts")
gdb.execute("continue")
# Sanity check that we are in libc
libc = "libc.so.6"
assert pwndbg.vmmap.find(pwndbg.gdblib.regs.rip).objfile.endswith(libc)
# Execute nextproginstr and see if we came back to the same vmmap page
gdb.execute("nextproginstr")
assert pwndbg.gdblib.regs.pc in main_page
# Ensure that nextproginstr won't jump now
out = gdb.execute("nextproginstr", to_string=True)
assert out == "The pc is already at the binary objfile code. Not stepping.\n"
| mit | Python | |
4ff639b397d157aba25f83d4f497260d5ffe9e86 | fix optional reports (also needs to run install_views command) | fragaria/BorIS,fragaria/BorIS,fragaria/BorIS | boris/reporting/migrations/0001_initial.py | boris/reporting/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema state for the reporting app's search models.

    Every model below sets ``'managed': False``, so Django creates no tables
    for them -- this migration only records model state.  The backing
    database objects are created outside migrations (presumably as SQL views
    via the project's ``install_views`` command -- TODO confirm).
    """
    # First migration of the app: nothing to depend on.
    dependencies = [
    ]
    operations = [
        # Encounter search model: boolean role flags, drug fields, and a
        # performed_on date split into month/year buckets for grouping.
        migrations.CreateModel(
            name='SearchEncounter',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NOTE(review): BooleanField(default=None) on the five flags
                # below is unusual; harmless on an unmanaged model since the
                # database schema is defined elsewhere -- confirm intent.
                ('is_client', models.BooleanField(default=None)),
                ('is_anonymous', models.BooleanField(default=None)),
                ('is_close_person', models.BooleanField(default=None)),
                ('is_sex_partner', models.BooleanField(default=None)),
                ('is_by_phone', models.BooleanField(default=None)),
                ('client_sex', models.PositiveSmallIntegerField()),
                ('primary_drug', models.PositiveSmallIntegerField()),
                ('primary_drug_usage', models.PositiveSmallIntegerField()),
                ('performed_on', models.DateField()),
                ('month', models.SmallIntegerField()),
                ('year', models.SmallIntegerField()),
                ('grouping_constant', models.SmallIntegerField()),
            ],
            options={
                'managed': False,  # table/view is created outside migrations
            },
            bases=(models.Model,),
        ),
        # Service search model, keyed by the service's content-type model name.
        migrations.CreateModel(
            name='SearchService',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_type_model', models.CharField(max_length=255)),
                ('performed_on', models.DateField()),
                ('month', models.SmallIntegerField()),
                ('year', models.SmallIntegerField()),
                ('is_client', models.BooleanField(default=False)),
                ('is_anonymous', models.BooleanField(default=False)),
                ('grouping_constant', models.SmallIntegerField()),
            ],
            options={
                'managed': False,  # table/view is created outside migrations
            },
            bases=(models.Model,),
        ),
        # Syringe-collection counts, bucketed by month/year like the others.
        migrations.CreateModel(
            name='SearchSyringeCollection',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('count', models.SmallIntegerField()),
                ('performed_on', models.DateField()),
                ('month', models.SmallIntegerField()),
                ('year', models.SmallIntegerField()),
                ('grouping_constant', models.SmallIntegerField()),
            ],
            options={
                'managed': False,  # table/view is created outside migrations
            },
            bases=(models.Model,),
        ),
    ]
| mit | Python | |
bf259361d38a61b4f3248602356dfc4f85b7c3dd | add glim commands | aacanakin/glim | glim/commands.py | glim/commands.py | from command import GlimCommand
from termcolor import colored
from utils import copytree
import os
import traceback
class NewCommand(GlimCommand):
name = 'new'
description = 'generates a new glim app'
def run(self, app):
proto_path = 'glim/proto/project'
currentpath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
try:
copytree(proto_path, currentpath)
print colored('Created new glim app', 'green')
except Exception, e:
print colored('App already exists', 'red')
class StartCommand(GlimCommand):
    """`glim start` -- boots the glim app's development web server."""
    name = 'start'
    description = 'start the glim app web server'
    def configure(self):
        # Register CLI options for this command.  The port default is a
        # string and is handed to app.start() unconverted.
        self.add_argument("--host", help = "enter host", default = '127.0.0.1')
        self.add_argument("--port", help = "enter port", default = '8080')
    def run(self, app):
        # NOTE(review): self.args.env is not registered in configure();
        # presumably a framework-level --env option -- confirm.
        print colored('Glim server started on %s environment' % self.args.env, 'green')
        app.start(host = self.args.host, port = self.args.port) | mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.